author     Mike Pagano <mpagano@gentoo.org>  2021-12-14 05:34:50 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2021-12-14 05:34:50 -0500
commit     e28a24722b419764ede06dce96691d89e39de4b1
tree       1e56977be0c3f4f100bf51f18d9d1ebda5537efa
parent     Remove redundant patch
Linux patch 5.15.8 (5.15-10)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README             |    4
-rw-r--r--  1007_linux-5.15.8.patch | 7337
2 files changed, 7341 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 6a35b58f..a50822d5 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch: 1006_linux-5.15.7.patch
From: http://www.kernel.org
Desc: Linux 5.15.7
+Patch: 1007_linux-5.15.8.patch
+From: http://www.kernel.org
+Desc: Linux 5.15.8
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1007_linux-5.15.8.patch b/1007_linux-5.15.8.patch
new file mode 100644
index 00000000..3473443a
--- /dev/null
+++ b/1007_linux-5.15.8.patch
@@ -0,0 +1,7337 @@
+diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
+index 2766fe45bb98b..ee42328a109dc 100644
+--- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml
++++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
+@@ -91,6 +91,14 @@ properties:
+ compensate for the board being designed with the lanes
+ swapped.
+
++ enet-phy-lane-no-swap:
++ $ref: /schemas/types.yaml#/definitions/flag
++ description:
++ If set, indicates that PHY will disable swap of the
++ TX/RX lanes. This property allows the PHY to work correctly after
++ e.g. wrong bootstrap configuration caused by issues in PCB
++ layout design.
++
+ eee-broken-100tx:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+diff --git a/Documentation/locking/locktypes.rst b/Documentation/locking/locktypes.rst
+index ddada4a537493..4fd7b70fcde19 100644
+--- a/Documentation/locking/locktypes.rst
++++ b/Documentation/locking/locktypes.rst
+@@ -439,11 +439,9 @@ preemption. The following substitution works on both kernels::
+ spin_lock(&p->lock);
+ p->count += this_cpu_read(var2);
+
+-On a non-PREEMPT_RT kernel migrate_disable() maps to preempt_disable()
+-which makes the above code fully equivalent. On a PREEMPT_RT kernel
+ migrate_disable() ensures that the task is pinned on the current CPU which
+ in turn guarantees that the per-CPU access to var1 and var2 are staying on
+-the same CPU.
++the same CPU while the task remains preemptible.
+
+ The migrate_disable() substitution is not valid for the following
+ scenario::
+@@ -456,9 +454,8 @@ scenario::
+ p = this_cpu_ptr(&var1);
+ p->val = func2();
+
+-While correct on a non-PREEMPT_RT kernel, this breaks on PREEMPT_RT because
+-here migrate_disable() does not protect against reentrancy from a
+-preempting task. A correct substitution for this case is::
++This breaks because migrate_disable() does not protect against reentrancy from
++a preempting task. A correct substitution for this case is::
+
+ func()
+ {
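
The reworded documentation above makes the point kernel-flavour independent: migrate_disable() only pins the task to its CPU and leaves it preemptible, so per-CPU data reached via this_cpu_ptr() can still be corrupted by a preempting task on the same CPU. A minimal illustrative sketch of one substitution that closes that hole with a local_lock (struct and names are hypothetical, not from the patch):

    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    /* Illustrative sketch, not part of the patch. */
    struct pcpu_data {
            local_lock_t lock;
            int val;
    };

    static DEFINE_PER_CPU(struct pcpu_data, pcpu_data) = {
            .lock = INIT_LOCAL_LOCK(lock),
    };

    static void update_val(void)
    {
            /* Excludes preempting tasks on this CPU on both
             * non-PREEMPT_RT and PREEMPT_RT kernels. */
            local_lock(&pcpu_data.lock);
            this_cpu_ptr(&pcpu_data)->val++;
            local_unlock(&pcpu_data.lock);
    }
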
+diff --git a/Makefile b/Makefile
+index 2d85216b1493d..72344b214bba5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Trick or Treat
+
+diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c
+index e5fbf8653a215..2020af88b6361 100644
+--- a/arch/csky/kernel/traps.c
++++ b/arch/csky/kernel/traps.c
+@@ -209,7 +209,7 @@ asmlinkage void do_trap_illinsn(struct pt_regs *regs)
+
+ asmlinkage void do_trap_fpe(struct pt_regs *regs)
+ {
+-#ifdef CONFIG_CPU_HAS_FP
++#ifdef CONFIG_CPU_HAS_FPU
+ return fpu_fpe(regs);
+ #else
+ do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc,
+@@ -219,7 +219,7 @@ asmlinkage void do_trap_fpe(struct pt_regs *regs)
+
+ asmlinkage void do_trap_priv(struct pt_regs *regs)
+ {
+-#ifdef CONFIG_CPU_HAS_FP
++#ifdef CONFIG_CPU_HAS_FPU
+ if (user_mode(regs) && fpu_libc_helper(regs))
+ return;
+ #endif
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 6ce906815bb28..1f96809606ac5 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1919,6 +1919,7 @@ config EFI
+ depends on ACPI
+ select UCS2_STRING
+ select EFI_RUNTIME_WRAPPERS
++ select ARCH_USE_MEMREMAP_PROT
+ help
+ This enables the kernel to use EFI runtime services that are
+ available (such as the EFI variable services).
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index fa24fd4d138df..a11785ebf628b 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -98,7 +98,7 @@
+ KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+ #define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
+ #define KVM_REQ_TLB_FLUSH_GUEST \
+- KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
++ KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+ #define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
+ #define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
+ #define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index b9a13dc211d5d..2092834efba11 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -1922,11 +1922,13 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
+
+ all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
+
++ if (all_cpus)
++ goto check_and_send_ipi;
++
+ if (!sparse_banks_len)
+ goto ret_success;
+
+- if (!all_cpus &&
+- kvm_read_guest(kvm,
++ if (kvm_read_guest(kvm,
+ hc->ingpa + offsetof(struct hv_send_ipi_ex,
+ vp_set.bank_contents),
+ sparse_banks,
+@@ -1934,6 +1936,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ }
+
++check_and_send_ipi:
+ if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index b7aa845f7beee..eff065ce6f8e8 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7021,7 +7021,13 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+ unsigned short port, void *val, unsigned int count)
+ {
+ if (vcpu->arch.pio.count) {
+- /* Complete previous iteration. */
++ /*
++ * Complete a previous iteration that required userspace I/O.
++ * Note, @count isn't guaranteed to match pio.count as userspace
++ * can modify ECX before rerunning the vCPU. Ignore any such
++ * shenanigans as KVM doesn't support modifying the rep count,
++ * and the emulator ensures @count doesn't overflow the buffer.
++ */
+ } else {
+ int r = __emulator_pio_in(vcpu, size, port, count);
+ if (!r)
+@@ -7030,7 +7036,6 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+ /* Results already available, fall through. */
+ }
+
+- WARN_ON(count != vcpu->arch.pio.count);
+ complete_emulator_pio_in(vcpu, val);
+ return 1;
+ }
+diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
+index b15ebfe40a73e..b0b848d6933af 100644
+--- a/arch/x86/platform/efi/quirks.c
++++ b/arch/x86/platform/efi/quirks.c
+@@ -277,7 +277,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
+ return;
+ }
+
+- new = early_memremap(data.phys_map, data.size);
++ new = early_memremap_prot(data.phys_map, data.size,
++ pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL)));
+ if (!new) {
+ pr_err("Failed to map new boot services memmap\n");
+ return;
+diff --git a/block/ioprio.c b/block/ioprio.c
+index 313c14a70bbd3..6f01d35a5145a 100644
+--- a/block/ioprio.c
++++ b/block/ioprio.c
+@@ -220,6 +220,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
+ pgrp = task_pgrp(current);
+ else
+ pgrp = find_vpid(who);
++ read_lock(&tasklist_lock);
+ do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
+ tmpio = get_task_ioprio(p);
+ if (tmpio < 0)
+@@ -229,6 +230,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
+ else
+ ret = ioprio_best(ret, tmpio);
+ } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
++ read_unlock(&tasklist_lock);
++
+ break;
+ case IOPRIO_WHO_USER:
+ uid = make_kuid(current_user_ns(), who);
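
The ioprio hunk above closes a race: walking the threads of a process group with do_each_pid_thread() is only safe under tasklist_lock (or an RCU read-side section), otherwise tasks can be unhashed mid-walk. A minimal sketch of the locking pattern (function name hypothetical):

    #include <linux/pid.h>
    #include <linux/sched/signal.h>
    #include <linux/sched/task.h>

    /* Illustrative sketch, not part of the patch. */
    static int count_pgrp_threads(struct pid *pgrp)
    {
            struct task_struct *p;
            int n = 0;

            read_lock(&tasklist_lock);
            do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
                    n++;            /* stand-in for per-task work */
            } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
            read_unlock(&tasklist_lock);

            return n;
    }
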
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index cffbe57a8e086..c75fb600740cc 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -4422,23 +4422,20 @@ static int binder_thread_release(struct binder_proc *proc,
+ __release(&t->lock);
+
+ /*
+- * If this thread used poll, make sure we remove the waitqueue
+- * from any epoll data structures holding it with POLLFREE.
+- * waitqueue_active() is safe to use here because we're holding
+- * the inner lock.
++ * If this thread used poll, make sure we remove the waitqueue from any
++ * poll data structures holding it.
+ */
+- if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
+- waitqueue_active(&thread->wait)) {
+- wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
+- }
++ if (thread->looper & BINDER_LOOPER_STATE_POLL)
++ wake_up_pollfree(&thread->wait);
+
+ binder_inner_proc_unlock(thread->proc);
+
+ /*
+- * This is needed to avoid races between wake_up_poll() above and
+- * and ep_remove_waitqueue() called for other reasons (eg the epoll file
+- * descriptor being closed); ep_remove_waitqueue() holds an RCU read
+- * lock, so we can be sure it's done after calling synchronize_rcu().
++ * This is needed to avoid races between wake_up_pollfree() above and
++ * someone else removing the last entry from the queue for other reasons
++ * (e.g. ep_remove_wait_queue() being called due to an epoll file
++ * descriptor being closed). Such other users hold an RCU read lock, so
++ * we can be sure they're done after we call synchronize_rcu().
+ */
+ if (thread->looper & BINDER_LOOPER_STATE_POLL)
+ synchronize_rcu();
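
wake_up_pollfree(), added elsewhere in this same stable release, both wakes and unlinks every waiter, which is what lets binder drop the racy waitqueue_active() check. A sketch of the teardown ordering for a waitqueue that may still be linked into epoll (helper name hypothetical):

    #include <linux/wait.h>
    #include <linux/rcupdate.h>

    /* Illustrative sketch, not part of the patch. */
    static void teardown_wq(wait_queue_head_t *wq)
    {
            /* Evict all waiters; epoll entries see POLLFREE. */
            wake_up_pollfree(wq);

            /*
             * Anyone unlinking an entry for other reasons does so
             * under rcu_read_lock(), so after a grace period the
             * memory behind @wq can be freed safely.
             */
            synchronize_rcu();
    }
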
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index d8c3e730f7c2b..4d848cfc406fe 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -3856,6 +3856,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
+ /* Odd clown on sil3726/4726 PMPs */
+ { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
++ /* Similar story with ASMedia 1092 */
++ { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE },
+
+ /* Weird ATAPI devices */
+ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
+diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
+index fb99e3727155b..547e6e769546a 100644
+--- a/drivers/bus/mhi/core/pm.c
++++ b/drivers/bus/mhi/core/pm.c
+@@ -881,7 +881,7 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+ }
+ EXPORT_SYMBOL_GPL(mhi_pm_suspend);
+
+-int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
++static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
+ {
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+@@ -898,8 +898,12 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+- if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3)
+- return -EINVAL;
++ if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
++ dev_warn(dev, "Resuming from non M3 state (%s)\n",
++ TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)));
++ if (!force)
++ return -EINVAL;
++ }
+
+ /* Notify clients about exiting LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+@@ -940,8 +944,19 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+
+ return 0;
+ }
++
++int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
++{
++ return __mhi_pm_resume(mhi_cntrl, false);
++}
+ EXPORT_SYMBOL_GPL(mhi_pm_resume);
+
++int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
++{
++ return __mhi_pm_resume(mhi_cntrl, true);
++}
++EXPORT_SYMBOL_GPL(mhi_pm_resume_force);
++
+ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
+ {
+ int ret;
+diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
+index 59a4896a80309..4c577a7317091 100644
+--- a/drivers/bus/mhi/pci_generic.c
++++ b/drivers/bus/mhi/pci_generic.c
+@@ -20,7 +20,7 @@
+
+ #define MHI_PCI_DEFAULT_BAR_NUM 0
+
+-#define MHI_POST_RESET_DELAY_MS 500
++#define MHI_POST_RESET_DELAY_MS 2000
+
+ #define HEALTH_CHECK_PERIOD (HZ * 2)
+
+diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
+index d3e905cf867d7..b23758083ce52 100644
+--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
++++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
+@@ -370,7 +370,7 @@ static struct platform_driver imx8qxp_lpcg_clk_driver = {
+ .probe = imx8qxp_lpcg_clk_probe,
+ };
+
+-builtin_platform_driver(imx8qxp_lpcg_clk_driver);
++module_platform_driver(imx8qxp_lpcg_clk_driver);
+
+ MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
+ MODULE_DESCRIPTION("NXP i.MX8QXP LPCG clock driver");
+diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
+index c53a688d8ccca..40a2efb1329be 100644
+--- a/drivers/clk/imx/clk-imx8qxp.c
++++ b/drivers/clk/imx/clk-imx8qxp.c
+@@ -308,7 +308,7 @@ static struct platform_driver imx8qxp_clk_driver = {
+ },
+ .probe = imx8qxp_clk_probe,
+ };
+-builtin_platform_driver(imx8qxp_clk_driver);
++module_platform_driver(imx8qxp_clk_driver);
+
+ MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
+ MODULE_DESCRIPTION("NXP i.MX8QXP clock driver");
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index eaedcceb766f9..8f65b9bdafce4 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -1429,6 +1429,15 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
+ void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+ {
++ /*
++ * If the bootloader left the PLL enabled it's likely that there are
++ * RCGs that will lock up if we disable the PLL below.
++ */
++ if (trion_pll_is_enabled(pll, regmap)) {
++ pr_debug("Trion PLL is already enabled, skipping configuration\n");
++ return;
++ }
++
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+diff --git a/drivers/clk/qcom/clk-regmap-mux.c b/drivers/clk/qcom/clk-regmap-mux.c
+index b2d00b4519634..45d9cca28064f 100644
+--- a/drivers/clk/qcom/clk-regmap-mux.c
++++ b/drivers/clk/qcom/clk-regmap-mux.c
+@@ -28,7 +28,7 @@ static u8 mux_get_parent(struct clk_hw *hw)
+ val &= mask;
+
+ if (mux->parent_map)
+- return qcom_find_src_index(hw, mux->parent_map, val);
++ return qcom_find_cfg_index(hw, mux->parent_map, val);
+
+ return val;
+ }
+diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
+index 60d2a78d13950..2af04fc4abfa9 100644
+--- a/drivers/clk/qcom/common.c
++++ b/drivers/clk/qcom/common.c
+@@ -69,6 +69,18 @@ int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
+ }
+ EXPORT_SYMBOL_GPL(qcom_find_src_index);
+
++int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, u8 cfg)
++{
++ int i, num_parents = clk_hw_get_num_parents(hw);
++
++ for (i = 0; i < num_parents; i++)
++ if (cfg == map[i].cfg)
++ return i;
++
++ return -ENOENT;
++}
++EXPORT_SYMBOL_GPL(qcom_find_cfg_index);
++
+ struct regmap *
+ qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
+ {
+diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
+index bb39a7e106d8a..9c8f7b798d9fc 100644
+--- a/drivers/clk/qcom/common.h
++++ b/drivers/clk/qcom/common.h
+@@ -49,6 +49,8 @@ extern void
+ qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
+ extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
+ u8 src);
++extern int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map,
++ u8 cfg);
+
+ extern int qcom_cc_register_board_clk(struct device *dev, const char *path,
+ const char *name, unsigned long rate);
+diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
+index 3819ef5b70989..3245eb0c602d2 100644
+--- a/drivers/clocksource/dw_apb_timer_of.c
++++ b/drivers/clocksource/dw_apb_timer_of.c
+@@ -47,7 +47,7 @@ static int __init timer_get_base_and_rate(struct device_node *np,
+ pr_warn("pclk for %pOFn is present, but could not be activated\n",
+ np);
+
+- if (!of_property_read_u32(np, "clock-freq", rate) &&
++ if (!of_property_read_u32(np, "clock-freq", rate) ||
+ !of_property_read_u32(np, "clock-frequency", rate))
+ return 0;
+
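
The one-character dw_apb_timer fix above matters because of_property_read_u32() returns 0 on success: with ||, either the "clock-freq" or the "clock-frequency" property satisfies the lookup, whereas the old && demanded both. The corrected logic, sketched in isolation:

    #include <linux/errno.h>
    #include <linux/of.h>

    /* Illustrative sketch, not part of the patch. */
    static int read_rate(struct device_node *np, u32 *rate)
    {
            /* 0 == success; short-circuits after the first hit. */
            if (!of_property_read_u32(np, "clock-freq", rate) ||
                !of_property_read_u32(np, "clock-frequency", rate))
                    return 0;

            return -EINVAL;
    }
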
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index dc995ce52eff2..fef13e93a99fd 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2213,7 +2213,8 @@ static int dm_resume(void *handle)
+ if (amdgpu_in_reset(adev)) {
+ dc_state = dm->cached_dc_state;
+
+- amdgpu_dm_outbox_init(adev);
++ if (dc_enable_dmub_notifications(adev->dm.dc))
++ amdgpu_dm_outbox_init(adev);
+
+ r = dm_dmub_hw_init(adev);
+ if (r)
+@@ -2262,6 +2263,10 @@ static int dm_resume(void *handle)
+ /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
+ dc_resource_state_construct(dm->dc, dm_state->context);
+
++ /* Re-enable outbox interrupts for DPIA. */
++ if (dc_enable_dmub_notifications(adev->dm.dc))
++ amdgpu_dm_outbox_init(adev);
++
+ /* Before powering on DC we need to re-initialize DMUB. */
+ r = dm_dmub_hw_init(adev);
+ if (r)
+diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
+index c9a9d74f338c1..c313a5b4549c4 100644
+--- a/drivers/gpu/drm/drm_syncobj.c
++++ b/drivers/gpu/drm/drm_syncobj.c
+@@ -404,8 +404,17 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
+
+ if (*fence) {
+ ret = dma_fence_chain_find_seqno(fence, point);
+- if (!ret)
++ if (!ret) {
++ /* If the requested seqno is already signaled
++ * drm_syncobj_find_fence may return a NULL
++ * fence. To make sure the recipient gets
++ * signalled, use a new fence instead.
++ */
++ if (!*fence)
++ *fence = dma_fence_get_stub();
++
+ goto out;
++ }
+ dma_fence_put(*fence);
+ } else {
+ ret = -EINVAL;
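
dma_fence_chain_find_seqno() can succeed yet leave *fence NULL when the requested point is already signalled; substituting the permanently-signalled stub fence keeps the contract that a successful lookup yields a waitable fence. In isolation (helper name hypothetical):

    #include <linux/dma-fence.h>

    /* Illustrative sketch, not part of the patch. */
    static struct dma_fence *fence_or_stub(struct dma_fence *fence)
    {
            /* dma_fence_get_stub() is a signalled, refcounted fence. */
            return fence ? fence : dma_fence_get_stub();
    }
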
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 3c33bf572d6d3..9235ab7161e3a 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -207,14 +207,14 @@ config HID_CHERRY
+
+ config HID_CHICONY
+ tristate "Chicony devices"
+- depends on HID
++ depends on USB_HID
+ default !EXPERT
+ help
+ Support for Chicony Tactical pad and special keys on Chicony keyboards.
+
+ config HID_CORSAIR
+ tristate "Corsair devices"
+- depends on HID && USB && LEDS_CLASS
++ depends on USB_HID && LEDS_CLASS
+ help
+ Support for Corsair devices that are not fully compliant with the
+ HID standard.
+@@ -245,7 +245,7 @@ config HID_MACALLY
+
+ config HID_PRODIKEYS
+ tristate "Prodikeys PC-MIDI Keyboard support"
+- depends on HID && SND
++ depends on USB_HID && SND
+ select SND_RAWMIDI
+ help
+ Support for Prodikeys PC-MIDI Keyboard device support.
+@@ -553,7 +553,7 @@ config HID_LENOVO
+
+ config HID_LOGITECH
+ tristate "Logitech devices"
+- depends on HID
++ depends on USB_HID
+ depends on LEDS_CLASS
+ default !EXPERT
+ help
+@@ -919,7 +919,7 @@ config HID_SAITEK
+
+ config HID_SAMSUNG
+ tristate "Samsung InfraRed remote control or keyboards"
+- depends on HID
++ depends on USB_HID
+ help
+ Support for Samsung InfraRed remote control or keyboards.
+
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index f3ecddc519ee8..08c9a9a60ae47 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -1028,8 +1028,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
+ drvdata->tp = &asus_i2c_tp;
+
+- if ((drvdata->quirks & QUIRK_T100_KEYBOARD) &&
+- hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
++ if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) {
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+ if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
+@@ -1057,8 +1056,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ drvdata->tp = &asus_t100chi_tp;
+ }
+
+- if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
+- hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
++ if ((drvdata->quirks & QUIRK_MEDION_E1239T) && hid_is_usb(hdev)) {
+ struct usb_host_interface *alt =
+ to_usb_interface(hdev->dev.parent)->altsetting;
+
+diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
+index db6da21ade063..74ad8bf98bfd5 100644
+--- a/drivers/hid/hid-bigbenff.c
++++ b/drivers/hid/hid-bigbenff.c
+@@ -191,7 +191,7 @@ static void bigben_worker(struct work_struct *work)
+ struct bigben_device, worker);
+ struct hid_field *report_field = bigben->report->field[0];
+
+- if (bigben->removed)
++ if (bigben->removed || !report_field)
+ return;
+
+ if (bigben->work_led) {
+diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
+index ca556d39da2ae..f04d2aa23efe4 100644
+--- a/drivers/hid/hid-chicony.c
++++ b/drivers/hid/hid-chicony.c
+@@ -114,6 +114,9 @@ static int ch_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ {
+ int ret;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
+ ret = hid_parse(hdev);
+ if (ret) {
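
The chicony change above is the first of many probe() fixes in this patch that share one shape: a driver that casts hdev->dev.parent to a USB interface must first confirm the device really sits on the USB HID transport via the hid_is_usb() helper introduced alongside these fixes, or a crafted virtual (uhid) device causes a type-confused dereference. The recurring guard, sketched generically (driver name hypothetical):

    #include <linux/hid.h>
    #include <linux/usb.h>

    /* Illustrative sketch, not part of the patch. */
    static int example_probe(struct hid_device *hdev,
                             const struct hid_device_id *id)
    {
            struct usb_interface *intf;

            if (!hid_is_usb(hdev))      /* reject non-USB transports */
                    return -EINVAL;

            /* Only now is this cast known to be valid. */
            intf = to_usb_interface(hdev->dev.parent);
            return intf ? 0 : -ENODEV;
    }
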
+diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
+index 902a60e249ed2..8c895c820b672 100644
+--- a/drivers/hid/hid-corsair.c
++++ b/drivers/hid/hid-corsair.c
+@@ -553,7 +553,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id)
+ int ret;
+ unsigned long quirks = id->driver_data;
+ struct corsair_drvdata *drvdata;
+- struct usb_interface *usbif = to_usb_interface(dev->dev.parent);
++ struct usb_interface *usbif;
++
++ if (!hid_is_usb(dev))
++ return -EINVAL;
++
++ usbif = to_usb_interface(dev->dev.parent);
+
+ drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata),
+ GFP_KERNEL);
+diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
+index 021049805bb71..3091355d48df6 100644
+--- a/drivers/hid/hid-elan.c
++++ b/drivers/hid/hid-elan.c
+@@ -50,7 +50,7 @@ struct elan_drvdata {
+
+ static int is_not_elan_touchpad(struct hid_device *hdev)
+ {
+- if (hdev->bus == BUS_USB) {
++ if (hid_is_usb(hdev)) {
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+ return (intf->altsetting->desc.bInterfaceNumber !=
+diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
+index 383dfda8c12fc..8e960d7b233b3 100644
+--- a/drivers/hid/hid-elo.c
++++ b/drivers/hid/hid-elo.c
+@@ -230,6 +230,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ int ret;
+ struct usb_device *udev;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+diff --git a/drivers/hid/hid-ft260.c b/drivers/hid/hid-ft260.c
+index 4ef1c3b8094ea..183eeb3863b38 100644
+--- a/drivers/hid/hid-ft260.c
++++ b/drivers/hid/hid-ft260.c
+@@ -915,6 +915,9 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ struct ft260_get_chip_version_report version;
+ int ret;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
+index 8123b871a3ebf..0403beb3104b9 100644
+--- a/drivers/hid/hid-google-hammer.c
++++ b/drivers/hid/hid-google-hammer.c
+@@ -585,6 +585,8 @@ static void hammer_remove(struct hid_device *hdev)
+ static const struct hid_device_id hammer_devices[] = {
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
++ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
+index 0a38e8e9bc783..403506b9697e7 100644
+--- a/drivers/hid/hid-holtek-kbd.c
++++ b/drivers/hid/hid-holtek-kbd.c
+@@ -140,12 +140,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
+ static int holtek_kbd_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+ {
+- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+- int ret = hid_parse(hdev);
++ struct usb_interface *intf;
++ int ret;
++
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
+
++ ret = hid_parse(hdev);
+ if (!ret)
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
++ intf = to_usb_interface(hdev->dev.parent);
+ if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+ struct hid_input *hidinput;
+ list_for_each_entry(hidinput, &hdev->inputs, list) {
+diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
+index 195b735b001d0..b7172c48ef9f0 100644
+--- a/drivers/hid/hid-holtek-mouse.c
++++ b/drivers/hid/hid-holtek-mouse.c
+@@ -62,6 +62,14 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ return rdesc;
+ }
+
++static int holtek_mouse_probe(struct hid_device *hdev,
++ const struct hid_device_id *id)
++{
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++ return 0;
++}
++
+ static const struct hid_device_id holtek_mouse_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+ USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
+@@ -83,6 +91,7 @@ static struct hid_driver holtek_mouse_driver = {
+ .name = "holtek_mouse",
+ .id_table = holtek_mouse_devices,
+ .report_fixup = holtek_mouse_report_fixup,
++ .probe = holtek_mouse_probe,
+ };
+
+ module_hid_driver(holtek_mouse_driver);
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 3706c635b12ee..70e65eb1b868d 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -394,6 +394,7 @@
+ #define USB_DEVICE_ID_HP_X2 0x074d
+ #define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
+ #define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
++#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
+ #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
+ #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
+
+@@ -496,6 +497,7 @@
+ #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
+ #define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
+ #define USB_DEVICE_ID_GOOGLE_DON 0x5050
++#define USB_DEVICE_ID_GOOGLE_EEL 0x5057
+
+ #define USB_VENDOR_ID_GOTOP 0x08f2
+ #define USB_DEVICE_ID_SUPER_Q2 0x007f
+@@ -881,6 +883,7 @@
+ #define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
+ #define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
+ #define USB_DEVICE_ID_MS_POWER_COVER 0x07da
++#define USB_DEVICE_ID_MS_SURFACE3_COVER 0x07de
+ #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
+ #define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb
+ #define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 6561770f1af55..55017db98d896 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -325,6 +325,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ HID_BATTERY_QUIRK_IGNORE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
++ HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
+ HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
+diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
+index d40af911df635..fb3f7258009c2 100644
+--- a/drivers/hid/hid-lg.c
++++ b/drivers/hid/hid-lg.c
+@@ -749,12 +749,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report,
+
+ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ {
+- struct usb_interface *iface = to_usb_interface(hdev->dev.parent);
+- __u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
++ struct usb_interface *iface;
++ __u8 iface_num;
+ unsigned int connect_mask = HID_CONNECT_DEFAULT;
+ struct lg_drv_data *drv_data;
+ int ret;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
++ iface = to_usb_interface(hdev->dev.parent);
++ iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
++
+ /* G29 only work with the 1st interface */
+ if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
+ (iface_num != 0)) {
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index a0017b010c342..7106b921b53cf 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -1777,7 +1777,7 @@ static int logi_dj_probe(struct hid_device *hdev,
+ case recvr_type_bluetooth: no_dj_interfaces = 2; break;
+ case recvr_type_dinovo: no_dj_interfaces = 2; break;
+ }
+- if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
++ if (hid_is_usb(hdev)) {
+ intf = to_usb_interface(hdev->dev.parent);
+ if (intf && intf->altsetting->desc.bInterfaceNumber >=
+ no_dj_interfaces) {
+diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
+index 2666af02d5c1a..e4e9471d0f1e9 100644
+--- a/drivers/hid/hid-prodikeys.c
++++ b/drivers/hid/hid-prodikeys.c
+@@ -798,12 +798,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
+ static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ {
+ int ret;
+- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+- unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
++ struct usb_interface *intf;
++ unsigned short ifnum;
+ unsigned long quirks = id->driver_data;
+ struct pk_device *pk;
+ struct pcmidi_snd *pm = NULL;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
++ intf = to_usb_interface(hdev->dev.parent);
++ ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
++
+ pk = kzalloc(sizeof(*pk), GFP_KERNEL);
+ if (pk == NULL) {
+ hid_err(hdev, "can't alloc descriptor\n");
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 2e104682c22b9..65b7114761749 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -124,6 +124,7 @@ static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
+index 4556d2a50f754..d94ee0539421e 100644
+--- a/drivers/hid/hid-roccat-arvo.c
++++ b/drivers/hid/hid-roccat-arvo.c
+@@ -344,6 +344,9 @@ static int arvo_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
+index ce5f22519956a..e95d59cd8d075 100644
+--- a/drivers/hid/hid-roccat-isku.c
++++ b/drivers/hid/hid-roccat-isku.c
+@@ -324,6 +324,9 @@ static int isku_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
+index 1ca64481145ee..e8522eacf7973 100644
+--- a/drivers/hid/hid-roccat-kone.c
++++ b/drivers/hid/hid-roccat-kone.c
+@@ -749,6 +749,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
+index 0316edf8c5bb4..1896c69ea512f 100644
+--- a/drivers/hid/hid-roccat-koneplus.c
++++ b/drivers/hid/hid-roccat-koneplus.c
+@@ -431,6 +431,9 @@ static int koneplus_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c
+index 5248b3c7cf785..cf8eeb33a1257 100644
+--- a/drivers/hid/hid-roccat-konepure.c
++++ b/drivers/hid/hid-roccat-konepure.c
+@@ -133,6 +133,9 @@ static int konepure_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
+index 9600128815705..6fb9b9563769d 100644
+--- a/drivers/hid/hid-roccat-kovaplus.c
++++ b/drivers/hid/hid-roccat-kovaplus.c
+@@ -501,6 +501,9 @@ static int kovaplus_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c
+index 4a88a76d5c622..d5ddf0d68346b 100644
+--- a/drivers/hid/hid-roccat-lua.c
++++ b/drivers/hid/hid-roccat-lua.c
+@@ -160,6 +160,9 @@ static int lua_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
+index 989927defe8db..4fcc8e7d276f2 100644
+--- a/drivers/hid/hid-roccat-pyra.c
++++ b/drivers/hid/hid-roccat-pyra.c
+@@ -449,6 +449,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c
+index 3956a6c9c5217..5bf1971a2b14d 100644
+--- a/drivers/hid/hid-roccat-ryos.c
++++ b/drivers/hid/hid-roccat-ryos.c
+@@ -141,6 +141,9 @@ static int ryos_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
+index 818701f7a0281..a784bb4ee6512 100644
+--- a/drivers/hid/hid-roccat-savu.c
++++ b/drivers/hid/hid-roccat-savu.c
+@@ -113,6 +113,9 @@ static int savu_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
+index 2e1c31156eca0..cf5992e970940 100644
+--- a/drivers/hid/hid-samsung.c
++++ b/drivers/hid/hid-samsung.c
+@@ -152,6 +152,9 @@ static int samsung_probe(struct hid_device *hdev,
+ int ret;
+ unsigned int cmask = HID_CONNECT_DEFAULT;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
+index d1b107d547f54..60ec2b29d54de 100644
+--- a/drivers/hid/hid-sony.c
++++ b/drivers/hid/hid-sony.c
+@@ -3000,7 +3000,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ sc->quirks = quirks;
+ hid_set_drvdata(hdev, sc);
+ sc->hdev = hdev;
+- usbdev = to_usb_device(sc->hdev->dev.parent->parent);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+@@ -3038,14 +3037,23 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ */
+ if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
+ hid_err(hdev, "failed to claim input\n");
+- hid_hw_stop(hdev);
+- return -ENODEV;
++ ret = -ENODEV;
++ goto err;
+ }
+
+ if (sc->quirks & (GHL_GUITAR_PS3WIIU | GHL_GUITAR_PS4)) {
++ if (!hid_is_usb(hdev)) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ usbdev = to_usb_device(sc->hdev->dev.parent->parent);
++
+ sc->ghl_urb = usb_alloc_urb(0, GFP_ATOMIC);
+- if (!sc->ghl_urb)
+- return -ENOMEM;
++ if (!sc->ghl_urb) {
++ ret = -ENOMEM;
++ goto err;
++ }
+
+ if (sc->quirks & GHL_GUITAR_PS3WIIU)
+ ret = ghl_init_urb(sc, usbdev, ghl_ps3wiiu_magic_data,
+@@ -3055,7 +3063,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ ARRAY_SIZE(ghl_ps4_magic_data));
+ if (ret) {
+ hid_err(hdev, "error preparing URB\n");
+- return ret;
++ goto err;
+ }
+
+ timer_setup(&sc->ghl_poke_timer, ghl_magic_poke, 0);
+@@ -3064,6 +3072,10 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ }
+
+ return ret;
++
++err:
++ hid_hw_stop(hdev);
++ return ret;
+ }
+
+ static void sony_remove(struct hid_device *hdev)
+diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
+index d44550aa88057..0c92b7f9b8b81 100644
+--- a/drivers/hid/hid-thrustmaster.c
++++ b/drivers/hid/hid-thrustmaster.c
+@@ -274,6 +274,9 @@ static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_i
+ int ret = 0;
+ struct tm_wheel *tm_wheel = 0;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed with error %d\n", ret);
+diff --git a/drivers/hid/hid-u2fzero.c b/drivers/hid/hid-u2fzero.c
+index 67ae2b18e33ac..ac3fd870673d2 100644
+--- a/drivers/hid/hid-u2fzero.c
++++ b/drivers/hid/hid-u2fzero.c
+@@ -290,7 +290,7 @@ static int u2fzero_probe(struct hid_device *hdev,
+ unsigned int minor;
+ int ret;
+
+- if (!hid_is_using_ll_driver(hdev, &usb_hid_driver))
++ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
+ dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
+diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
+index 6a9865dd703c0..d8ab0139e5cda 100644
+--- a/drivers/hid/hid-uclogic-core.c
++++ b/drivers/hid/hid-uclogic-core.c
+@@ -164,6 +164,9 @@ static int uclogic_probe(struct hid_device *hdev,
+ struct uclogic_drvdata *drvdata = NULL;
+ bool params_initialized = false;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ /*
+ * libinput requires the pad interface to be on a different node
+ * than the pen, so use QUIRK_MULTI_INPUT for all tablets.
+diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
+index 3d67b748a3b95..adff1bd68d9f8 100644
+--- a/drivers/hid/hid-uclogic-params.c
++++ b/drivers/hid/hid-uclogic-params.c
+@@ -843,8 +843,7 @@ int uclogic_params_init(struct uclogic_params *params,
+ struct uclogic_params p = {0, };
+
+ /* Check arguments */
+- if (params == NULL || hdev == NULL ||
+- !hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
++ if (params == NULL || hdev == NULL || !hid_is_usb(hdev)) {
+ rc = -EINVAL;
+ goto cleanup;
+ }
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 1c5039081db27..8e9d9450cb835 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -266,7 +266,8 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work)
+
+ if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag
+ && IPC_IS_ISH_ILUP(fwsts)) {
+- disable_irq_wake(pdev->irq);
++ if (device_may_wakeup(&pdev->dev))
++ disable_irq_wake(pdev->irq);
+
+ ish_set_host_ready(dev);
+
+@@ -337,7 +338,8 @@ static int __maybe_unused ish_suspend(struct device *device)
+ */
+ pci_save_state(pdev);
+
+- enable_irq_wake(pdev->irq);
++ if (device_may_wakeup(&pdev->dev))
++ enable_irq_wake(pdev->irq);
+ }
+ } else {
+ /*
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 93f49b766376e..b1bbf297f66b0 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -726,7 +726,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
+ * Skip the query for this type and modify defaults based on
+ * interface number.
+ */
+- if (features->type == WIRELESS) {
++ if (features->type == WIRELESS && intf) {
+ if (intf->cur_altsetting->desc.bInterfaceNumber == 0)
+ features->device_type = WACOM_DEVICETYPE_WL_MONITOR;
+ else
+@@ -2217,7 +2217,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
+ if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
+ char *product_name = wacom->hdev->name;
+
+- if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) {
++ if (hid_is_usb(wacom->hdev)) {
+ struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent);
+ struct usb_device *dev = interface_to_usbdev(intf);
+ product_name = dev->product;
+@@ -2454,6 +2454,9 @@ static void wacom_wireless_work(struct work_struct *work)
+
+ wacom_destroy_battery(wacom);
+
++ if (!usbdev)
++ return;
++
+ /* Stylus interface */
+ hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
+ wacom1 = hid_get_drvdata(hdev1);
+@@ -2733,8 +2736,6 @@ static void wacom_mode_change_work(struct work_struct *work)
+ static int wacom_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+ {
+- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+- struct usb_device *dev = interface_to_usbdev(intf);
+ struct wacom *wacom;
+ struct wacom_wac *wacom_wac;
+ struct wacom_features *features;
+@@ -2769,8 +2770,14 @@ static int wacom_probe(struct hid_device *hdev,
+ wacom_wac->hid_data.inputmode = -1;
+ wacom_wac->mode_report = -1;
+
+- wacom->usbdev = dev;
+- wacom->intf = intf;
++ if (hid_is_usb(hdev)) {
++ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
++ struct usb_device *dev = interface_to_usbdev(intf);
++
++ wacom->usbdev = dev;
++ wacom->intf = intf;
++ }
++
+ mutex_init(&wacom->lock);
+ INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work);
+ INIT_WORK(&wacom->wireless_work, wacom_wireless_work);
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index 774c1b0715d91..47fce97996de2 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -623,10 +623,9 @@ static void __init i8k_init_procfs(struct device *dev)
+ {
+ struct dell_smm_data *data = dev_get_drvdata(dev);
+
+- /* Register the proc entry */
+- proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data);
+-
+- devm_add_action_or_reset(dev, i8k_exit_procfs, NULL);
++ /* Only register exit function if creation was successful */
++ if (proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data))
++ devm_add_action_or_reset(dev, i8k_exit_procfs, NULL);
+ }
+
+ #else
+diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
+index 17518b4cab1b0..f12b9a28a232d 100644
+--- a/drivers/hwmon/pwm-fan.c
++++ b/drivers/hwmon/pwm-fan.c
+@@ -336,8 +336,6 @@ static int pwm_fan_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- ctx->pwm_value = MAX_PWM;
+-
+ pwm_init_state(ctx->pwm, &ctx->pwm_state);
+
+ /*
+diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
+index a6ea1eb1394e1..53b8da6dbb23f 100644
+--- a/drivers/i2c/busses/i2c-mpc.c
++++ b/drivers/i2c/busses/i2c-mpc.c
+@@ -636,7 +636,7 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
+ status = readb(i2c->base + MPC_I2C_SR);
+ if (status & CSR_MIF) {
+ /* Wait up to 100us for transfer to properly complete */
+- readb_poll_timeout(i2c->base + MPC_I2C_SR, status, !(status & CSR_MCF), 0, 100);
++ readb_poll_timeout_atomic(i2c->base + MPC_I2C_SR, status, status & CSR_MCF, 0, 100);
+ writeb(0, i2c->base + MPC_I2C_SR);
+ mpc_i2c_do_intr(i2c, status);
+ return IRQ_HANDLED;
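
Two things change in the i2c-mpc hunk above: the sleeping poll helper becomes the busy-waiting _atomic variant (this code runs in hard-IRQ context), and the condition flips, since the readx_poll_timeout helpers complete when the condition turns true and the intent is to wait until CSR_MCF ("transfer complete") is set. The helper's shape, sketched with hypothetical names:

    #include <linux/iopoll.h>

    /* Illustrative sketch, not part of the patch. */
    static int wait_mcf(void __iomem *sr, u8 done_bit)
    {
            u8 status;

            /* Busy-wait, 0us between reads, 100us timeout; IRQ-safe. */
            return readb_poll_timeout_atomic(sr, status,
                                             status & done_bit, 0, 100);
    }
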
+diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
+index a51fdd3c9b5b5..24c9387c29687 100644
+--- a/drivers/iio/accel/kxcjk-1013.c
++++ b/drivers/iio/accel/kxcjk-1013.c
+@@ -1595,8 +1595,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
+ return 0;
+
+ err_buffer_cleanup:
+- if (data->dready_trig)
+- iio_triggered_buffer_cleanup(indio_dev);
++ iio_triggered_buffer_cleanup(indio_dev);
+ err_trigger_unregister:
+ if (data->dready_trig)
+ iio_trigger_unregister(data->dready_trig);
+@@ -1618,8 +1617,8 @@ static int kxcjk1013_remove(struct i2c_client *client)
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
++ iio_triggered_buffer_cleanup(indio_dev);
+ if (data->dready_trig) {
+- iio_triggered_buffer_cleanup(indio_dev);
+ iio_trigger_unregister(data->dready_trig);
+ iio_trigger_unregister(data->motion_trig);
+ }
+diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
+index bf7ed9e7d00f4..e56ecc075014e 100644
+--- a/drivers/iio/accel/kxsd9.c
++++ b/drivers/iio/accel/kxsd9.c
+@@ -224,14 +224,14 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
+ hw_values.chan,
+ sizeof(hw_values.chan));
+ if (ret) {
+- dev_err(st->dev,
+- "error reading data\n");
+- return ret;
++ dev_err(st->dev, "error reading data: %d\n", ret);
++ goto out;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ &hw_values,
+ iio_get_time_ns(indio_dev));
++out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
+index 715b8138fb715..09c7f10fefb6e 100644
+--- a/drivers/iio/accel/mma8452.c
++++ b/drivers/iio/accel/mma8452.c
+@@ -1470,7 +1470,7 @@ static int mma8452_trigger_setup(struct iio_dev *indio_dev)
+ if (ret)
+ return ret;
+
+- indio_dev->trig = trig;
++ indio_dev->trig = iio_trigger_get(trig);
+
+ return 0;
+ }
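
The mma8452 fix is a reference-count repair: the IIO core drops a reference on indio_dev->trig at teardown, so a driver publishing its own trigger as the device default must take one first. A minimal sketch:

    #include <linux/iio/iio.h>
    #include <linux/iio/trigger.h>

    /* Illustrative sketch, not part of the patch. */
    static void publish_default_trigger(struct iio_dev *indio_dev,
                                        struct iio_trigger *trig)
    {
            /* +1 ref; balanced by the core's iio_trigger_put(). */
            indio_dev->trig = iio_trigger_get(trig);
    }
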
+diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
+index 2c5c8a3672b2d..aa42ba759fa1a 100644
+--- a/drivers/iio/adc/ad7768-1.c
++++ b/drivers/iio/adc/ad7768-1.c
+@@ -480,8 +480,8 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
+ iio_get_time_ns(indio_dev));
+
+- iio_trigger_notify_done(indio_dev->trig);
+ err_unlock:
++ iio_trigger_notify_done(indio_dev->trig);
+ mutex_unlock(&st->lock);
+
+ return IRQ_HANDLED;
+diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
+index ea5ca163d8796..c4de706012e51 100644
+--- a/drivers/iio/adc/at91-sama5d2_adc.c
++++ b/drivers/iio/adc/at91-sama5d2_adc.c
+@@ -1377,7 +1377,8 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
+ *val = st->conversion_value;
+ ret = at91_adc_adjust_val_osr(st, val);
+ if (chan->scan_type.sign == 's')
+- *val = sign_extend32(*val, 11);
++ *val = sign_extend32(*val,
++ chan->scan_type.realbits - 1);
+ st->conversion_done = false;
+ }
+
+diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
+index 3e0c0233b4315..df99f1365c398 100644
+--- a/drivers/iio/adc/axp20x_adc.c
++++ b/drivers/iio/adc/axp20x_adc.c
+@@ -251,19 +251,8 @@ static int axp22x_adc_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val)
+ {
+ struct axp20x_adc_iio *info = iio_priv(indio_dev);
+- int size;
+
+- /*
+- * N.B.: Unlike the Chinese datasheets tell, the charging current is
+- * stored on 12 bits, not 13 bits. Only discharging current is on 13
+- * bits.
+- */
+- if (chan->type == IIO_CURRENT && chan->channel == AXP22X_BATT_DISCHRG_I)
+- size = 13;
+- else
+- size = 12;
+-
+- *val = axp20x_read_variable_width(info->regmap, chan->address, size);
++ *val = axp20x_read_variable_width(info->regmap, chan->address, 12);
+ if (*val < 0)
+ return *val;
+
+@@ -386,9 +375,8 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val,
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ case IIO_CURRENT:
+- *val = 0;
+- *val2 = 500000;
+- return IIO_VAL_INT_PLUS_MICRO;
++ *val = 1;
++ return IIO_VAL_INT;
+
+ case IIO_TEMP:
+ *val = 100;
+diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
+index 16407664182ce..97d162a3cba4e 100644
+--- a/drivers/iio/adc/dln2-adc.c
++++ b/drivers/iio/adc/dln2-adc.c
+@@ -248,7 +248,6 @@ static int dln2_adc_set_chan_period(struct dln2_adc *dln2,
+ static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
+ {
+ int ret, i;
+- struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev);
+ u16 conflict;
+ __le16 value;
+ int olen = sizeof(value);
+@@ -257,13 +256,9 @@ static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
+ .chan = channel,
+ };
+
+- ret = iio_device_claim_direct_mode(indio_dev);
+- if (ret < 0)
+- return ret;
+-
+ ret = dln2_adc_set_chan_enabled(dln2, channel, true);
+ if (ret < 0)
+- goto release_direct;
++ return ret;
+
+ ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
+ if (ret < 0) {
+@@ -300,8 +295,6 @@ disable_port:
+ dln2_adc_set_port_enabled(dln2, false, NULL);
+ disable_chan:
+ dln2_adc_set_chan_enabled(dln2, channel, false);
+-release_direct:
+- iio_device_release_direct_mode(indio_dev);
+
+ return ret;
+ }
+@@ -337,10 +330,16 @@ static int dln2_adc_read_raw(struct iio_dev *indio_dev,
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
++ ret = iio_device_claim_direct_mode(indio_dev);
++ if (ret < 0)
++ return ret;
++
+ mutex_lock(&dln2->mutex);
+ ret = dln2_adc_read(dln2, chan->channel);
+ mutex_unlock(&dln2->mutex);
+
++ iio_device_release_direct_mode(indio_dev);
++
+ if (ret < 0)
+ return ret;
+
+@@ -656,7 +655,11 @@ static int dln2_adc_probe(struct platform_device *pdev)
+ return -ENOMEM;
+ }
+ iio_trigger_set_drvdata(dln2->trig, dln2);
+- devm_iio_trigger_register(dev, dln2->trig);
++ ret = devm_iio_trigger_register(dev, dln2->trig);
++ if (ret) {
++ dev_err(dev, "failed to register trigger: %d\n", ret);
++ return ret;
++ }
+ iio_trigger_set_immutable(indio_dev, dln2->trig);
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
+index 5088de835bb15..e3e75413b49e7 100644
+--- a/drivers/iio/adc/stm32-adc.c
++++ b/drivers/iio/adc/stm32-adc.c
+@@ -975,6 +975,7 @@ static void stm32h7_adc_unprepare(struct iio_dev *indio_dev)
+ {
+ struct stm32_adc *adc = iio_priv(indio_dev);
+
++ stm32_adc_writel(adc, STM32H7_ADC_PCSEL, 0);
+ stm32h7_adc_disable(indio_dev);
+ stm32h7_adc_enter_pwr_down(adc);
+ }
+diff --git a/drivers/iio/gyro/adxrs290.c b/drivers/iio/gyro/adxrs290.c
+index 3e0734ddafe36..600e9725da788 100644
+--- a/drivers/iio/gyro/adxrs290.c
++++ b/drivers/iio/gyro/adxrs290.c
+@@ -7,6 +7,7 @@
+ */
+
+ #include <linux/bitfield.h>
++#include <linux/bitops.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/kernel.h>
+@@ -124,7 +125,7 @@ static int adxrs290_get_rate_data(struct iio_dev *indio_dev, const u8 cmd, int *
+ goto err_unlock;
+ }
+
+- *val = temp;
++ *val = sign_extend32(temp, 15);
+
+ err_unlock:
+ mutex_unlock(&st->lock);
+@@ -146,7 +147,7 @@ static int adxrs290_get_temp_data(struct iio_dev *indio_dev, int *val)
+ }
+
+ /* extract lower 12 bits temperature reading */
+- *val = temp & 0x0FFF;
++ *val = sign_extend32(temp, 11);
+
+ err_unlock:
+ mutex_unlock(&st->lock);
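
The adxrs290 hunks above correct sign handling with sign_extend32(value, b), which treats bit b as the sign bit, and the earlier at91-sama5d2 hunk generalizes the sign-bit position to chan->scan_type.realbits - 1 instead of a hardcoded 11. For a 12-bit reading, sign_extend32(0xFFF, 11) yields -1 and sign_extend32(0x7FF, 11) yields 2047. In isolation (helper name hypothetical):

    #include <linux/bitops.h>

    /* Illustrative sketch, not part of the patch. */
    static inline int decode_12bit(u16 raw)
    {
            /* Bit 11 is the sign bit of a 12-bit two's-complement value. */
            return sign_extend32(raw & 0xFFF, 11);
    }
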
+diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
+index 04dd6a7969ea7..4cfa0d4395605 100644
+--- a/drivers/iio/gyro/itg3200_buffer.c
++++ b/drivers/iio/gyro/itg3200_buffer.c
+@@ -61,9 +61,9 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
+
++error_ret:
+ iio_trigger_notify_done(indio_dev->trig);
+
+-error_ret:
+ return IRQ_HANDLED;
+ }
+
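
The itg3200, ad7768-1, kxsd9 and ltr501 hunks all converge on the same rule: once a triggered capture has fired, the handler must reach iio_trigger_notify_done() on every path, including errors, or the trigger never rearms; and a threaded IRQ handler should return IRQ_HANDLED rather than an error code. Sketched generically (handler name hypothetical):

    #include <linux/iio/iio.h>
    #include <linux/iio/trigger_consumer.h>
    #include <linux/interrupt.h>

    /* Illustrative sketch, not part of the patch. */
    static irqreturn_t example_trigger_handler(int irq, void *p)
    {
            struct iio_poll_func *pf = p;
            struct iio_dev *indio_dev = pf->indio_dev;
            int ret;

            ret = 0;        /* stand-in: read hardware into a scan buffer */
            if (ret)
                    goto done;

            /* push the scan to buffers here on success */
    done:
            iio_trigger_notify_done(indio_dev->trig);
            return IRQ_HANDLED;
    }
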
+diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
+index b23caa2f2aa1f..93990ff1dfe39 100644
+--- a/drivers/iio/industrialio-trigger.c
++++ b/drivers/iio/industrialio-trigger.c
+@@ -556,7 +556,6 @@ struct iio_trigger *viio_trigger_alloc(struct device *parent,
+ irq_modify_status(trig->subirq_base + i,
+ IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
+ }
+- get_device(&trig->dev);
+
+ return trig;
+
+diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
+index 1830221da48d2..f0bd0ad34f222 100644
+--- a/drivers/iio/light/ltr501.c
++++ b/drivers/iio/light/ltr501.c
+@@ -1273,7 +1273,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
+ ret = regmap_bulk_read(data->regmap, LTR501_ALS_DATA1,
+ als_buf, sizeof(als_buf));
+ if (ret < 0)
+- return ret;
++ goto done;
+ if (test_bit(0, indio_dev->active_scan_mask))
+ scan.channels[j++] = le16_to_cpu(als_buf[1]);
+ if (test_bit(1, indio_dev->active_scan_mask))
+diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
+index 07e91846307c7..fc63856ed54de 100644
+--- a/drivers/iio/light/stk3310.c
++++ b/drivers/iio/light/stk3310.c
+@@ -546,9 +546,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
+ mutex_lock(&data->lock);
+ ret = regmap_field_read(data->reg_flag_nf, &dir);
+ if (ret < 0) {
+- dev_err(&data->client->dev, "register read failed\n");
+- mutex_unlock(&data->lock);
+- return ret;
++ dev_err(&data->client->dev, "register read failed: %d\n", ret);
++ goto out;
+ }
+ event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1,
+ IIO_EV_TYPE_THRESH,
+@@ -560,6 +559,7 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
+ ret = regmap_field_write(data->reg_flag_psint, 0);
+ if (ret < 0)
+ dev_err(&data->client->dev, "failed to reset interrupts\n");
++out:
+ mutex_unlock(&data->lock);
+
+ return IRQ_HANDLED;
+diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
+index 33083877cd19d..4353b749ecef2 100644
+--- a/drivers/iio/trigger/stm32-timer-trigger.c
++++ b/drivers/iio/trigger/stm32-timer-trigger.c
+@@ -912,6 +912,6 @@ static struct platform_driver stm32_timer_trigger_driver = {
+ };
+ module_platform_driver(stm32_timer_trigger_driver);
+
+-MODULE_ALIAS("platform: stm32-timer-trigger");
++MODULE_ALIAS("platform:stm32-timer-trigger");
+ MODULE_DESCRIPTION("STMicroelectronics STM32 Timer Trigger driver");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index 37273dc0c03ca..b0d587254fe66 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -8414,6 +8414,8 @@ static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
+ */
+ static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
+ {
++ if (!rcd->rcvhdrq)
++ return;
+ clear_recv_intr(rcd);
+ if (check_packet_present(rcd))
+ force_recv_intr(rcd);
+diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
+index de411884386bf..385e6cff0d279 100644
+--- a/drivers/infiniband/hw/hfi1/driver.c
++++ b/drivers/infiniband/hw/hfi1/driver.c
+@@ -1011,6 +1011,8 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
+ struct hfi1_packet packet;
+ int skip_pkt = 0;
+
++ if (!rcd->rcvhdrq)
++ return RCV_PKT_OK;
+ /* Control context will always use the slow path interrupt handler */
+ needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;
+
+diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
+index e3679d076eaaf..ec2a45c5cf575 100644
+--- a/drivers/infiniband/hw/hfi1/init.c
++++ b/drivers/infiniband/hw/hfi1/init.c
+@@ -112,7 +112,6 @@ static int hfi1_create_kctxt(struct hfi1_devdata *dd,
+ rcd->fast_handler = get_dma_rtail_setting(rcd) ?
+ handle_receive_interrupt_dma_rtail :
+ handle_receive_interrupt_nodma_rtail;
+- rcd->slow_handler = handle_receive_interrupt;
+
+ hfi1_set_seq_cnt(rcd, 1);
+
+@@ -333,6 +332,8 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
+ rcd->numa_id = numa;
+ rcd->rcv_array_groups = dd->rcv_entries.ngroups;
+ rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
++ rcd->slow_handler = handle_receive_interrupt;
++ rcd->do_interrupt = rcd->slow_handler;
+ rcd->msix_intr = CCE_NUM_MSIX_VECTORS;
+
+ mutex_init(&rcd->exp_mutex);
+@@ -873,18 +874,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
+ if (ret)
+ goto done;
+
+- /* allocate dummy tail memory for all receive contexts */
+- dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+- sizeof(u64),
+- &dd->rcvhdrtail_dummy_dma,
+- GFP_KERNEL);
+-
+- if (!dd->rcvhdrtail_dummy_kvaddr) {
+- dd_dev_err(dd, "cannot allocate dummy tail memory\n");
+- ret = -ENOMEM;
+- goto done;
+- }
+-
+ /* dd->rcd can be NULL if early initialization failed */
+ for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
+ /*
+@@ -897,8 +886,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
+ if (!rcd)
+ continue;
+
+- rcd->do_interrupt = &handle_receive_interrupt;
+-
+ lastfail = hfi1_create_rcvhdrq(dd, rcd);
+ if (!lastfail)
+ lastfail = hfi1_setup_eagerbufs(rcd);
+@@ -1119,7 +1106,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
+ rcd->egrbufs.rcvtids = NULL;
+
+ for (e = 0; e < rcd->egrbufs.alloced; e++) {
+- if (rcd->egrbufs.buffers[e].dma)
++ if (rcd->egrbufs.buffers[e].addr)
+ dma_free_coherent(&dd->pcidev->dev,
+ rcd->egrbufs.buffers[e].len,
+ rcd->egrbufs.buffers[e].addr,
+@@ -1200,6 +1187,11 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
+ dd->tx_opstats = NULL;
+ kfree(dd->comp_vect);
+ dd->comp_vect = NULL;
++ if (dd->rcvhdrtail_dummy_kvaddr)
++ dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
++ (void *)dd->rcvhdrtail_dummy_kvaddr,
++ dd->rcvhdrtail_dummy_dma);
++ dd->rcvhdrtail_dummy_kvaddr = NULL;
+ sdma_clean(dd, dd->num_sdma);
+ rvt_dealloc_device(&dd->verbs_dev.rdi);
+ }
+@@ -1297,6 +1289,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
+ goto bail;
+ }
+
++ /* allocate dummy tail memory for all receive contexts */
++ dd->rcvhdrtail_dummy_kvaddr =
++ dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64),
++ &dd->rcvhdrtail_dummy_dma, GFP_KERNEL);
++ if (!dd->rcvhdrtail_dummy_kvaddr) {
++ ret = -ENOMEM;
++ goto bail;
++ }
++
+ atomic_set(&dd->ipoib_rsm_usr_num, 0);
+ return dd;
+
+@@ -1504,13 +1505,6 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
+
+ free_credit_return(dd);
+
+- if (dd->rcvhdrtail_dummy_kvaddr) {
+- dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
+- (void *)dd->rcvhdrtail_dummy_kvaddr,
+- dd->rcvhdrtail_dummy_dma);
+- dd->rcvhdrtail_dummy_kvaddr = NULL;
+- }
+-
+ /*
+ * Free any resources still in use (usually just kernel contexts)
+ * at unload; we do for ctxtcnt, because that's what we allocate.
+diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
+index 2b6c24b7b5865..f07d328689d3d 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -838,8 +838,8 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
+ if (current->nr_cpus_allowed != 1)
+ goto out;
+
+- cpu_id = smp_processor_id();
+ rcu_read_lock();
++ cpu_id = smp_processor_id();
+ rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id,
+ sdma_rht_params);
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index a9c6ffef9640f..51dd134952e77 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -33,6 +33,7 @@
+ #include <linux/acpi.h>
+ #include <linux/etherdevice.h>
+ #include <linux/interrupt.h>
++#include <linux/iopoll.h>
+ #include <linux/kernel.h>
+ #include <linux/types.h>
+ #include <net/addrconf.h>
+@@ -1050,9 +1051,14 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
+ unsigned long instance_stage,
+ unsigned long reset_stage)
+ {
++#define HW_RESET_TIMEOUT_US 1000000
++#define HW_RESET_SLEEP_US 1000
++
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++ unsigned long val;
++ int ret;
+
+ /* When hardware reset is detected, we should stop sending mailbox&cmq&
+ * doorbell to hardware. If now in .init_instance() function, we should
+@@ -1064,7 +1070,11 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
+ * again.
+ */
+ hr_dev->dis_db = true;
+- if (!ops->get_hw_reset_stat(handle))
++
++ ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
++ val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
++ HW_RESET_TIMEOUT_US, false, handle);
++ if (!ret)
+ hr_dev->is_reset = true;
+
+ if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
+@@ -6397,10 +6407,8 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
+ if (!hr_dev)
+ return 0;
+
+- hr_dev->is_reset = true;
+ hr_dev->active = false;
+ hr_dev->dis_db = true;
+-
+ hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
+
+ return 0;
+diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
+index 53e0fb0562c11..c91ddb0cf1448 100644
+--- a/drivers/irqchip/irq-armada-370-xp.c
++++ b/drivers/irqchip/irq-armada-370-xp.c
+@@ -232,16 +232,12 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
+ int hwirq, i;
+
+ mutex_lock(&msi_used_lock);
++ hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR,
++ order_base_2(nr_irqs));
++ mutex_unlock(&msi_used_lock);
+
+- hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
+- 0, nr_irqs, 0);
+- if (hwirq >= PCI_MSI_DOORBELL_NR) {
+- mutex_unlock(&msi_used_lock);
++ if (hwirq < 0)
+ return -ENOSPC;
+- }
+-
+- bitmap_set(msi_used, hwirq, nr_irqs);
+- mutex_unlock(&msi_used_lock);
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+@@ -250,7 +246,7 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
+ NULL, NULL);
+ }
+
+- return hwirq;
++ return 0;
+ }
+
+ static void armada_370_xp_msi_free(struct irq_domain *domain,
+@@ -259,7 +255,7 @@ static void armada_370_xp_msi_free(struct irq_domain *domain,
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+ mutex_lock(&msi_used_lock);
+- bitmap_clear(msi_used, d->hwirq, nr_irqs);
++ bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs));
+ mutex_unlock(&msi_used_lock);
+ }
+
+diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
+index f3c6855a4cefb..18b77c3e6db4b 100644
+--- a/drivers/irqchip/irq-aspeed-scu-ic.c
++++ b/drivers/irqchip/irq-aspeed-scu-ic.c
+@@ -76,8 +76,8 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
+ generic_handle_domain_irq(scu_ic->irq_domain,
+ bit - scu_ic->irq_shift);
+
+- regmap_update_bits(scu_ic->scu, scu_ic->reg, mask,
+- BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
++ regmap_write_bits(scu_ic->scu, scu_ic->reg, mask,
++ BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
+ }
+
+ chained_irq_exit(chip, desc);
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index eb0882d153666..0cb584d9815b9 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -742,7 +742,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its,
+
+ its_fixup_cmd(cmd);
+
+- return NULL;
++ return desc->its_invall_cmd.col;
+ }
+
+ static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
+diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
+index b31c4cff4d3a5..599bb6fc5f0a2 100644
+--- a/drivers/irqchip/irq-nvic.c
++++ b/drivers/irqchip/irq-nvic.c
+@@ -26,7 +26,7 @@
+
+ #define NVIC_ISER 0x000
+ #define NVIC_ICER 0x080
+-#define NVIC_IPR 0x300
++#define NVIC_IPR 0x400
+
+ #define NVIC_MAX_BANKS 16
+ /*
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index e89eb467f1429..44006b860d0a5 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -2193,6 +2193,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
+
+ if (!num_sectors || num_sectors > max_sectors)
+ num_sectors = max_sectors;
++ rdev->sb_start = sb_start;
+ }
+ sb = page_address(rdev->sb_page);
+ sb->data_size = cpu_to_le64(num_sectors);
+diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
+index baf83594a01d3..5121edb0d9eff 100644
+--- a/drivers/misc/cardreader/rtsx_pcr.c
++++ b/drivers/misc/cardreader/rtsx_pcr.c
+@@ -1803,8 +1803,6 @@ static int rtsx_pci_runtime_suspend(struct device *device)
+ mutex_lock(&pcr->pcr_mutex);
+ rtsx_pci_power_off(pcr, HOST_ENTER_S3);
+
+- free_irq(pcr->irq, (void *)pcr);
+-
+ mutex_unlock(&pcr->pcr_mutex);
+
+ pcr->is_runtime_suspended = true;
+@@ -1825,8 +1823,6 @@ static int rtsx_pci_runtime_resume(struct device *device)
+ mutex_lock(&pcr->pcr_mutex);
+
+ rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
+- rtsx_pci_acquire_irq(pcr);
+- synchronize_irq(pcr->irq);
+
+ if (pcr->ops->fetch_vendor_settings)
+ pcr->ops->fetch_vendor_settings(pcr);
+diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
+index 632325474233a..b38978a3b3ffa 100644
+--- a/drivers/misc/eeprom/at25.c
++++ b/drivers/misc/eeprom/at25.c
+@@ -376,7 +376,6 @@ MODULE_DEVICE_TABLE(spi, at25_spi_ids);
+ static int at25_probe(struct spi_device *spi)
+ {
+ struct at25_data *at25 = NULL;
+- struct spi_eeprom chip;
+ int err;
+ int sr;
+ u8 id[FM25_ID_LEN];
+@@ -389,15 +388,18 @@ static int at25_probe(struct spi_device *spi)
+ if (match && !strcmp(match->compatible, "cypress,fm25"))
+ is_fram = 1;
+
++ at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL);
++ if (!at25)
++ return -ENOMEM;
++
+ /* Chip description */
+- if (!spi->dev.platform_data) {
+- if (!is_fram) {
+- err = at25_fw_to_chip(&spi->dev, &chip);
+- if (err)
+- return err;
+- }
+- } else
+- chip = *(struct spi_eeprom *)spi->dev.platform_data;
++ if (spi->dev.platform_data) {
++ memcpy(&at25->chip, spi->dev.platform_data, sizeof(at25->chip));
++ } else if (!is_fram) {
++ err = at25_fw_to_chip(&spi->dev, &at25->chip);
++ if (err)
++ return err;
++ }
+
+ /* Ping the chip ... the status register is pretty portable,
+ * unlike probing manufacturer IDs. We do expect that system
+@@ -409,12 +411,7 @@ static int at25_probe(struct spi_device *spi)
+ return -ENXIO;
+ }
+
+- at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL);
+- if (!at25)
+- return -ENOMEM;
+-
+ mutex_init(&at25->lock);
+- at25->chip = chip;
+ at25->spi = spi;
+ spi_set_drvdata(spi, at25);
+
+@@ -431,7 +428,7 @@ static int at25_probe(struct spi_device *spi)
+ dev_err(&spi->dev, "Error: unsupported size (id %02x)\n", id[7]);
+ return -ENODEV;
+ }
+- chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024;
++ at25->chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024;
+
+ if (at25->chip.byte_len > 64 * 1024)
+ at25->chip.flags |= EE_ADDR3;
+@@ -464,7 +461,7 @@ static int at25_probe(struct spi_device *spi)
+ at25->nvmem_config.type = is_fram ? NVMEM_TYPE_FRAM : NVMEM_TYPE_EEPROM;
+ at25->nvmem_config.name = dev_name(&spi->dev);
+ at25->nvmem_config.dev = &spi->dev;
+- at25->nvmem_config.read_only = chip.flags & EE_READONLY;
++ at25->nvmem_config.read_only = at25->chip.flags & EE_READONLY;
+ at25->nvmem_config.root_only = true;
+ at25->nvmem_config.owner = THIS_MODULE;
+ at25->nvmem_config.compat = true;
+@@ -474,17 +471,18 @@ static int at25_probe(struct spi_device *spi)
+ at25->nvmem_config.priv = at25;
+ at25->nvmem_config.stride = 1;
+ at25->nvmem_config.word_size = 1;
+- at25->nvmem_config.size = chip.byte_len;
++ at25->nvmem_config.size = at25->chip.byte_len;
+
+ at25->nvmem = devm_nvmem_register(&spi->dev, &at25->nvmem_config);
+ if (IS_ERR(at25->nvmem))
+ return PTR_ERR(at25->nvmem);
+
+ dev_info(&spi->dev, "%d %s %s %s%s, pagesize %u\n",
+- (chip.byte_len < 1024) ? chip.byte_len : (chip.byte_len / 1024),
+- (chip.byte_len < 1024) ? "Byte" : "KByte",
++ (at25->chip.byte_len < 1024) ?
++ at25->chip.byte_len : (at25->chip.byte_len / 1024),
++ (at25->chip.byte_len < 1024) ? "Byte" : "KByte",
+ at25->chip.name, is_fram ? "fram" : "eeprom",
+- (chip.flags & EE_READONLY) ? " (readonly)" : "",
++ (at25->chip.flags & EE_READONLY) ? " (readonly)" : "",
+ at25->chip.page_size);
+ return 0;
+ }
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index ad6ced4546556..f3002653bd010 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -719,16 +719,18 @@ static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
+ static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
+ {
+ u64 size = 0;
+- int i;
++ int oix;
+
+ size = ALIGN(metalen, FASTRPC_ALIGN);
+- for (i = 0; i < ctx->nscalars; i++) {
++ for (oix = 0; oix < ctx->nbufs; oix++) {
++ int i = ctx->olaps[oix].raix;
++
+ if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
+
+- if (ctx->olaps[i].offset == 0)
++ if (ctx->olaps[oix].offset == 0)
+ size = ALIGN(size, FASTRPC_ALIGN);
+
+- size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
++ size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
+ }
+ }
+
+diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
+index f4c8e1a61f537..b431cdd27353b 100644
+--- a/drivers/mmc/host/mmc_spi.c
++++ b/drivers/mmc/host/mmc_spi.c
+@@ -1514,6 +1514,12 @@ static int mmc_spi_remove(struct spi_device *spi)
+ return 0;
+ }
+
++static const struct spi_device_id mmc_spi_dev_ids[] = {
++ { "mmc-spi-slot"},
++ { },
++};
++MODULE_DEVICE_TABLE(spi, mmc_spi_dev_ids);
++
+ static const struct of_device_id mmc_spi_of_match_table[] = {
+ { .compatible = "mmc-spi-slot", },
+ {},
+@@ -1525,6 +1531,7 @@ static struct spi_driver mmc_spi_driver = {
+ .name = "mmc_spi",
+ .of_match_table = mmc_spi_of_match_table,
+ },
++ .id_table = mmc_spi_dev_ids,
+ .probe = mmc_spi_probe,
+ .remove = mmc_spi_remove,
+ };
+diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
+index a4407f391f66a..f5b2684ad8058 100644
+--- a/drivers/mmc/host/renesas_sdhi_core.c
++++ b/drivers/mmc/host/renesas_sdhi_core.c
+@@ -673,7 +673,7 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
+
+ /* Issue CMD19 twice for each tap */
+ for (i = 0; i < 2 * priv->tap_num; i++) {
+- int cmd_error;
++ int cmd_error = 0;
+
+ /* Set sampling clock position */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
+diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
+index 9802e265fca80..2b317ed6c103f 100644
+--- a/drivers/mtd/devices/mtd_dataflash.c
++++ b/drivers/mtd/devices/mtd_dataflash.c
+@@ -96,6 +96,13 @@ struct dataflash {
+ struct mtd_info mtd;
+ };
+
++static const struct spi_device_id dataflash_dev_ids[] = {
++ { "at45" },
++ { "dataflash" },
++ { },
++};
++MODULE_DEVICE_TABLE(spi, dataflash_dev_ids);
++
+ #ifdef CONFIG_OF
+ static const struct of_device_id dataflash_dt_ids[] = {
+ { .compatible = "atmel,at45", },
+@@ -927,6 +934,7 @@ static struct spi_driver dataflash_driver = {
+ .name = "mtd_dataflash",
+ .of_match_table = of_match_ptr(dataflash_dt_ids),
+ },
++ .id_table = dataflash_dev_ids,
+
+ .probe = dataflash_probe,
+ .remove = dataflash_remove,
+diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
+index 658f0cbe7ce8c..6b2bda815b880 100644
+--- a/drivers/mtd/nand/raw/fsmc_nand.c
++++ b/drivers/mtd/nand/raw/fsmc_nand.c
+@@ -15,6 +15,7 @@
+
+ #include <linux/clk.h>
+ #include <linux/completion.h>
++#include <linux/delay.h>
+ #include <linux/dmaengine.h>
+ #include <linux/dma-direction.h>
+ #include <linux/dma-mapping.h>
+@@ -93,6 +94,14 @@
+
+ #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
+
++/*
++ * According to SPEAr300 Reference Manual (RM0082)
++ * TOUTDEL = 7ns (Output delay from the flip-flops to the board)
++ * TINDEL = 5ns (Input delay from the board to the flip-flops)
++ */
++#define TOUTDEL 7000
++#define TINDEL 5000
++
+ struct fsmc_nand_timings {
+ u8 tclr;
+ u8 tar;
+@@ -277,7 +286,7 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
+ {
+ unsigned long hclk = clk_get_rate(host->clk);
+ unsigned long hclkn = NSEC_PER_SEC / hclk;
+- u32 thiz, thold, twait, tset;
++ u32 thiz, thold, twait, tset, twait_min;
+
+ if (sdrt->tRC_min < 30000)
+ return -EOPNOTSUPP;
+@@ -309,13 +318,6 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
+ else if (tims->thold > FSMC_THOLD_MASK)
+ tims->thold = FSMC_THOLD_MASK;
+
+- twait = max(sdrt->tRP_min, sdrt->tWP_min);
+- tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
+- if (tims->twait == 0)
+- tims->twait = 1;
+- else if (tims->twait > FSMC_TWAIT_MASK)
+- tims->twait = FSMC_TWAIT_MASK;
+-
+ tset = max(sdrt->tCS_min - sdrt->tWP_min,
+ sdrt->tCEA_max - sdrt->tREA_max);
+ tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1;
+@@ -324,6 +326,21 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
+ else if (tims->tset > FSMC_TSET_MASK)
+ tims->tset = FSMC_TSET_MASK;
+
++ /*
++ * According to the SPEAr300 Reference Manual (RM0082), which gives more
++ * information about FSMC timings than the SPEAr600 one (RM0305),
++ * twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL
++ */
++ twait_min = sdrt->tCEA_max - ((tims->tset + 1) * hclkn * 1000)
++ + TOUTDEL + TINDEL;
++ twait = max3(sdrt->tRP_min, sdrt->tWP_min, twait_min);
++
++ tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
++ if (tims->twait == 0)
++ tims->twait = 1;
++ else if (tims->twait > FSMC_TWAIT_MASK)
++ tims->twait = FSMC_TWAIT_MASK;
++
+ return 0;
+ }
+
+@@ -664,6 +681,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
++
++ if (instr->delay_ns)
++ ndelay(instr->delay_ns);
+ }
+
+ return ret;
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index 7d3752cbf761d..bca36be884b8d 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -1502,14 +1502,14 @@ void bond_alb_monitor(struct work_struct *work)
+ struct slave *slave;
+
+ if (!bond_has_slaves(bond)) {
+- bond_info->tx_rebalance_counter = 0;
++ atomic_set(&bond_info->tx_rebalance_counter, 0);
+ bond_info->lp_counter = 0;
+ goto re_arm;
+ }
+
+ rcu_read_lock();
+
+- bond_info->tx_rebalance_counter++;
++ atomic_inc(&bond_info->tx_rebalance_counter);
+ bond_info->lp_counter++;
+
+ /* send learning packets */
+@@ -1531,7 +1531,7 @@ void bond_alb_monitor(struct work_struct *work)
+ }
+
+ /* rebalance tx traffic */
+- if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
++ if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ tlb_clear_slave(bond, slave, 1);
+ if (slave == rcu_access_pointer(bond->curr_active_slave)) {
+@@ -1541,7 +1541,7 @@ void bond_alb_monitor(struct work_struct *work)
+ bond_info->unbalanced_load = 0;
+ }
+ }
+- bond_info->tx_rebalance_counter = 0;
++ atomic_set(&bond_info->tx_rebalance_counter, 0);
+ }
+
+ if (bond_info->rlb_enabled) {
+@@ -1611,7 +1611,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
+ tlb_init_slave(slave);
+
+ /* order a rebalance ASAP */
+- bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
++ atomic_set(&bond->alb_info.tx_rebalance_counter,
++ BOND_TLB_REBALANCE_TICKS);
+
+ if (bond->alb_info.rlb_enabled)
+ bond->alb_info.rlb_rebalance = 1;
+@@ -1648,7 +1649,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
+ rlb_clear_slave(bond, slave);
+ } else if (link == BOND_LINK_UP) {
+ /* order a rebalance ASAP */
+- bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
++ atomic_set(&bond_info->tx_rebalance_counter,
++ BOND_TLB_REBALANCE_TICKS);
+ if (bond->alb_info.rlb_enabled) {
+ bond->alb_info.rlb_rebalance = 1;
+ /* If the updelay module parameter is smaller than the
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index 74d9899fc904c..eb74cdf26b88c 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -248,6 +248,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
+ #define KVASER_PCIEFD_SPACK_EWLR BIT(23)
+ #define KVASER_PCIEFD_SPACK_EPLR BIT(24)
+
++/* Kvaser KCAN_EPACK second word */
++#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)
++
+ struct kvaser_pciefd;
+
+ struct kvaser_pciefd_can {
+@@ -1285,7 +1288,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
+
+ can->err_rep_cnt++;
+ can->can.can_stats.bus_error++;
+- stats->rx_errors++;
++ if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
++ stats->tx_errors++;
++ else
++ stats->rx_errors++;
+
+ can->bec.txerr = bec.txerr;
+ can->bec.rxerr = bec.rxerr;
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 2470c47b2e315..e330b4c121bf3 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -204,16 +204,16 @@ enum m_can_reg {
+
+ /* Interrupts for version 3.0.x */
+ #define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
+-#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \
+- IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
+- IR_RF1L | IR_RF0L)
++#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
++ IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
++ IR_RF0L)
+ #define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X)
+
+ /* Interrupts for version >= 3.1.x */
+ #define IR_ERR_LEC_31X (IR_PED | IR_PEA)
+-#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
+- IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
+- IR_RF1L | IR_RF0L)
++#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
++ IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
++ IR_RF0L)
+ #define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X)
+
+ /* Interrupt Line Select (ILS) */
+@@ -517,7 +517,7 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
+ err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA,
+ cf->data, DIV_ROUND_UP(cf->len, 4));
+ if (err)
+- goto out_fail;
++ goto out_free_skb;
+ }
+
+ /* acknowledge rx fifo 0 */
+@@ -532,6 +532,8 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
+
+ return 0;
+
++out_free_skb:
++ kfree_skb(skb);
+ out_fail:
+ netdev_err(dev, "FIFO read returned %d\n", err);
+ return err;
+@@ -810,8 +812,6 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
+ {
+ if (irqstatus & IR_WDI)
+ netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
+- if (irqstatus & IR_ELO)
+- netdev_err(dev, "Error Logging Overflow\n");
+ if (irqstatus & IR_BEU)
+ netdev_err(dev, "Bit Error Uncorrected\n");
+ if (irqstatus & IR_BEC)
+diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
+index 89cc3d41e952b..8f184a852a0a7 100644
+--- a/drivers/net/can/m_can/m_can_pci.c
++++ b/drivers/net/can/m_can/m_can_pci.c
+@@ -18,7 +18,7 @@
+
+ #define M_CAN_PCI_MMIO_BAR 0
+
+-#define M_CAN_CLOCK_FREQ_EHL 100000000
++#define M_CAN_CLOCK_FREQ_EHL 200000000
+ #define CTL_CSR_INT_CTL_OFFSET 0x508
+
+ struct m_can_pci_priv {
+@@ -42,8 +42,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
+ static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
+ {
+ struct m_can_pci_priv *priv = cdev_to_priv(cdev);
++ void __iomem *src = priv->base + offset;
+
+- ioread32_rep(priv->base + offset, val, val_count);
++ while (val_count--) {
++ *(unsigned int *)val = ioread32(src);
++ val += 4;
++ src += 4;
++ }
+
+ return 0;
+ }
+@@ -61,8 +66,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
+ const void *val, size_t val_count)
+ {
+ struct m_can_pci_priv *priv = cdev_to_priv(cdev);
++ void __iomem *dst = priv->base + offset;
+
+- iowrite32_rep(priv->base + offset, val, val_count);
++ while (val_count--) {
++ iowrite32(*(unsigned int *)val, dst);
++ val += 4;
++ dst += 4;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
+index 92a54a5fd4c50..964c8a09226a9 100644
+--- a/drivers/net/can/pch_can.c
++++ b/drivers/net/can/pch_can.c
+@@ -692,11 +692,11 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
+ cf->data[i + 1] = data_reg >> 8;
+ }
+
+- netif_receive_skb(skb);
+ rcv_pkts++;
+ stats->rx_packets++;
+ quota--;
+ stats->rx_bytes += cf->len;
++ netif_receive_skb(skb);
+
+ pch_fifo_thresh(priv, obj_num);
+ obj_num++;
+diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c
+index e21b169c14c01..4642b6d4aaf7b 100644
+--- a/drivers/net/can/sja1000/ems_pcmcia.c
++++ b/drivers/net/can/sja1000/ems_pcmcia.c
+@@ -234,7 +234,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
+ free_sja1000dev(dev);
+ }
+
+- err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
++ if (!card->channels) {
++ err = -ENODEV;
++ goto failure_cleanup;
++ }
++
++ err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
+ DRV_NAME, card);
+ if (!err)
+ return 0;
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+index 59ba7c7beec00..f7af1bf5ab46d 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+@@ -28,10 +28,6 @@
+
+ #include "kvaser_usb.h"
+
+-/* Forward declaration */
+-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
+-
+-#define CAN_USB_CLOCK 8000000
+ #define MAX_USBCAN_NET_DEVICES 2
+
+ /* Command header size */
+@@ -80,6 +76,12 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
+
+ #define CMD_LEAF_LOG_MESSAGE 106
+
++/* Leaf frequency options */
++#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60
++#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0
++#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
++#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)
++
+ /* error factors */
+ #define M16C_EF_ACKE BIT(0)
+ #define M16C_EF_CRCE BIT(1)
+@@ -340,6 +342,50 @@ struct kvaser_usb_err_summary {
+ };
+ };
+
++static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
++ .name = "kvaser_usb",
++ .tseg1_min = KVASER_USB_TSEG1_MIN,
++ .tseg1_max = KVASER_USB_TSEG1_MAX,
++ .tseg2_min = KVASER_USB_TSEG2_MIN,
++ .tseg2_max = KVASER_USB_TSEG2_MAX,
++ .sjw_max = KVASER_USB_SJW_MAX,
++ .brp_min = KVASER_USB_BRP_MIN,
++ .brp_max = KVASER_USB_BRP_MAX,
++ .brp_inc = KVASER_USB_BRP_INC,
++};
++
++static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
++ .clock = {
++ .freq = 8000000,
++ },
++ .timestamp_freq = 1,
++ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
++};
++
++static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
++ .clock = {
++ .freq = 16000000,
++ },
++ .timestamp_freq = 1,
++ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
++};
++
++static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
++ .clock = {
++ .freq = 24000000,
++ },
++ .timestamp_freq = 1,
++ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
++};
++
++static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
++ .clock = {
++ .freq = 32000000,
++ },
++ .timestamp_freq = 1,
++ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
++};
++
+ static void *
+ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+@@ -471,6 +517,27 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
+ return rc;
+ }
+
++static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
++ const struct leaf_cmd_softinfo *softinfo)
++{
++ u32 sw_options = le32_to_cpu(softinfo->sw_options);
++
++ dev->fw_version = le32_to_cpu(softinfo->fw_version);
++ dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
++
++ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
++ case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
++ dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz;
++ break;
++ case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
++ dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz;
++ break;
++ case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
++ dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz;
++ break;
++ }
++}
++
+ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
+ {
+ struct kvaser_cmd cmd;
+@@ -486,14 +553,13 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+- dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version);
+- dev->max_tx_urbs =
+- le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx);
++ kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
+ break;
+ case KVASER_USBCAN:
+ dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
+ dev->max_tx_urbs =
+ le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
++ dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz;
+ break;
+ }
+
+@@ -1225,24 +1291,11 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
+ {
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+
+- dev->cfg = &kvaser_usb_leaf_dev_cfg;
+ card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+
+ return 0;
+ }
+
+-static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
+- .name = "kvaser_usb",
+- .tseg1_min = KVASER_USB_TSEG1_MIN,
+- .tseg1_max = KVASER_USB_TSEG1_MAX,
+- .tseg2_min = KVASER_USB_TSEG2_MIN,
+- .tseg2_max = KVASER_USB_TSEG2_MAX,
+- .sjw_max = KVASER_USB_SJW_MAX,
+- .brp_min = KVASER_USB_BRP_MIN,
+- .brp_max = KVASER_USB_BRP_MAX,
+- .brp_inc = KVASER_USB_BRP_INC,
+-};
+-
+ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+ {
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+@@ -1348,11 +1401,3 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
+ .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback,
+ .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd,
+ };
+-
+-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = {
+- .clock = {
+- .freq = CAN_USB_CLOCK,
+- },
+- .timestamp_freq = 1,
+- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+-};
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index be8589fa86a15..a31cc0ab7c625 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -471,6 +471,12 @@ static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port)
+ u16 reg;
+ int err;
+
++ /* The 88e6250 family does not have the PHY detect bit. Instead,
++ * report whether the port is internal.
++ */
++ if (chip->info->family == MV88E6XXX_FAMILY_6250)
++ return port < chip->info->num_internal_phys;
++
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err) {
+ dev_err(chip->dev,
+@@ -693,44 +699,48 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
+ {
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_port *p;
+- int err;
++ int err = 0;
+
+ p = &chip->ports[port];
+
+- /* FIXME: is this the correct test? If we're in fixed mode on an
+- * internal port, why should we process this any different from
+- * PHY mode? On the other hand, the port may be automedia between
+- * an internal PHY and the serdes...
+- */
+- if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port))
+- return;
+-
+ mv88e6xxx_reg_lock(chip);
+- /* In inband mode, the link may come up at any time while the link
+- * is not forced down. Force the link down while we reconfigure the
+- * interface mode.
+- */
+- if (mode == MLO_AN_INBAND && p->interface != state->interface &&
+- chip->info->ops->port_set_link)
+- chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
+-
+- err = mv88e6xxx_port_config_interface(chip, port, state->interface);
+- if (err && err != -EOPNOTSUPP)
+- goto err_unlock;
+
+- err = mv88e6xxx_serdes_pcs_config(chip, port, mode, state->interface,
+- state->advertising);
+- /* FIXME: we should restart negotiation if something changed - which
+- * is something we get if we convert to using phylinks PCS operations.
+- */
+- if (err > 0)
+- err = 0;
++ if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(ds, port)) {
++ /* In inband mode, the link may come up at any time while the
++ * link is not forced down. Force the link down while we
++ * reconfigure the interface mode.
++ */
++ if (mode == MLO_AN_INBAND &&
++ p->interface != state->interface &&
++ chip->info->ops->port_set_link)
++ chip->info->ops->port_set_link(chip, port,
++ LINK_FORCED_DOWN);
++
++ err = mv88e6xxx_port_config_interface(chip, port,
++ state->interface);
++ if (err && err != -EOPNOTSUPP)
++ goto err_unlock;
++
++ err = mv88e6xxx_serdes_pcs_config(chip, port, mode,
++ state->interface,
++ state->advertising);
++ /* FIXME: we should restart negotiation if something changed -
++ * which is something we get if we convert to using phylink's
++ * PCS operations.
++ */
++ if (err > 0)
++ err = 0;
++ }
+
+ /* Undo the forced down state above after completing configuration
+- * irrespective of its state on entry, which allows the link to come up.
++ * irrespective of its state on entry, which allows the link to come
++ * up in the in-band case where there is no separate SERDES. Also
++ * ensure that the link can come up if the PPU is in use and we are
++ * in PHY mode (we treat the PPU as an effective in-band mechanism).
+ */
+- if (mode == MLO_AN_INBAND && p->interface != state->interface &&
+- chip->info->ops->port_set_link)
++ if (chip->info->ops->port_set_link &&
++ ((mode == MLO_AN_INBAND && p->interface != state->interface) ||
++ (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port))))
+ chip->info->ops->port_set_link(chip, port, LINK_UNFORCED);
+
+ p->interface = state->interface;
+@@ -753,11 +763,10 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
+ ops = chip->info->ops;
+
+ mv88e6xxx_reg_lock(chip);
+- /* Internal PHYs propagate their configuration directly to the MAC.
+- * External PHYs depend on whether the PPU is enabled for this port.
++ /* Force the link down if we know the port may not be automatically
++ * updated by the switch or if we are using fixed-link mode.
+ */
+- if (((!mv88e6xxx_phy_is_internal(ds, port) &&
+- !mv88e6xxx_port_ppu_updates(chip, port)) ||
++ if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
+ mode == MLO_AN_FIXED) && ops->port_sync_link)
+ err = ops->port_sync_link(chip, port, mode, false);
+ mv88e6xxx_reg_unlock(chip);
+@@ -780,11 +789,11 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
+ ops = chip->info->ops;
+
+ mv88e6xxx_reg_lock(chip);
+- /* Internal PHYs propagate their configuration directly to the MAC.
+- * External PHYs depend on whether the PPU is enabled for this port.
++ /* Configure and force the link up if we know that the port may not be
++ * automatically updated by the switch or if we are using fixed-link
++ * mode.
+ */
+- if ((!mv88e6xxx_phy_is_internal(ds, port) &&
+- !mv88e6xxx_port_ppu_updates(chip, port)) ||
++ if (!mv88e6xxx_port_ppu_updates(chip, port) ||
+ mode == MLO_AN_FIXED) {
+ /* FIXME: for an automedia port, should we force the link
+ * down here - what if the link comes up due to "other" media
+diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
+index 55273013bfb55..2b05ead515cdc 100644
+--- a/drivers/net/dsa/mv88e6xxx/serdes.c
++++ b/drivers/net/dsa/mv88e6xxx/serdes.c
+@@ -830,7 +830,7 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool up)
+ {
+ u8 cmode = chip->ports[port].cmode;
+- int err = 0;
++ int err;
+
+ switch (cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+@@ -842,6 +842,9 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
+ case MV88E6XXX_PORT_STS_CMODE_RXAUI:
+ err = mv88e6390_serdes_power_10g(chip, lane, up);
+ break;
++ default:
++ err = -EINVAL;
++ break;
+ }
+
+ if (!err && up)
+@@ -1541,6 +1544,9 @@ int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
+ case MV88E6393X_PORT_STS_CMODE_10GBASER:
+ err = mv88e6390_serdes_power_10g(chip, lane, on);
+ break;
++ default:
++ err = -EINVAL;
++ break;
+ }
+
+ if (err)
+diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
+index 6873d5a253afb..1513dfb523de7 100644
+--- a/drivers/net/dsa/ocelot/felix.c
++++ b/drivers/net/dsa/ocelot/felix.c
+@@ -290,8 +290,11 @@ static int felix_setup_mmio_filtering(struct felix *felix)
+ }
+ }
+
+- if (cpu < 0)
++ if (cpu < 0) {
++ kfree(tagging_rule);
++ kfree(redirect_rule);
+ return -EINVAL;
++ }
+
+ tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
+ *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
+diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
+index 1c00d719e5d76..804b37c76b1e6 100644
+--- a/drivers/net/ethernet/altera/altera_tse_main.c
++++ b/drivers/net/ethernet/altera/altera_tse_main.c
+@@ -1430,16 +1430,19 @@ static int altera_tse_probe(struct platform_device *pdev)
+ priv->rxdescmem_busaddr = dma_res->start;
+
+ } else {
++ ret = -ENODEV;
+ goto err_free_netdev;
+ }
+
+- if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
++ if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
+ dma_set_coherent_mask(priv->device,
+ DMA_BIT_MASK(priv->dmaops->dmamask));
+- else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
++ } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
+ dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
+- else
++ } else {
++ ret = -EIO;
+ goto err_free_netdev;
++ }
+
+ /* MAC address space */
+ ret = request_and_map(pdev, "control_port", &control_port,
+diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
+index 02a569500234c..376f81796a293 100644
+--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
++++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
+@@ -708,7 +708,9 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
+
+ enet->irq_tx = platform_get_irq_byname(pdev, "tx");
+
+- dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
++ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
++ if (err)
++ return err;
+
+ err = bcm4908_enet_dma_alloc(enet);
+ if (err)
+diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
+index 7b4961daa2540..ed7301b691694 100644
+--- a/drivers/net/ethernet/freescale/fec.h
++++ b/drivers/net/ethernet/freescale/fec.h
+@@ -377,6 +377,9 @@ struct bufdesc_ex {
+ #define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */
+ #define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
+ #define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
++#define FEC_ENET_RXF_GET(X) (((X) == 0) ? FEC_ENET_RXF_0 : \
++ (((X) == 1) ? FEC_ENET_RXF_1 : \
++ FEC_ENET_RXF_2))
+ #define FEC_ENET_TS_AVAIL ((uint)0x00010000)
+ #define FEC_ENET_TS_TIMER ((uint)0x00008000)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index ec87b370bba1f..a3e87e10ee6bd 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1480,7 +1480,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+ break;
+ pkt_received++;
+
+- writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
++ writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
+
+ /* Check for errors. */
+ status ^= BD_ENET_RX_LAST;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+index 291e61ac3e448..2c1b1da1220ec 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+@@ -553,6 +553,14 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
+ dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
+ return;
+ }
++ if (vsi->type != I40E_VSI_MAIN &&
++ vsi->type != I40E_VSI_FDIR &&
++ vsi->type != I40E_VSI_VMDQ2) {
++ dev_info(&pf->pdev->dev,
++ "vsi %d type %d descriptor rings not available\n",
++ vsi_seid, vsi->type);
++ return;
++ }
+ if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
+ dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
+ return;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 80ae264c99ba0..2ea4deb8fc44c 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -1948,6 +1948,32 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
+ return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
+ }
+
++/**
++ * i40e_sync_vf_state
++ * @vf: pointer to the VF info
++ * @state: VF state
++ *
++ * Called from a VF message to synchronize the service with a potential
++ * VF reset state
++ **/
++static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
++{
++ int i;
++
++ /* Handling some messages requires the VF state bit to be set.
++ * It is possible that this bit is cleared during a VF reset, so
++ * we need to wait until the end of the reset to handle the
++ * request message correctly.
++ */
++ for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
++ if (test_bit(state, &vf->vf_states))
++ return true;
++ usleep_range(10000, 20000);
++ }
++
++ return test_bit(state, &vf->vf_states);
++}
++
+ /**
+ * i40e_vc_get_version_msg
+ * @vf: pointer to the VF info
+@@ -2008,7 +2034,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
+ size_t len = 0;
+ int ret;
+
+- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+@@ -2131,7 +2157,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
+ bool allmulti = false;
+ bool alluni = false;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+@@ -2219,7 +2245,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
+ struct i40e_vsi *vsi;
+ u16 num_qps_all = 0;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+@@ -2368,7 +2394,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
+ i40e_status aq_ret = 0;
+ int i;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+@@ -2540,7 +2566,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
+ struct i40e_pf *pf = vf->pf;
+ i40e_status aq_ret = 0;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+@@ -2590,7 +2616,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
+ u8 cur_pairs = vf->num_queue_pairs;
+ struct i40e_pf *pf = vf->pf;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
+ return -EINVAL;
+
+ if (req_pairs > I40E_MAX_VF_QUEUES) {
+@@ -2635,7 +2661,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
+
+ memset(&stats, 0, sizeof(struct i40e_eth_stats));
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+@@ -2752,7 +2778,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ i40e_status ret = 0;
+ int i;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ ret = I40E_ERR_PARAM;
+ goto error_param;
+@@ -2824,7 +2850,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ i40e_status ret = 0;
+ int i;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ ret = I40E_ERR_PARAM;
+ goto error_param;
+@@ -2968,7 +2994,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
+ i40e_status aq_ret = 0;
+ int i;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+@@ -3088,9 +3114,9 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
+ struct i40e_vsi *vsi = NULL;
+ i40e_status aq_ret = 0;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
+- (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
++ vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+@@ -3119,9 +3145,9 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
+ i40e_status aq_ret = 0;
+ u16 i;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
+- (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
++ vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+@@ -3154,7 +3180,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
+ i40e_status aq_ret = 0;
+ int len = 0;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+@@ -3190,7 +3216,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret = 0;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+@@ -3215,7 +3241,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
+ i40e_status aq_ret = 0;
+ struct i40e_vsi *vsi;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+@@ -3241,7 +3267,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
+ i40e_status aq_ret = 0;
+ struct i40e_vsi *vsi;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+@@ -3468,7 +3494,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ i40e_status aq_ret = 0;
+ int i, ret;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+@@ -3599,7 +3625,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ i40e_status aq_ret = 0;
+ int i, ret;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+@@ -3708,7 +3734,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
+ i40e_status aq_ret = 0;
+ u64 speed = 0;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+@@ -3797,11 +3823,6 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
+
+ /* set this flag only after making sure all inputs are sane */
+ vf->adq_enabled = true;
+- /* num_req_queues is set when user changes number of queues via ethtool
+- * and this causes issue for default VSI(which depends on this variable)
+- * when ADq is enabled, hence reset it.
+- */
+- vf->num_req_queues = 0;
+
+ /* reset the VF in order to allocate resources */
+ i40e_vc_reset_vf(vf, true);
+@@ -3824,7 +3845,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
+ struct i40e_pf *pf = vf->pf;
+ i40e_status aq_ret = 0;
+
+- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
++ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+index 091e32c1bb46f..49575a640a84c 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+@@ -18,6 +18,8 @@
+
+ #define I40E_MAX_VF_PROMISC_FLAGS 3
+
++#define I40E_VF_STATE_WAIT_COUNT 20
++
+ /* Various queue ctrls */
+ enum i40e_queue_ctrl {
+ I40E_QUEUE_CTRL_UNKNOWN = 0,
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+index 0cecaff38d042..461f5237a2f88 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+@@ -615,23 +615,44 @@ static int iavf_set_ringparam(struct net_device *netdev,
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+- new_tx_count = clamp_t(u32, ring->tx_pending,
+- IAVF_MIN_TXD,
+- IAVF_MAX_TXD);
+- new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
++ if (ring->tx_pending > IAVF_MAX_TXD ||
++ ring->tx_pending < IAVF_MIN_TXD ||
++ ring->rx_pending > IAVF_MAX_RXD ||
++ ring->rx_pending < IAVF_MIN_RXD) {
++ netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
++ ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
++ IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
++ return -EINVAL;
++ }
+
+- new_rx_count = clamp_t(u32, ring->rx_pending,
+- IAVF_MIN_RXD,
+- IAVF_MAX_RXD);
+- new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
++ new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
++ if (new_tx_count != ring->tx_pending)
++ netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
++ new_tx_count);
++
++ new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
++ if (new_rx_count != ring->rx_pending)
++ netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
++ new_rx_count);
+
+ /* if nothing to do return success */
+ if ((new_tx_count == adapter->tx_desc_count) &&
+- (new_rx_count == adapter->rx_desc_count))
++ (new_rx_count == adapter->rx_desc_count)) {
++ netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
+ return 0;
++ }
+
+- adapter->tx_desc_count = new_tx_count;
+- adapter->rx_desc_count = new_rx_count;
++ if (new_tx_count != adapter->tx_desc_count) {
++ netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
++ adapter->tx_desc_count, new_tx_count);
++ adapter->tx_desc_count = new_tx_count;
++ }
++
++ if (new_rx_count != adapter->rx_desc_count) {
++ netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
++ adapter->rx_desc_count, new_rx_count);
++ adapter->rx_desc_count = new_rx_count;
++ }
+
+ if (netif_running(netdev)) {
+ adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index fd3717ae70ab1..4f3b025daa14f 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -2199,6 +2199,7 @@ static void iavf_reset_task(struct work_struct *work)
+ }
+
+ pci_set_master(adapter->pdev);
++ pci_restore_msi_state(adapter->pdev);
+
+ if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
+ dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index f622ee20ac40d..819c32a721e84 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -5621,6 +5621,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
+ netif_carrier_on(vsi->netdev);
+ }
+
++ /* clear this now, and the first stats read will be used as baseline */
++ vsi->stat_offsets_loaded = false;
++
+ ice_service_task_schedule(pf);
+
+ return 0;
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index bbd054fcda8cd..2baa909290b3b 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -2960,11 +2960,11 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
+ mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
+
+ if (priv->percpu_pools) {
+- err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id, 0);
++ err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0);
+ if (err < 0)
+ goto err_free_dma;
+
+- err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id, 0);
++ err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0);
+ if (err < 0)
+ goto err_unregister_rxq_short;
+
+diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+index d7ac0307797fd..34c0d2ddf9ef6 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+@@ -803,8 +803,10 @@ int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
+ return -ENOMEM;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+- if (!cache)
++ if (!cache) {
++ nfp_cpp_area_free(area);
+ return -ENOMEM;
++ }
+
+ cache->id = 0;
+ cache->addr = 0;
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
+index 065e9004598ee..999abcfe3310a 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
+@@ -1643,6 +1643,13 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ data_split = true;
+ }
+ } else {
++ if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
++ DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
++ qede_free_failed_tx_pkt(txq, first_bd, 0, false);
++ qede_update_tx_producer(txq);
++ return NETDEV_TX_OK;
++ }
++
+ val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
+ }
+diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
+index c00ad57575eab..4eb9ea280474f 100644
+--- a/drivers/net/ethernet/qlogic/qla3xxx.c
++++ b/drivers/net/ethernet/qlogic/qla3xxx.c
+@@ -3478,20 +3478,19 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+- err = ql_wait_for_drvr_lock(qdev);
+- if (err) {
+- err = ql_adapter_initialize(qdev);
+- if (err) {
+- netdev_err(ndev, "Unable to initialize adapter\n");
+- goto err_init;
+- }
+- netdev_err(ndev, "Releasing driver lock\n");
+- ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+- } else {
++ if (!ql_wait_for_drvr_lock(qdev)) {
+ netdev_err(ndev, "Could not acquire driver lock\n");
++ err = -ENODEV;
+ goto err_lock;
+ }
+
++ err = ql_adapter_initialize(qdev);
++ if (err) {
++ netdev_err(ndev, "Unable to initialize adapter\n");
++ goto err_init;
++ }
++ ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
++
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ set_bit(QL_ADAPTER_UP, &qdev->flags);
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 24753a4da7e60..e303b522efb50 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -181,6 +181,8 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
+ min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);
+
+ max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
++ if (max == 0)
++ max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */
+
+ /* some devices set dwNtbOutMaxSize too low for the above default */
+ min = min(min, max);
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 131c745dc7010..b2242a082431c 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -770,8 +770,6 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
+
+ skb->dev = vrf_dev;
+
+- vrf_nf_set_untracked(skb);
+-
+ err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
+ skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
+
+@@ -792,6 +790,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
+ if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
+ return skb;
+
++ vrf_nf_set_untracked(skb);
++
+ if (qdisc_tx_is_default(vrf_dev) ||
+ IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
+ return vrf_ip6_out_direct(vrf_dev, sk, skb);
+@@ -1000,8 +1000,6 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
+
+ skb->dev = vrf_dev;
+
+- vrf_nf_set_untracked(skb);
+-
+ err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
+ skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
+
+@@ -1023,6 +1021,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
+ ipv4_is_lbcast(ip_hdr(skb)->daddr))
+ return skb;
+
++ vrf_nf_set_untracked(skb);
++
+ if (qdisc_tx_is_default(vrf_dev) ||
+ IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
+ return vrf_ip_out_direct(vrf_dev, sk, skb);
+diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
+index 26c7ae242db67..49c0b1ad40a02 100644
+--- a/drivers/net/wireless/ath/ath11k/mhi.c
++++ b/drivers/net/wireless/ath/ath11k/mhi.c
+@@ -533,7 +533,11 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
+ ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
+ break;
+ case ATH11K_MHI_RESUME:
+- ret = mhi_pm_resume(ab_pci->mhi_ctrl);
++ /* Force the MHI resume, as some devices like QCA6390 and WCN6855
++ * are functional even though they are not in M3 state. Just ignore
++ * the MHI state while resuming.
++ */
++ ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
+ break;
+ case ATH11K_MHI_TRIGGER_RDDM:
+ ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl);
+diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
+index c5300d49807a2..c3b725afa11fd 100644
+--- a/drivers/pci/controller/pci-aardvark.c
++++ b/drivers/pci/controller/pci-aardvark.c
+@@ -32,7 +32,6 @@
+ #define PCIE_CORE_DEV_ID_REG 0x0
+ #define PCIE_CORE_CMD_STATUS_REG 0x4
+ #define PCIE_CORE_DEV_REV_REG 0x8
+-#define PCIE_CORE_EXP_ROM_BAR_REG 0x30
+ #define PCIE_CORE_PCIEXP_CAP 0xc0
+ #define PCIE_CORE_ERR_CAPCTL_REG 0x118
+ #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
+@@ -774,10 +773,6 @@ advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
+ *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+- case PCI_ROM_ADDRESS1:
+- *value = advk_readl(pcie, PCIE_CORE_EXP_ROM_BAR_REG);
+- return PCI_BRIDGE_EMUL_HANDLED;
+-
+ case PCI_INTERRUPT_LINE: {
+ /*
+ * From the whole 32bit register we support reading from HW only
+@@ -810,10 +805,6 @@ advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
+ advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
+ break;
+
+- case PCI_ROM_ADDRESS1:
+- advk_writel(pcie, new, PCIE_CORE_EXP_ROM_BAR_REG);
+- break;
+-
+ case PCI_INTERRUPT_LINE:
+ if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
+ u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
+diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
+index fc95620101e85..54b88bedecdf1 100644
+--- a/drivers/platform/x86/amd-pmc.c
++++ b/drivers/platform/x86/amd-pmc.c
+@@ -70,7 +70,7 @@
+ #define AMD_CPU_ID_CZN AMD_CPU_ID_RN
+ #define AMD_CPU_ID_YC 0x14B5
+
+-#define PMC_MSG_DELAY_MIN_US 100
++#define PMC_MSG_DELAY_MIN_US 50
+ #define RESPONSE_REGISTER_LOOP_MAX 20000
+
+ #define SOC_SUBSYSTEM_IP_MAX 12
+diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
+index 08598942a6d78..13f8cf70b9aee 100644
+--- a/drivers/platform/x86/intel/hid.c
++++ b/drivers/platform/x86/intel/hid.c
+@@ -99,6 +99,13 @@ static const struct dmi_system_id button_array_table[] = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"),
+ },
+ },
++ {
++ .ident = "Microsoft Surface Go 3",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
++ },
++ },
+ { }
+ };
+
+diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
+index 729d8252028e8..a25a34535b7a4 100644
+--- a/drivers/scsi/pm8001/pm8001_init.c
++++ b/drivers/scsi/pm8001/pm8001_init.c
+@@ -281,12 +281,12 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
+ if (rc) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "pm8001_setup_irq failed [ret: %d]\n", rc);
+- goto err_out_shost;
++ goto err_out;
+ }
+ /* Request Interrupt */
+ rc = pm8001_request_irq(pm8001_ha);
+ if (rc)
+- goto err_out_shost;
++ goto err_out;
+
+ count = pm8001_ha->max_q_num;
+ /* Queues are chosen based on the number of cores/msix availability */
+@@ -422,8 +422,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
+ pm8001_tag_init(pm8001_ha);
+ return 0;
+
+-err_out_shost:
+- scsi_remove_host(pm8001_ha->shost);
+ err_out_nodev:
+ for (i = 0; i < pm8001_ha->max_memcnt; i++) {
+ if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
+diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
+index 25549a8a2d72d..7cf1f78cbaeee 100644
+--- a/drivers/scsi/qla2xxx/qla_dbg.c
++++ b/drivers/scsi/qla2xxx/qla_dbg.c
+@@ -2491,6 +2491,9 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
+ struct va_format vaf;
+ char pbuf[64];
+
++ if (!ql_mask_match(level) && !trace_ql_dbg_log_enabled())
++ return;
++
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 1b1a63a467816..84d27938a3a60 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -4334,7 +4334,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
+ rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
+ max_zones);
+
+- arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
++ arr = kzalloc(alloc_len, GFP_ATOMIC);
+ if (!arr) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
+index 2b8a3235d518b..3b3e81f99a34c 100644
+--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
++++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
+@@ -29,7 +29,7 @@ static const char * const fivr_strings[] = {
+ };
+
+ static const struct mmio_reg tgl_fivr_mmio_regs[] = {
+- { 0, 0x5A18, 3, 0x7, 12}, /* vco_ref_code_lo */
++ { 0, 0x5A18, 3, 0x7, 11}, /* vco_ref_code_lo */
+ { 0, 0x5A18, 8, 0xFF, 16}, /* vco_ref_code_hi */
+ { 0, 0x5A08, 8, 0xFF, 0}, /* spread_spectrum_pct */
+ { 0, 0x5A08, 1, 0x1, 8}, /* spread_spectrum_clk_enable */
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index b199eb65f3780..00e28456e4cc2 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -406,7 +406,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
+ * (see the end of section 5.6.3), so don't warn about them.
+ */
+- maxp = usb_endpoint_maxp(&endpoint->desc);
++ maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
+ if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
+ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+@@ -422,9 +422,9 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ maxpacket_maxes = full_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_HIGH:
+- /* Bits 12..11 are allowed only for HS periodic endpoints */
++ /* Multiple-transactions bits are allowed only for HS periodic endpoints */
+ if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
+- i = maxp & (BIT(12) | BIT(11));
++ i = maxp & USB_EP_MAXP_MULT_MASK;
+ maxp &= ~i;
+ }
+ fallthrough;
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index 9abbd01028c5f..3cb01cdd02c29 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -649,7 +649,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
+ struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node, *dwc3_np;
+ struct device *dev = &pdev->dev;
+- struct property *prop;
+ int ret;
+
+ dwc3_np = of_get_compatible_child(np, "snps,dwc3");
+@@ -658,20 +657,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
+ return -ENODEV;
+ }
+
+- prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL);
+- if (!prop) {
+- ret = -ENOMEM;
+- dev_err(dev, "unable to allocate memory for property\n");
+- goto node_put;
+- }
+-
+- prop->name = "tx-fifo-resize";
+- ret = of_add_property(dwc3_np, prop);
+- if (ret) {
+- dev_err(dev, "unable to add property\n");
+- goto node_put;
+- }
+-
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to register dwc3 core - %d\n", ret);
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 504c1cbc255d1..284eea9f6e4d8 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -1679,6 +1679,18 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ struct usb_function *f = NULL;
+ u8 endp;
+
++ if (w_length > USB_COMP_EP0_BUFSIZ) {
++ if (ctrl->bRequestType == USB_DIR_OUT) {
++ goto done;
++ } else {
++			/* Cast away the const; we are going to overwrite on purpose. */
++ __le16 *temp = (__le16 *)&ctrl->wLength;
++
++ *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
++ w_length = USB_COMP_EP0_BUFSIZ;
++ }
++ }
++
+ /* partial re-init of the response message; the function or the
+ * gadget might need to intercept e.g. a control-OUT completion
+ * when we delegate to it.
+@@ -2209,7 +2221,7 @@ int composite_dev_prepare(struct usb_composite_driver *composite,
+ if (!cdev->req)
+ return -ENOMEM;
+
+- cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
++ cdev->req->buf = kzalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
+ if (!cdev->req->buf)
+ goto fail;
+
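The same wLength clamp recurs in the dbgp and gadgetfs hunks further below. A hypothetical helper capturing the shared pattern (the name and return convention are illustrative, not part of this patch):

/* Hypothetical helper: bound a host-supplied control-transfer length by
 * the ep0 buffer size. IN requests are shortened in place (the patch
 * casts away const for the same reason); OUT requests cannot be
 * shortened, so they are rejected. */
static int clamp_ctrl_wlength(struct usb_ctrlrequest *ctrl, u16 bufsiz)
{
	u16 w_length = le16_to_cpu(ctrl->wLength);

	if (w_length <= bufsiz)
		return w_length;
	if (ctrl->bRequestType == USB_DIR_OUT)
		return -EOVERFLOW;		/* cannot shorten an OUT data stage */
	ctrl->wLength = cpu_to_le16(bufsiz);
	return bufsiz;
}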
+diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
+index 255a61bd6a6a8..9d5f17b551bbd 100644
+--- a/drivers/usb/gadget/function/uvc.h
++++ b/drivers/usb/gadget/function/uvc.h
+@@ -126,6 +126,7 @@ struct uvc_device {
+ enum uvc_state state;
+ struct usb_function func;
+ struct uvc_video video;
++ bool func_connected;
+
+ /* Descriptors */
+ struct {
+@@ -156,6 +157,7 @@ static inline struct uvc_device *to_uvc(struct usb_function *f)
+ struct uvc_file_handle {
+ struct v4l2_fh vfh;
+ struct uvc_video *device;
++ bool is_uvc_app_handle;
+ };
+
+ #define to_uvc_file_handle(handle) \
+diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
+index 4ca89eab61590..197c26f7aec63 100644
+--- a/drivers/usb/gadget/function/uvc_v4l2.c
++++ b/drivers/usb/gadget/function/uvc_v4l2.c
+@@ -227,17 +227,55 @@ static int
+ uvc_v4l2_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+ {
++ struct uvc_device *uvc = video_get_drvdata(fh->vdev);
++ struct uvc_file_handle *handle = to_uvc_file_handle(fh);
++ int ret;
++
+ if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
+ return -EINVAL;
+
+- return v4l2_event_subscribe(fh, sub, 2, NULL);
++ if (sub->type == UVC_EVENT_SETUP && uvc->func_connected)
++ return -EBUSY;
++
++ ret = v4l2_event_subscribe(fh, sub, 2, NULL);
++ if (ret < 0)
++ return ret;
++
++ if (sub->type == UVC_EVENT_SETUP) {
++ uvc->func_connected = true;
++ handle->is_uvc_app_handle = true;
++ uvc_function_connect(uvc);
++ }
++
++ return 0;
++}
++
++static void uvc_v4l2_disable(struct uvc_device *uvc)
++{
++ uvc->func_connected = false;
++ uvc_function_disconnect(uvc);
++ uvcg_video_enable(&uvc->video, 0);
++ uvcg_free_buffers(&uvc->video.queue);
+ }
+
+ static int
+ uvc_v4l2_unsubscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+ {
+- return v4l2_event_unsubscribe(fh, sub);
++ struct uvc_device *uvc = video_get_drvdata(fh->vdev);
++ struct uvc_file_handle *handle = to_uvc_file_handle(fh);
++ int ret;
++
++ ret = v4l2_event_unsubscribe(fh, sub);
++ if (ret < 0)
++ return ret;
++
++ if (sub->type == UVC_EVENT_SETUP && handle->is_uvc_app_handle) {
++ uvc_v4l2_disable(uvc);
++ handle->is_uvc_app_handle = false;
++ }
++
++ return 0;
+ }
+
+ static long
+@@ -292,7 +330,6 @@ uvc_v4l2_open(struct file *file)
+ handle->device = &uvc->video;
+ file->private_data = &handle->vfh;
+
+- uvc_function_connect(uvc);
+ return 0;
+ }
+
+@@ -304,11 +341,9 @@ uvc_v4l2_release(struct file *file)
+ struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
+ struct uvc_video *video = handle->device;
+
+- uvc_function_disconnect(uvc);
+-
+ mutex_lock(&video->mutex);
+- uvcg_video_enable(video, 0);
+- uvcg_free_buffers(&video->queue);
++ if (handle->is_uvc_app_handle)
++ uvc_v4l2_disable(uvc);
+ mutex_unlock(&video->mutex);
+
+ file->private_data = NULL;
+diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
+index e1d566c9918ae..355bc7dab9d5f 100644
+--- a/drivers/usb/gadget/legacy/dbgp.c
++++ b/drivers/usb/gadget/legacy/dbgp.c
+@@ -137,7 +137,7 @@ static int dbgp_enable_ep_req(struct usb_ep *ep)
+ goto fail_1;
+ }
+
+- req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL);
++ req->buf = kzalloc(DBGP_REQ_LEN, GFP_KERNEL);
+ if (!req->buf) {
+ err = -ENOMEM;
+ stp = 2;
+@@ -345,6 +345,19 @@ static int dbgp_setup(struct usb_gadget *gadget,
+ void *data = NULL;
+ u16 len = 0;
+
++ if (length > DBGP_REQ_LEN) {
++ if (ctrl->bRequestType == USB_DIR_OUT) {
++ return err;
++ } else {
++			/* Cast away the const; we are going to overwrite on purpose. */
++ __le16 *temp = (__le16 *)&ctrl->wLength;
++
++ *temp = cpu_to_le16(DBGP_REQ_LEN);
++ length = DBGP_REQ_LEN;
++ }
++ }
++
++
+ if (request == USB_REQ_GET_DESCRIPTOR) {
+ switch (value>>8) {
+ case USB_DT_DEVICE:
+diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
+index 539220d7f5b62..0a4041552ed19 100644
+--- a/drivers/usb/gadget/legacy/inode.c
++++ b/drivers/usb/gadget/legacy/inode.c
+@@ -110,6 +110,8 @@ enum ep0_state {
+ /* enough for the whole queue: most events invalidate others */
+ #define N_EVENT 5
+
++#define RBUF_SIZE 256
++
+ struct dev_data {
+ spinlock_t lock;
+ refcount_t count;
+@@ -144,7 +146,7 @@ struct dev_data {
+ struct dentry *dentry;
+
+ /* except this scratch i/o buffer for ep0 */
+- u8 rbuf [256];
++ u8 rbuf[RBUF_SIZE];
+ };
+
+ static inline void get_dev (struct dev_data *data)
+@@ -1334,6 +1336,18 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
++ if (w_length > RBUF_SIZE) {
++ if (ctrl->bRequestType == USB_DIR_OUT) {
++ return value;
++ } else {
++		/* Cast away the const; we are going to overwrite on purpose. */
++ __le16 *temp = (__le16 *)&ctrl->wLength;
++
++ *temp = cpu_to_le16(RBUF_SIZE);
++ w_length = RBUF_SIZE;
++ }
++ }
++
+ spin_lock (&dev->lock);
+ dev->setup_abort = 0;
+ if (dev->state == STATE_DEV_UNCONNECTED) {
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index af946c42b6f0a..df3522dab31b5 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -717,6 +717,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
+ continue;
+
+ retval = xhci_disable_slot(xhci, i);
++ xhci_free_virt_device(xhci, i);
+ if (retval)
+ xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
+ i, retval);
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index eaa49aef29352..d0b6806275e01 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1525,7 +1525,6 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
+ if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
+ /* Delete default control endpoint resources */
+ xhci_free_device_endpoint_resources(xhci, virt_dev, true);
+- xhci_free_virt_device(xhci, slot_id);
+ }
+
+ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 902f410874e8e..f5b1bcc875ded 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -3934,7 +3934,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ struct xhci_slot_ctx *slot_ctx;
+ int i, ret;
+
+-#ifndef CONFIG_USB_DEFAULT_PERSIST
+ /*
+ * We called pm_runtime_get_noresume when the device was attached.
+ * Decrement the counter here to allow controller to runtime suspend
+@@ -3942,7 +3941,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ */
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ pm_runtime_put_noidle(hcd->self.controller);
+-#endif
+
+ ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
+ /* If the host is halted due to driver unload, we still need to free the
+@@ -3961,9 +3959,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
+ }
+ virt_dev->udev = NULL;
+- ret = xhci_disable_slot(xhci, udev->slot_id);
+- if (ret)
+- xhci_free_virt_device(xhci, udev->slot_id);
++ xhci_disable_slot(xhci, udev->slot_id);
++ xhci_free_virt_device(xhci, udev->slot_id);
+ }
+
+ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
+@@ -3973,7 +3970,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
+ u32 state;
+ int ret = 0;
+
+- command = xhci_alloc_command(xhci, false, GFP_KERNEL);
++ command = xhci_alloc_command(xhci, true, GFP_KERNEL);
+ if (!command)
+ return -ENOMEM;
+
+@@ -3998,6 +3995,15 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
++
++ wait_for_completion(command->completion);
++
++ if (command->status != COMP_SUCCESS)
++ xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
++ slot_id, command->status);
++
++ xhci_free_command(xhci, command);
++
+ return ret;
+ }
+
+@@ -4094,23 +4100,20 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+
+ xhci_debugfs_create_slot(xhci, slot_id);
+
+-#ifndef CONFIG_USB_DEFAULT_PERSIST
+ /*
+ * If resetting upon resume, we can't put the controller into runtime
+ * suspend if there is a device attached.
+ */
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ pm_runtime_get_noresume(hcd->self.controller);
+-#endif
+
+ /* Is this a LS or FS device under a HS hub? */
+ 	/* Hub or peripheral? */
+ return 1;
+
+ disable_slot:
+- ret = xhci_disable_slot(xhci, udev->slot_id);
+- if (ret)
+- xhci_free_virt_device(xhci, udev->slot_id);
++ xhci_disable_slot(xhci, udev->slot_id);
++ xhci_free_virt_device(xhci, udev->slot_id);
+
+ return 0;
+ }
+@@ -4240,6 +4243,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+
+ mutex_unlock(&xhci->mutex);
+ ret = xhci_disable_slot(xhci, udev->slot_id);
++ xhci_free_virt_device(xhci, udev->slot_id);
+ if (!ret)
+ xhci_alloc_dev(hcd, udev);
+ kfree(command->completion);
+diff --git a/fs/aio.c b/fs/aio.c
+index 51b08ab01dffc..1a78979663dca 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -181,8 +181,9 @@ struct poll_iocb {
+ struct file *file;
+ struct wait_queue_head *head;
+ __poll_t events;
+- bool done;
+ bool cancelled;
++ bool work_scheduled;
++ bool work_need_resched;
+ struct wait_queue_entry wait;
+ struct work_struct work;
+ };
+@@ -1620,6 +1621,51 @@ static void aio_poll_put_work(struct work_struct *work)
+ iocb_put(iocb);
+ }
+
++/*
++ * Safely lock the waitqueue which the request is on, synchronizing with the
++ * case where the ->poll() provider decides to free its waitqueue early.
++ *
++ * Returns true on success, meaning that req->head->lock was locked, req->wait
++ * is on req->head, and an RCU read lock was taken. Returns false if the
++ * request was already removed from its waitqueue (which might no longer exist).
++ */
++static bool poll_iocb_lock_wq(struct poll_iocb *req)
++{
++ wait_queue_head_t *head;
++
++ /*
++ * While we hold the waitqueue lock and the waitqueue is nonempty,
++ * wake_up_pollfree() will wait for us. However, taking the waitqueue
++ * lock in the first place can race with the waitqueue being freed.
++ *
++ * We solve this as eventpoll does: by taking advantage of the fact that
++ * all users of wake_up_pollfree() will RCU-delay the actual free. If
++ * we enter rcu_read_lock() and see that the pointer to the queue is
++ * non-NULL, we can then lock it without the memory being freed out from
++ * under us, then check whether the request is still on the queue.
++ *
++ * Keep holding rcu_read_lock() as long as we hold the queue lock, in
++ * case the caller deletes the entry from the queue, leaving it empty.
++ * In that case, only RCU prevents the queue memory from being freed.
++ */
++ rcu_read_lock();
++ head = smp_load_acquire(&req->head);
++ if (head) {
++ spin_lock(&head->lock);
++ if (!list_empty(&req->wait.entry))
++ return true;
++ spin_unlock(&head->lock);
++ }
++ rcu_read_unlock();
++ return false;
++}
++
++static void poll_iocb_unlock_wq(struct poll_iocb *req)
++{
++ spin_unlock(&req->head->lock);
++ rcu_read_unlock();
++}
++
+ static void aio_poll_complete_work(struct work_struct *work)
+ {
+ struct poll_iocb *req = container_of(work, struct poll_iocb, work);
+@@ -1639,14 +1685,27 @@ static void aio_poll_complete_work(struct work_struct *work)
+ * avoid further branches in the fast path.
+ */
+ spin_lock_irq(&ctx->ctx_lock);
+- if (!mask && !READ_ONCE(req->cancelled)) {
+- add_wait_queue(req->head, &req->wait);
+- spin_unlock_irq(&ctx->ctx_lock);
+- return;
+- }
++ if (poll_iocb_lock_wq(req)) {
++ if (!mask && !READ_ONCE(req->cancelled)) {
++ /*
++ * The request isn't actually ready to be completed yet.
++ * Reschedule completion if another wakeup came in.
++ */
++ if (req->work_need_resched) {
++ schedule_work(&req->work);
++ req->work_need_resched = false;
++ } else {
++ req->work_scheduled = false;
++ }
++ poll_iocb_unlock_wq(req);
++ spin_unlock_irq(&ctx->ctx_lock);
++ return;
++ }
++ list_del_init(&req->wait.entry);
++ poll_iocb_unlock_wq(req);
++ } /* else, POLLFREE has freed the waitqueue, so we must complete */
+ list_del_init(&iocb->ki_list);
+ iocb->ki_res.res = mangle_poll(mask);
+- req->done = true;
+ spin_unlock_irq(&ctx->ctx_lock);
+
+ iocb_put(iocb);
+@@ -1658,13 +1717,14 @@ static int aio_poll_cancel(struct kiocb *iocb)
+ struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
+ struct poll_iocb *req = &aiocb->poll;
+
+- spin_lock(&req->head->lock);
+- WRITE_ONCE(req->cancelled, true);
+- if (!list_empty(&req->wait.entry)) {
+- list_del_init(&req->wait.entry);
+- schedule_work(&aiocb->poll.work);
+- }
+- spin_unlock(&req->head->lock);
++ if (poll_iocb_lock_wq(req)) {
++ WRITE_ONCE(req->cancelled, true);
++ if (!req->work_scheduled) {
++ schedule_work(&aiocb->poll.work);
++ req->work_scheduled = true;
++ }
++ poll_iocb_unlock_wq(req);
++ } /* else, the request was force-cancelled by POLLFREE already */
+
+ return 0;
+ }
+@@ -1681,21 +1741,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ if (mask && !(mask & req->events))
+ return 0;
+
+- list_del_init(&req->wait.entry);
+-
+- if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
++ /*
++ * Complete the request inline if possible. This requires that three
++ * conditions be met:
++ * 1. An event mask must have been passed. If a plain wakeup was done
++ * instead, then mask == 0 and we have to call vfs_poll() to get
++ * the events, so inline completion isn't possible.
++ * 2. The completion work must not have already been scheduled.
++ * 3. ctx_lock must not be busy. We have to use trylock because we
++ * already hold the waitqueue lock, so this inverts the normal
++ * locking order. Use irqsave/irqrestore because not all
++ * filesystems (e.g. fuse) call this function with IRQs disabled,
++ * yet IRQs have to be disabled before ctx_lock is obtained.
++ */
++ if (mask && !req->work_scheduled &&
++ spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
+ struct kioctx *ctx = iocb->ki_ctx;
+
+- /*
+- * Try to complete the iocb inline if we can. Use
+- * irqsave/irqrestore because not all filesystems (e.g. fuse)
+- * call this function with IRQs disabled and because IRQs
+- * have to be disabled before ctx_lock is obtained.
+- */
++ list_del_init(&req->wait.entry);
+ list_del(&iocb->ki_list);
+ iocb->ki_res.res = mangle_poll(mask);
+- req->done = true;
+- if (iocb->ki_eventfd && eventfd_signal_allowed()) {
++ if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
+ iocb = NULL;
+ INIT_WORK(&req->work, aio_poll_put_work);
+ schedule_work(&req->work);
+@@ -1704,7 +1770,43 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ if (iocb)
+ iocb_put(iocb);
+ } else {
+- schedule_work(&req->work);
++ /*
++ * Schedule the completion work if needed. If it was already
++ * scheduled, record that another wakeup came in.
++ *
++ * Don't remove the request from the waitqueue here, as it might
++ * not actually be complete yet (we won't know until vfs_poll()
++ * is called), and we must not miss any wakeups. POLLFREE is an
++ * exception to this; see below.
++ */
++ if (req->work_scheduled) {
++ req->work_need_resched = true;
++ } else {
++ schedule_work(&req->work);
++ req->work_scheduled = true;
++ }
++
++ /*
++ * If the waitqueue is being freed early but we can't complete
++ * the request inline, we have to tear down the request as best
++ * we can. That means immediately removing the request from its
++ * waitqueue and preventing all further accesses to the
++ * waitqueue via the request. We also need to schedule the
++ * completion work (done above). Also mark the request as
++ * cancelled, to potentially skip an unneeded call to ->poll().
++ */
++ if (mask & POLLFREE) {
++ WRITE_ONCE(req->cancelled, true);
++ list_del_init(&req->wait.entry);
++
++ /*
++ * Careful: this *must* be the last step, since as soon
++ * as req->head is NULL'ed out, the request can be
++ * completed and freed, since aio_poll_complete_work()
++ * will no longer need to take the waitqueue lock.
++ */
++ smp_store_release(&req->head, NULL);
++ }
+ }
+ return 1;
+ }
+@@ -1712,6 +1814,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ struct aio_poll_table {
+ struct poll_table_struct pt;
+ struct aio_kiocb *iocb;
++ bool queued;
+ int error;
+ };
+
+@@ -1722,11 +1825,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
+ struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
+
+ /* multiple wait queues per file are not supported */
+- if (unlikely(pt->iocb->poll.head)) {
++ if (unlikely(pt->queued)) {
+ pt->error = -EINVAL;
+ return;
+ }
+
++ pt->queued = true;
+ pt->error = 0;
+ pt->iocb->poll.head = head;
+ add_wait_queue(head, &pt->iocb->poll.wait);
+@@ -1751,12 +1855,14 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
+ req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
+
+ req->head = NULL;
+- req->done = false;
+ req->cancelled = false;
++ req->work_scheduled = false;
++ req->work_need_resched = false;
+
+ apt.pt._qproc = aio_poll_queue_proc;
+ apt.pt._key = req->events;
+ apt.iocb = aiocb;
++ apt.queued = false;
+ apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
+
+ /* initialized the list so that we can do list_empty checks */
+@@ -1765,23 +1871,35 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
+
+ mask = vfs_poll(req->file, &apt.pt) & req->events;
+ spin_lock_irq(&ctx->ctx_lock);
+- if (likely(req->head)) {
+- spin_lock(&req->head->lock);
+- if (unlikely(list_empty(&req->wait.entry))) {
+- if (apt.error)
++ if (likely(apt.queued)) {
++ bool on_queue = poll_iocb_lock_wq(req);
++
++ if (!on_queue || req->work_scheduled) {
++ /*
++ * aio_poll_wake() already either scheduled the async
++ * completion work, or completed the request inline.
++ */
++ if (apt.error) /* unsupported case: multiple queues */
+ cancel = true;
+ apt.error = 0;
+ mask = 0;
+ }
+ if (mask || apt.error) {
++ /* Steal to complete synchronously. */
+ list_del_init(&req->wait.entry);
+ } else if (cancel) {
++ /* Cancel if possible (may be too late though). */
+ WRITE_ONCE(req->cancelled, true);
+- } else if (!req->done) { /* actually waiting for an event */
++ } else if (on_queue) {
++ /*
++ * Actually waiting for an event, so add the request to
++ * active_reqs so that it can be cancelled if needed.
++ */
+ list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+ aiocb->ki_cancel = aio_poll_cancel;
+ }
+- spin_unlock(&req->head->lock);
++ if (on_queue)
++ poll_iocb_unlock_wq(req);
+ }
+ if (mask) { /* no async, we'd stolen it */
+ aiocb->ki_res.res = mangle_poll(mask);
+diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
+index 2059d1504149a..40c4d6ba3fb9a 100644
+--- a/fs/btrfs/delalloc-space.c
++++ b/fs/btrfs/delalloc-space.c
+@@ -143,10 +143,13 @@ int btrfs_check_data_free_space(struct btrfs_inode *inode,
+
+ /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
+ ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
+- if (ret < 0)
++ if (ret < 0) {
+ btrfs_free_reserved_data_space_noquota(fs_info, len);
+- else
++ extent_changeset_free(*reserved);
++ *reserved = NULL;
++ } else {
+ ret = 0;
++ }
+ return ret;
+ }
+
+@@ -452,8 +455,11 @@ int btrfs_delalloc_reserve_space(struct btrfs_inode *inode,
+ if (ret < 0)
+ return ret;
+ ret = btrfs_delalloc_reserve_metadata(inode, len);
+- if (ret < 0)
++ if (ret < 0) {
+ btrfs_free_reserved_data_space(inode, *reserved, start, len);
++ extent_changeset_free(*reserved);
++ *reserved = NULL;
++ }
+ return ret;
+ }
+
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index a40fb9c74dda3..beac825edca1b 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4284,6 +4284,12 @@ static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
+ if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
+ return;
+
++ /*
++	 * A read may stumble upon this buffer later; make sure the reader
++	 * sees that an error occurred.
++ */
++ clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
++
+ /*
+ * If we error out, we should add back the dirty_metadata_bytes
+ * to make it consistent.
+diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
+index 702dc5441f039..db37a37996497 100644
+--- a/fs/btrfs/root-tree.c
++++ b/fs/btrfs/root-tree.c
+@@ -336,7 +336,8 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+ key.offset = ref_id;
+ again:
+ ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
+- BUG_ON(ret < 0);
++ if (ret < 0)
++ goto out;
+ if (ret == 0) {
+ leaf = path->nodes[0];
+ ref = btrfs_item_ptr(leaf, path->slots[0],
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index ca33f10b11123..110e63d6727d0 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -2879,6 +2879,8 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
+ path->nodes[*level]->len);
+ if (ret)
+ return ret;
++ btrfs_redirty_list_add(trans->transaction,
++ next);
+ } else {
+ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
+ clear_extent_buffer_dirty(next);
+@@ -2959,6 +2961,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
+ next->start, next->len);
+ if (ret)
+ goto out;
++ btrfs_redirty_list_add(trans->transaction, next);
+ } else {
+ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
+ clear_extent_buffer_dirty(next);
+@@ -3412,8 +3415,6 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
+ EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
+ extent_io_tree_release(&log->log_csum_range);
+
+- if (trans && log->node)
+- btrfs_redirty_list_add(trans->transaction, log->node);
+ btrfs_put_root(log);
+ }
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index f8ceddafb6fc4..e9b06e339c4b0 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -9775,7 +9775,7 @@ static void io_uring_drop_tctx_refs(struct task_struct *task)
+
+ /*
+ * Find any io_uring ctx that this task has registered or done IO on, and cancel
+- * requests. @sqd should be not-null IIF it's an SQPOLL thread cancellation.
++ * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
+ */
+ static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
+ {
+@@ -9816,8 +9816,10 @@ static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
+ cancel_all);
+ }
+
+- prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
++ prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
++ io_run_task_work();
+ io_uring_drop_tctx_refs(current);
++
+ /*
+ * If we've seen completions, retry without waiting. This
+ * avoids a race where a completion comes in before we did
+diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
+index 6fedc49726bf7..c634483d85d2a 100644
+--- a/fs/nfsd/nfs4recover.c
++++ b/fs/nfsd/nfs4recover.c
+@@ -2156,6 +2156,7 @@ static struct notifier_block nfsd4_cld_block = {
+ int
+ register_cld_notifier(void)
+ {
++ WARN_ON(!nfsd_net_id);
+ return rpc_pipefs_notifier_register(&nfsd4_cld_block);
+ }
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 3f4027a5de883..61301affb4c1b 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1207,6 +1207,11 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
+ return 0;
+ }
+
++static bool delegation_hashed(struct nfs4_delegation *dp)
++{
++ return !(list_empty(&dp->dl_perfile));
++}
++
+ static bool
+ unhash_delegation_locked(struct nfs4_delegation *dp)
+ {
+@@ -1214,7 +1219,7 @@ unhash_delegation_locked(struct nfs4_delegation *dp)
+
+ lockdep_assert_held(&state_lock);
+
+- if (list_empty(&dp->dl_perfile))
++ if (!delegation_hashed(dp))
+ return false;
+
+ dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
+@@ -4598,7 +4603,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
+ * queued for a lease break. Don't queue it again.
+ */
+ spin_lock(&state_lock);
+- if (dp->dl_time == 0) {
++ if (delegation_hashed(dp) && dp->dl_time == 0) {
+ dp->dl_time = ktime_get_boottime_seconds();
+ list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
+ }
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 070e5dd03e26f..5ed04d6be9a59 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1521,12 +1521,9 @@ static int __init init_nfsd(void)
+ int retval;
+ printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
+
+- retval = register_cld_notifier();
+- if (retval)
+- return retval;
+ retval = nfsd4_init_slabs();
+ if (retval)
+- goto out_unregister_notifier;
++ return retval;
+ retval = nfsd4_init_pnfs();
+ if (retval)
+ goto out_free_slabs;
+@@ -1545,9 +1542,14 @@ static int __init init_nfsd(void)
+ goto out_free_exports;
+ retval = register_pernet_subsys(&nfsd_net_ops);
+ if (retval < 0)
++ goto out_free_filesystem;
++ retval = register_cld_notifier();
++ if (retval)
+ goto out_free_all;
+ return 0;
+ out_free_all:
++ unregister_pernet_subsys(&nfsd_net_ops);
++out_free_filesystem:
+ unregister_filesystem(&nfsd_fs_type);
+ out_free_exports:
+ remove_proc_entry("fs/nfs/exports", NULL);
+@@ -1561,13 +1563,12 @@ out_free_pnfs:
+ nfsd4_exit_pnfs();
+ out_free_slabs:
+ nfsd4_free_slabs();
+-out_unregister_notifier:
+- unregister_cld_notifier();
+ return retval;
+ }
+
+ static void __exit exit_nfsd(void)
+ {
++ unregister_cld_notifier();
+ unregister_pernet_subsys(&nfsd_net_ops);
+ nfsd_drc_slab_free();
+ remove_proc_entry("fs/nfs/exports", NULL);
+@@ -1577,7 +1578,6 @@ static void __exit exit_nfsd(void)
+ nfsd4_free_slabs();
+ nfsd4_exit_pnfs();
+ unregister_filesystem(&nfsd_fs_type);
+- unregister_cld_notifier();
+ }
+
+ MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
+diff --git a/fs/signalfd.c b/fs/signalfd.c
+index 040e1cf905282..65ce0e72e7b95 100644
+--- a/fs/signalfd.c
++++ b/fs/signalfd.c
+@@ -35,17 +35,7 @@
+
+ void signalfd_cleanup(struct sighand_struct *sighand)
+ {
+- wait_queue_head_t *wqh = &sighand->signalfd_wqh;
+- /*
+- * The lockless check can race with remove_wait_queue() in progress,
+- * but in this case its caller should run under rcu_read_lock() and
+- * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return.
+- */
+- if (likely(!waitqueue_active(wqh)))
+- return;
+-
+- /* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
+- wake_up_poll(wqh, EPOLLHUP | POLLFREE);
++ wake_up_pollfree(&sighand->signalfd_wqh);
+ }
+
+ struct signalfd_ctx {
+diff --git a/fs/smbfs_common/cifs_arc4.c b/fs/smbfs_common/cifs_arc4.c
+index 85ba15a60b13b..043e4cb839fa2 100644
+--- a/fs/smbfs_common/cifs_arc4.c
++++ b/fs/smbfs_common/cifs_arc4.c
+@@ -72,16 +72,3 @@ void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int l
+ ctx->y = y;
+ }
+ EXPORT_SYMBOL_GPL(cifs_arc4_crypt);
+-
+-static int __init
+-init_smbfs_common(void)
+-{
+- return 0;
+-}
+-static void __init
+-exit_smbfs_common(void)
+-{
+-}
+-
+-module_init(init_smbfs_common)
+-module_exit(exit_smbfs_common)
+diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
+index 925a621b432e3..3616839c5c4b6 100644
+--- a/fs/tracefs/inode.c
++++ b/fs/tracefs/inode.c
+@@ -161,6 +161,77 @@ struct tracefs_fs_info {
+ struct tracefs_mount_opts mount_opts;
+ };
+
++static void change_gid(struct dentry *dentry, kgid_t gid)
++{
++ if (!dentry->d_inode)
++ return;
++ dentry->d_inode->i_gid = gid;
++}
++
++/*
++ * Taken from d_walk, but without the need for handling renames.
++ * Nothing can be renamed while walking the list, as tracefs
++ * does not support renames. This is only called when mounting
++ * or remounting the file system, to set all the files to
++ * the given gid.
++ */
++static void set_gid(struct dentry *parent, kgid_t gid)
++{
++ struct dentry *this_parent;
++ struct list_head *next;
++
++ this_parent = parent;
++ spin_lock(&this_parent->d_lock);
++
++ change_gid(this_parent, gid);
++repeat:
++ next = this_parent->d_subdirs.next;
++resume:
++ while (next != &this_parent->d_subdirs) {
++ struct list_head *tmp = next;
++ struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
++ next = tmp->next;
++
++ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
++
++ change_gid(dentry, gid);
++
++ if (!list_empty(&dentry->d_subdirs)) {
++ spin_unlock(&this_parent->d_lock);
++ spin_release(&dentry->d_lock.dep_map, _RET_IP_);
++ this_parent = dentry;
++ spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
++ goto repeat;
++ }
++ spin_unlock(&dentry->d_lock);
++ }
++ /*
++ * All done at this level ... ascend and resume the search.
++ */
++ rcu_read_lock();
++ascend:
++ if (this_parent != parent) {
++ struct dentry *child = this_parent;
++ this_parent = child->d_parent;
++
++ spin_unlock(&child->d_lock);
++ spin_lock(&this_parent->d_lock);
++
++ /* go into the first sibling still alive */
++ do {
++ next = child->d_child.next;
++ if (next == &this_parent->d_subdirs)
++ goto ascend;
++ child = list_entry(next, struct dentry, d_child);
++ } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
++ rcu_read_unlock();
++ goto resume;
++ }
++ rcu_read_unlock();
++ spin_unlock(&this_parent->d_lock);
++ return;
++}
++
+ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
+ {
+ substring_t args[MAX_OPT_ARGS];
+@@ -193,6 +264,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
+ if (!gid_valid(gid))
+ return -EINVAL;
+ opts->gid = gid;
++ set_gid(tracefs_mount->mnt_root, gid);
+ break;
+ case Opt_mode:
+ if (match_octal(&args[0], &option))
+@@ -414,6 +486,8 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
+ inode->i_mode = mode;
+ inode->i_fop = fops ? fops : &tracefs_file_operations;
+ inode->i_private = data;
++ inode->i_uid = d_inode(dentry->d_parent)->i_uid;
++ inode->i_gid = d_inode(dentry->d_parent)->i_gid;
+ d_instantiate(dentry, inode);
+ fsnotify_create(dentry->d_parent->d_inode, dentry);
+ return end_creating(dentry);
+@@ -436,6 +510,8 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
+ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP;
+ inode->i_op = ops;
+ inode->i_fop = &simple_dir_operations;
++ inode->i_uid = d_inode(dentry->d_parent)->i_uid;
++ inode->i_gid = d_inode(dentry->d_parent)->i_gid;
+
+ /* directory inodes start off with i_nlink == 2 (for "." entry) */
+ inc_nlink(inode);
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index bca11ac818c1f..6c4640526f741 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -723,6 +723,7 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
+ struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ struct bpf_attach_target_info *tgt_info);
+ void bpf_trampoline_put(struct bpf_trampoline *tr);
++int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
+ #define BPF_DISPATCHER_INIT(_name) { \
+ .mutex = __MUTEX_INITIALIZER(_name.mutex), \
+ .func = &_name##_func, \
+@@ -1320,28 +1321,16 @@ extern struct mutex bpf_stats_enabled_mutex;
+ * kprobes, tracepoints) to prevent deadlocks on map operations as any of
+ * these events can happen inside a region which holds a map bucket lock
+ * and can deadlock on it.
+- *
+- * Use the preemption safe inc/dec variants on RT because migrate disable
+- * is preemptible on RT and preemption in the middle of the RMW operation
+- * might lead to inconsistent state. Use the raw variants for non RT
+- * kernels as migrate_disable() maps to preempt_disable() so the slightly
+- * more expensive save operation can be avoided.
+ */
+ static inline void bpf_disable_instrumentation(void)
+ {
+ migrate_disable();
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- this_cpu_inc(bpf_prog_active);
+- else
+- __this_cpu_inc(bpf_prog_active);
++ this_cpu_inc(bpf_prog_active);
+ }
+
+ static inline void bpf_enable_instrumentation(void)
+ {
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- this_cpu_dec(bpf_prog_active);
+- else
+- __this_cpu_dec(bpf_prog_active);
++ this_cpu_dec(bpf_prog_active);
+ migrate_enable();
+ }
+
+diff --git a/include/linux/delay.h b/include/linux/delay.h
+index 1d0e2ce6b6d9f..e8607992c68a5 100644
+--- a/include/linux/delay.h
++++ b/include/linux/delay.h
+@@ -20,6 +20,7 @@
+ */
+
+ #include <linux/kernel.h>
++#include <linux/sched.h>
+
+ extern unsigned long loops_per_jiffy;
+
+@@ -58,7 +59,18 @@ void calibrate_delay(void);
+ void __attribute__((weak)) calibration_delay_done(void);
+ void msleep(unsigned int msecs);
+ unsigned long msleep_interruptible(unsigned int msecs);
+-void usleep_range(unsigned long min, unsigned long max);
++void usleep_range_state(unsigned long min, unsigned long max,
++ unsigned int state);
++
++static inline void usleep_range(unsigned long min, unsigned long max)
++{
++ usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
++}
++
++static inline void usleep_idle_range(unsigned long min, unsigned long max)
++{
++ usleep_range_state(min, max, TASK_IDLE);
++}
+
+ static inline void ssleep(unsigned int seconds)
+ {
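A hedged usage sketch for the new wrapper (loop body and names are placeholders): usleep_idle_range() sleeps in TASK_IDLE, i.e. TASK_UNINTERRUPTIBLE | TASK_NOLOAD, so a long-running kthread such as DAMON's kdamond (patched below) can sleep without inflating the load average.

/* Hypothetical background kthread using the new helper. */
static int example_kthread_fn(void *data)
{
	while (!kthread_should_stop()) {
		example_do_work(data);		/* placeholder work item */
		usleep_idle_range(1000, 1100);	/* ~1ms, does not count as load */
	}
	return 0;
}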
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index 28391de6cc445..1611dc9d44207 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -639,9 +639,6 @@ static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void
+ * This uses migrate_disable/enable() explicitly to document that the
+ * invocation of a BPF program does not require reentrancy protection
+ * against a BPF program which is invoked from a preempting task.
+- *
+- * For non RT enabled kernels migrate_disable/enable() maps to
+- * preempt_disable/enable(), i.e. it disables also preemption.
+ */
+ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
+ const void *ctx)
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 9e067f937dbc2..f453be385bd47 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -840,6 +840,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
+ return hdev->ll_driver == driver;
+ }
+
++static inline bool hid_is_usb(struct hid_device *hdev)
++{
++ return hid_is_using_ll_driver(hdev, &usb_hid_driver);
++}
++
+ #define PM_HINT_FULLON 1<<5
+ #define PM_HINT_NORMAL 1<<1
+
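A hedged example of the intended use (the probe flow is illustrative): drivers that assume a USB transport must check hid_is_usb() before casting the parent device, since to_usb_interface() on a non-USB parent yields a bogus pointer.

/* Illustrative probe() excerpt: reject non-USB transports before
 * treating hdev->dev.parent as a USB interface. */
static int example_hid_probe(struct hid_device *hdev,
			     const struct hid_device_id *id)
{
	struct usb_interface *intf;

	if (!hid_is_usb(hdev))
		return -EINVAL;

	intf = to_usb_interface(hdev->dev.parent);	/* safe: USB confirmed */
	return example_driver_setup(intf);		/* placeholder */
}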
+diff --git a/include/linux/mhi.h b/include/linux/mhi.h
+index 7239858790353..a5cc4cdf9cc86 100644
+--- a/include/linux/mhi.h
++++ b/include/linux/mhi.h
+@@ -663,6 +663,19 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
+ */
+ int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
+
++/**
++ * mhi_pm_resume_force - Force resume MHI from suspended state
++ * @mhi_cntrl: MHI controller
++ *
++ * Resume the device irrespective of its MHI state. As per the MHI spec,
++ * devices have to be in M3 state during resume, but some devices end up in a
++ * different MHI state and still work fine if allowed to resume. This API is
++ * intended for such devices.
++ *
++ * Return: 0 if the resume succeeds, a negative error code otherwise
++ */
++int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl);
++
+ /**
+ * mhi_download_rddm_image - Download ramdump image from device for
+ * debugging purpose.
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index 222da43b7096d..eddd66d426caf 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -129,7 +129,7 @@ static inline bool pm_runtime_suspended(struct device *dev)
+ * pm_runtime_active - Check whether or not a device is runtime-active.
+ * @dev: Target device.
+ *
+- * Return %true if runtime PM is enabled for @dev and its runtime PM status is
++ * Return %true if runtime PM is disabled for @dev or its runtime PM status is
+ * %RPM_ACTIVE, or %false otherwise.
+ *
+ * Note that the return value of this function can only be trusted if it is
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index 93dab0e9580f8..d22cf2985b8fd 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -217,6 +217,7 @@ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void
+ void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
+ void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
+ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
++void __wake_up_pollfree(struct wait_queue_head *wq_head);
+
+ #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
+ #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
+@@ -245,6 +246,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
+ #define wake_up_interruptible_sync_poll_locked(x, m) \
+ __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
+
++/**
++ * wake_up_pollfree - signal that a polled waitqueue is going away
++ * @wq_head: the wait queue head
++ *
++ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
++ * lifetime is tied to a task rather than to the 'struct file' being polled,
++ * this function must be called before the waitqueue is freed so that
++ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
++ *
++ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
++ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
++ */
++static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
++{
++ /*
++ * For performance reasons, we don't always take the queue lock here.
++ * Therefore, we might race with someone removing the last entry from
++ * the queue, and proceed while they still hold the queue lock.
++ * However, rcu_read_lock() is required to be held in such cases, so we
++ * can safely proceed with an RCU-delayed free.
++ */
++ if (waitqueue_active(wq_head))
++ __wake_up_pollfree(wq_head);
++}
++
+ #define ___wait_cond_timeout(condition) \
+ ({ \
+ bool __cond = (condition); \
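A hedged provider-side sketch of the contract in the kernel-doc above (the context structure is hypothetical): wake pollers with POLLFREE, then RCU-delay the free so lockers that found the queue under rcu_read_lock() are never left holding freed memory.

/* Hypothetical teardown of a waitqueue whose lifetime is tied to a
 * task, following the wake_up_pollfree() contract documented above. */
static void example_ctx_destroy(struct example_ctx *ctx)
{
	wake_up_pollfree(&ctx->wqh);	/* notifies pollers: EPOLLHUP | POLLFREE */
	synchronize_rcu();		/* lockers under rcu_read_lock() drain */
	kfree(ctx);
}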
+diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h
+index f6af76c87a6c3..191c36afa1f4a 100644
+--- a/include/net/bond_alb.h
++++ b/include/net/bond_alb.h
+@@ -126,7 +126,7 @@ struct tlb_slave_info {
+ struct alb_bond_info {
+ struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
+ u32 unbalanced_load;
+- int tx_rebalance_counter;
++ atomic_t tx_rebalance_counter;
+ int lp_counter;
+ /* -------- rlb parameters -------- */
+ int rlb_enabled;
+diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
+index cc663c68ddc4b..d24b0a34c8f0c 100644
+--- a/include/net/netfilter/nf_conntrack.h
++++ b/include/net/netfilter/nf_conntrack.h
+@@ -276,14 +276,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
+ /* jiffies until ct expires, 0 if already expired */
+ static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
+ {
+- s32 timeout = ct->timeout - nfct_time_stamp;
++ s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
+
+ return timeout > 0 ? timeout : 0;
+ }
+
+ static inline bool nf_ct_is_expired(const struct nf_conn *ct)
+ {
+- return (__s32)(ct->timeout - nfct_time_stamp) <= 0;
++ return (__s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0;
+ }
+
+ /* use after obtaining a reference count */
+@@ -302,7 +302,7 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct)
+ static inline void nf_ct_offload_timeout(struct nf_conn *ct)
+ {
+ if (nf_ct_expires(ct) < NF_CT_DAY / 2)
+- ct->timeout = nfct_time_stamp + NF_CT_DAY;
++ WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
+ }
+
+ struct kernel_param;
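For context, a minimal sketch of the writer/reader pair these annotations protect (function names are illustrative; in the patch the writer is the flowtable offload refresh and the reader is the expiry check above):

/* Illustrative only: ct->timeout is written and read concurrently, so
 * both sides need single, untorn accesses. */
static void example_offload_refresh(struct nf_conn *ct)
{
	WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);	/* writer */
}

static bool example_is_expired(const struct nf_conn *ct)
{
	return (s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0; /* reader */
}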
+diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h
+index 41b509f410bf9..f9c520ce4bf4e 100644
+--- a/include/uapi/asm-generic/poll.h
++++ b/include/uapi/asm-generic/poll.h
+@@ -29,7 +29,7 @@
+ #define POLLRDHUP 0x2000
+ #endif
+
+-#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */
++#define POLLFREE (__force __poll_t)0x4000
+
+ #define POLL_BUSY_LOOP (__force __poll_t)0x8000
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 390d5661cd708..8a0b4879790e5 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -8228,7 +8228,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
+
+ new_range = dst_reg->off;
+ if (range_right_open)
+- new_range--;
++ new_range++;
+
+ /* Examples for register markings:
+ *
+diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
+index 76577d1642a5d..eca38107b32f1 100644
+--- a/kernel/sched/wait.c
++++ b/kernel/sched/wait.c
+@@ -238,6 +238,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
+ }
+ EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
+
++void __wake_up_pollfree(struct wait_queue_head *wq_head)
++{
++ __wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
++ /* POLLFREE must have cleared the queue. */
++ WARN_ON_ONCE(waitqueue_active(wq_head));
++}
++
+ /*
+ * Note: we use "set_current_state()" _after_ the wait-queue add,
+ * because we need a memory barrier there on SMP, so that any
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index e3d2c23c413d4..85f1021ad4595 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -2054,26 +2054,28 @@ unsigned long msleep_interruptible(unsigned int msecs)
+ EXPORT_SYMBOL(msleep_interruptible);
+
+ /**
+- * usleep_range - Sleep for an approximate time
+- * @min: Minimum time in usecs to sleep
+- * @max: Maximum time in usecs to sleep
++ * usleep_range_state - Sleep for an approximate time in a given state
++ * @min: Minimum time in usecs to sleep
++ * @max: Maximum time in usecs to sleep
++ * @state:	State in which the current task will sleep
+ *
+ * In non-atomic context where the exact wakeup time is flexible, use
+- * usleep_range() instead of udelay(). The sleep improves responsiveness
++ * usleep_range_state() instead of udelay(). The sleep improves responsiveness
+ * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
+ * power usage by allowing hrtimers to take advantage of an already-
+ * scheduled interrupt instead of scheduling a new one just for this sleep.
+ */
+-void __sched usleep_range(unsigned long min, unsigned long max)
++void __sched usleep_range_state(unsigned long min, unsigned long max,
++ unsigned int state)
+ {
+ ktime_t exp = ktime_add_us(ktime_get(), min);
+ u64 delta = (u64)(max - min) * NSEC_PER_USEC;
+
+ for (;;) {
+- __set_current_state(TASK_UNINTERRUPTIBLE);
++ __set_current_state(state);
+ /* Do not return before the requested sleep time has elapsed */
+ if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
+ break;
+ }
+ }
+-EXPORT_SYMBOL(usleep_range);
++EXPORT_SYMBOL(usleep_range_state);
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index 4a9d4e27d0d9b..02ff66f863587 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -947,6 +947,13 @@ void bdi_unregister(struct backing_dev_info *bdi)
+ wb_shutdown(&bdi->wb);
+ cgwb_bdi_unregister(bdi);
+
++ /*
++ * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
++ * update the global bdi_min_ratio.
++ */
++ if (bdi->min_ratio)
++ bdi_set_min_ratio(bdi, 0);
++
+ if (bdi->dev) {
+ bdi_debug_unregister(bdi);
+ device_unregister(bdi->dev);
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index 30e9211f494a7..7a4912d6e65f2 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -357,6 +357,15 @@ int damon_start(struct damon_ctx **ctxs, int nr_ctxs)
+ return err;
+ }
+
++static void kdamond_usleep(unsigned long usecs)
++{
++ /* See Documentation/timers/timers-howto.rst for the thresholds */
++ if (usecs > 20 * 1000)
++ schedule_timeout_idle(usecs_to_jiffies(usecs));
++ else
++ usleep_idle_range(usecs, usecs + 1);
++}
++
+ /*
+ * __damon_stop() - Stops monitoring of given context.
+ * @ctx: monitoring context
+@@ -370,8 +379,7 @@ static int __damon_stop(struct damon_ctx *ctx)
+ ctx->kdamond_stop = true;
+ mutex_unlock(&ctx->kdamond_lock);
+ while (damon_kdamond_running(ctx))
+- usleep_range(ctx->sample_interval,
+- ctx->sample_interval * 2);
++ kdamond_usleep(ctx->sample_interval);
+ return 0;
+ }
+ mutex_unlock(&ctx->kdamond_lock);
+@@ -670,7 +678,7 @@ static int kdamond_fn(void *data)
+ ctx->callback.after_sampling(ctx))
+ set_kdamond_stop(ctx);
+
+- usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
++ kdamond_usleep(ctx->sample_interval);
+
+ if (ctx->primitive.check_accesses)
+ max_nr_accesses = ctx->primitive.check_accesses(ctx);
+diff --git a/mm/slub.c b/mm/slub.c
+index d8f77346376d8..ca6ba6bdf27b1 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -5072,6 +5072,7 @@ struct loc_track {
+ unsigned long max;
+ unsigned long count;
+ struct location *loc;
++ loff_t idx;
+ };
+
+ static struct dentry *slab_debugfs_root;
+@@ -6035,11 +6036,11 @@ __initcall(slab_sysfs_init);
+ #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
+ static int slab_debugfs_show(struct seq_file *seq, void *v)
+ {
+-
+- struct location *l;
+- unsigned int idx = *(unsigned int *)v;
+ struct loc_track *t = seq->private;
++ struct location *l;
++ unsigned long idx;
+
++ idx = (unsigned long) t->idx;
+ if (idx < t->count) {
+ l = &t->loc[idx];
+
+@@ -6088,16 +6089,18 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
+ {
+ struct loc_track *t = seq->private;
+
+- v = ppos;
+- ++*ppos;
++ t->idx = ++(*ppos);
+ if (*ppos <= t->count)
+- return v;
++ return ppos;
+
+ return NULL;
+ }
+
+ static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
+ {
++ struct loc_track *t = seq->private;
++
++ t->idx = *ppos;
+ return ppos;
+ }
+
+diff --git a/net/core/devlink.c b/net/core/devlink.c
+index a856ae401ea5c..6931713e363fd 100644
+--- a/net/core/devlink.c
++++ b/net/core/devlink.c
+@@ -4031,14 +4031,6 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
+ return err;
+ }
+
+- if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
+- info->attrs[DEVLINK_ATTR_NETNS_FD] ||
+- info->attrs[DEVLINK_ATTR_NETNS_ID]) {
+- dest_net = devlink_netns_get(skb, info);
+- if (IS_ERR(dest_net))
+- return PTR_ERR(dest_net);
+- }
+-
+ if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION])
+ action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]);
+ else
+@@ -4081,6 +4073,14 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
+ return -EINVAL;
+ }
+ }
++ if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
++ info->attrs[DEVLINK_ATTR_NETNS_FD] ||
++ info->attrs[DEVLINK_ATTR_NETNS_ID]) {
++ dest_net = devlink_netns_get(skb, info);
++ if (IS_ERR(dest_net))
++ return PTR_ERR(dest_net);
++ }
++
+ err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack);
+
+ if (dest_net)
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 3e58037a8ae6f..ff049733cceeb 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -733,11 +733,10 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
+
+ ASSERT_RTNL();
+
+- n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
++ n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
+ if (!n)
+ goto out;
+
+- n->protocol = 0;
+ write_pnet(&n->net, net);
+ memcpy(n->key, pkey, key_len);
+ n->dev = dev;
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 1ae52ac943f62..8eb671c827f90 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -1124,6 +1124,8 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
+
+ void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
+ {
++ psock_set_prog(&psock->progs.stream_parser, NULL);
++
+ if (!psock->saved_data_ready)
+ return;
+
+@@ -1212,6 +1214,9 @@ void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
+
+ void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
+ {
++ psock_set_prog(&psock->progs.stream_verdict, NULL);
++ psock_set_prog(&psock->progs.skb_verdict, NULL);
++
+ if (!psock->saved_data_ready)
+ return;
+
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index e252b8ec2b85e..c89f527411e84 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -167,8 +167,11 @@ static void sock_map_del_link(struct sock *sk,
+ write_lock_bh(&sk->sk_callback_lock);
+ if (strp_stop)
+ sk_psock_stop_strp(sk, psock);
+- else
++ if (verdict_stop)
+ sk_psock_stop_verdict(sk, psock);
++
++ if (psock->psock_update_sk_prot)
++ psock->psock_update_sk_prot(sk, psock, false);
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+ }
+@@ -282,6 +285,12 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
+
+ if (msg_parser)
+ psock_set_prog(&psock->progs.msg_parser, msg_parser);
++ if (stream_parser)
++ psock_set_prog(&psock->progs.stream_parser, stream_parser);
++ if (stream_verdict)
++ psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
++ if (skb_verdict)
++ psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
+
+ ret = sock_map_init_proto(sk, psock);
+ if (ret < 0)
+@@ -292,14 +301,10 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
+ ret = sk_psock_init_strp(sk, psock);
+ if (ret)
+ goto out_unlock_drop;
+- psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
+- psock_set_prog(&psock->progs.stream_parser, stream_parser);
+ sk_psock_start_strp(sk, psock);
+ } else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
+- psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
+ sk_psock_start_verdict(sk,psock);
+ } else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
+- psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
+ sk_psock_start_verdict(sk, psock);
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
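Taken together, the skmsg.c and sock_map.c hunks make teardown the exact mirror of setup: the programs attached in sock_map_link() are now detached inside sk_psock_stop_strp()/sk_psock_stop_verdict(), both stop paths can run (the `else` is gone), and the saved sk_prot is restored, all under sk_callback_lock. A small sketch of the symmetric install/remove shape, with hypothetical names:

#include <pthread.h>
#include <stddef.h>

struct hooks {
    pthread_mutex_t lock;
    void (*parser)(void);
    void (*verdict)(void);
};

static void hooks_install(struct hooks *h, void (*p)(void), void (*v)(void))
{
    pthread_mutex_lock(&h->lock);
    h->parser = p;
    h->verdict = v;
    /* ... enable delivery ... */
    pthread_mutex_unlock(&h->lock);
}

static void hooks_remove(struct hooks *h)
{
    pthread_mutex_lock(&h->lock);
    /* mirror of install: clear everything set there, under the same
     * lock, so no reader can observe a half-torn-down state */
    h->parser = NULL;
    h->verdict = NULL;
    pthread_mutex_unlock(&h->lock);
}

static void noop(void) { }

int main(void)
{
    struct hooks h = { .lock = PTHREAD_MUTEX_INITIALIZER };
    hooks_install(&h, noop, noop);
    hooks_remove(&h);
    return 0;
}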
+diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
+index 1797a0a900195..b3729bdafb602 100644
+--- a/net/ethtool/netlink.c
++++ b/net/ethtool/netlink.c
+@@ -40,7 +40,8 @@ int ethnl_ops_begin(struct net_device *dev)
+ if (dev->dev.parent)
+ pm_runtime_get_sync(dev->dev.parent);
+
+- if (!netif_device_present(dev)) {
++ if (!netif_device_present(dev) ||
++ dev->reg_state == NETREG_UNREGISTERING) {
+ ret = -ENODEV;
+ goto err;
+ }
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 2ce3fca545d37..3f6823bdd31e5 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -917,7 +917,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
++ if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
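The udp.c hunk compares the payload length (`datalen`, computed earlier in udp_send_skb() as the data without the UDP header) against the GSO limit, instead of skb->len, which also counts the header; with the old check a send carrying exactly gso_size * UDP_MAX_SEGMENTS bytes of data was wrongly rejected. A sketch of the corrected bound (the constant's value here is illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <stddef.h>

#define UDP_MAX_SEGMENTS 64        /* illustrative value */

static bool gso_len_ok(size_t skb_len, size_t header_len, size_t gso_size)
{
    size_t datalen = skb_len - header_len;   /* payload only */
    return datalen <= gso_size * UDP_MAX_SEGMENTS;
}

int main(void)
{
    /* payload exactly at the limit: 1000 * 64 bytes, plus an 8B header */
    size_t gso = 1000, hdr = 8, skb = gso * UDP_MAX_SEGMENTS + hdr;

    printf("payload check: %s\n", gso_len_ok(skb, hdr, gso) ? "ok" : "rejected");
    /* a check against skb_len instead of datalen would reject this send */
    return 0;
}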
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index 3adc5d9211ad6..d64855010948d 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -161,6 +161,14 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+ hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
+
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++
++ /* the control block has been erased, so we have to set the
++ * iif once again.
++ * We read the receiving interface index directly from the
++ * skb->skb_iif as it is done in the IPv4 receiving path (i.e.:
++ * ip_rcv_core(...)).
++ */
++ IP6CB(skb)->iif = skb->skb_iif;
+ }
+
+ hdr->nexthdr = NEXTHDR_ROUTING;
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 770a63103c7a4..4712a90a1820c 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -684,7 +684,7 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
+
+ tstamp = nf_conn_tstamp_find(ct);
+ if (tstamp) {
+- s32 timeout = ct->timeout - nfct_time_stamp;
++ s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
+
+ tstamp->stop = ktime_get_real_ns();
+ if (timeout < 0)
+@@ -1036,7 +1036,7 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
+ }
+
+ /* We want the clashing entry to go away real soon: 1 second timeout. */
+- loser_ct->timeout = nfct_time_stamp + HZ;
++ WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ);
+
+ /* IPS_NAT_CLASH removes the entry automatically on the first
+ * reply. Also prevents UDP tracker from moving the entry to
+@@ -1560,7 +1560,7 @@ __nf_conntrack_alloc(struct net *net,
+ /* save hash for reusing when confirming */
+ *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
+ ct->status = 0;
+- ct->timeout = 0;
++ WRITE_ONCE(ct->timeout, 0);
+ write_pnet(&ct->ct_net, net);
+ memset(&ct->__nfct_init_offset, 0,
+ offsetof(struct nf_conn, proto) -
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index c7708bde057cb..81d03acf68d4d 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -1998,7 +1998,7 @@ static int ctnetlink_change_timeout(struct nf_conn *ct,
+
+ if (timeout > INT_MAX)
+ timeout = INT_MAX;
+- ct->timeout = nfct_time_stamp + (u32)timeout;
++ WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
+
+ if (test_bit(IPS_DYING_BIT, &ct->status))
+ return -ETIME;
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index 87a7388b6c894..ed37bb9b4e588 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -201,8 +201,8 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
+ if (timeout < 0)
+ timeout = 0;
+
+- if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
+- ct->timeout = nfct_time_stamp + timeout;
++ if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
++ WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
+ }
+
+ static void flow_offload_fixup_ct_state(struct nf_conn *ct)
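The three conntrack hunks above wrap every lockless access to ct->timeout in READ_ONCE()/WRITE_ONCE(), so concurrent readers and the writers in nf_ct_delete()/ctnetlink each get a single, untorn access that the compiler may not fuse, re-load, or tear. A userspace rendering of the idiom; the macro definitions below are simplified versions of the kernel's, and strictly conforming C11 code would reach for atomics instead:

#include <pthread.h>
#include <stdio.h>

#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static unsigned int timeout;

static void *writer(void *arg)
{
    (void)arg;
    for (int i = 0; i < 1000; i++)
        WRITE_ONCE(timeout, i);    /* plain store, but never torn or fused */
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, writer, NULL);
    for (int i = 0; i < 1000; i++)
        (void)READ_ONCE(timeout);  /* each access is a real load */
    pthread_join(t, NULL);
    printf("final: %u\n", READ_ONCE(timeout));
    return 0;
}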
+diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
+index af4ee874a067c..dbe1f2e7dd9ed 100644
+--- a/net/netfilter/nft_exthdr.c
++++ b/net/netfilter/nft_exthdr.c
+@@ -236,7 +236,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
+
+ tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
+ if (!tcph)
+- return;
++ goto err;
+
+ opt = (u8 *)tcph;
+ for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
+@@ -251,16 +251,16 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
+ continue;
+
+ if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
+- return;
++ goto err;
+
+ if (skb_ensure_writable(pkt->skb,
+ nft_thoff(pkt) + i + priv->len))
+- return;
++ goto err;
+
+ tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff,
+ &tcphdr_len);
+ if (!tcph)
+- return;
++ goto err;
+
+ offset = i + priv->offset;
+
+@@ -303,6 +303,9 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
+
+ return;
+ }
++ return;
++err:
++ regs->verdict.code = NFT_BREAK;
+ }
+
+ static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
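In the nft_exthdr hunk, every failure inside the TCP option rewrite now funnels through an `err` label that records NFT_BREAK, instead of returning silently as if the expression had succeeded; an eval step that cannot do its work must say so in the verdict register. The general shape, with hypothetical names:

#include <stdio.h>

enum verdict { VERDICT_CONTINUE = 0, VERDICT_BREAK = 1 };

struct regs { enum verdict verdict; };

static int parse_header(const unsigned char *pkt, unsigned int len)
{
    return len >= 20 ? 0 : -1;   /* stand-in for header validation */
}

static void expr_eval(struct regs *regs, const unsigned char *pkt,
                      unsigned int len)
{
    if (parse_header(pkt, len) < 0)
        goto err;                /* every failure funnels here */
    /* ... modify the packet ... */
    return;                      /* success: verdict left untouched */
err:
    regs->verdict = VERDICT_BREAK;   /* stop this rule explicitly */
}

int main(void)
{
    unsigned char pkt[10] = { 0 };
    struct regs regs = { VERDICT_CONTINUE };

    expr_eval(&regs, pkt, sizeof(pkt));
    printf("verdict: %d\n", regs.verdict);
    return 0;
}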
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index e517663e0cd17..6f4116e729581 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -886,7 +886,7 @@ static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(4, lt, 4, pkt[4], bsize);
+
+ NFT_PIPAPO_AVX2_AND(5, 0, 1);
+- NFT_PIPAPO_AVX2_BUCKET_LOAD8(6, lt, 6, pkt[5], bsize);
++ NFT_PIPAPO_AVX2_BUCKET_LOAD8(6, lt, 5, pkt[5], bsize);
+ NFT_PIPAPO_AVX2_AND(7, 2, 3);
+
+ /* Stall */
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 49089c50872e6..082085c25a8e4 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -1392,8 +1392,10 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
+ {
+ struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
+
+- nfc_device_iter_exit(iter);
+- kfree(iter);
++ if (iter) {
++ nfc_device_iter_exit(iter);
++ kfree(iter);
++ }
+
+ return 0;
+ }
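The nfc/netlink.c hunk guards the dump's done() callback: if the dump errored out before its start() callback ever allocated the iterator, cb->args[0] is still zero and must not be handed to the cleanup helpers. The same defensive shape in miniature (names hypothetical):

#include <stdlib.h>

struct dev_iter { int pos; };

static void dev_iter_exit(struct dev_iter *it) { (void)it; }

static int dump_done(void *state)
{
    struct dev_iter *it = state;

    if (it) {                  /* start() may never have run */
        dev_iter_exit(it);
        free(it);
    }
    return 0;
}

int main(void)
{
    dump_done(NULL);                                  /* early-failure path */
    dump_done(calloc(1, sizeof(struct dev_iter)));    /* normal path */
    return 0;
}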
+diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
+index 830f3559f727a..d6aba6edd16e5 100644
+--- a/net/sched/sch_fq_pie.c
++++ b/net/sched/sch_fq_pie.c
+@@ -531,6 +531,7 @@ static void fq_pie_destroy(struct Qdisc *sch)
+ struct fq_pie_sched_data *q = qdisc_priv(sch);
+
+ tcf_block_put(q->block);
++ q->p_params.tupdate = 0;
+ del_timer_sync(&q->adapt_timer);
+ kvfree(q->flows);
+ }
+diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
+index 470dabc60aa0e..edff063e088d2 100644
+--- a/sound/core/control_compat.c
++++ b/sound/core/control_compat.c
+@@ -264,6 +264,7 @@ static int copy_ctl_value_to_user(void __user *userdata,
+ struct snd_ctl_elem_value *data,
+ int type, int count)
+ {
++ struct snd_ctl_elem_value32 __user *data32 = userdata;
+ int i, size;
+
+ if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
+@@ -280,6 +281,8 @@ static int copy_ctl_value_to_user(void __user *userdata,
+ if (copy_to_user(valuep, data->value.bytes.data, size))
+ return -EFAULT;
+ }
++ if (copy_to_user(&data32->id, &data->id, sizeof(data32->id)))
++ return -EFAULT;
+ return 0;
+ }
+
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index 82a818734a5f7..20a0a4771b9a8 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -147,7 +147,7 @@ snd_pcm_hw_param_value_min(const struct snd_pcm_hw_params *params,
+ *
+ * Return the maximum value for field PAR.
+ */
+-static unsigned int
++static int
+ snd_pcm_hw_param_value_max(const struct snd_pcm_hw_params *params,
+ snd_pcm_hw_param_t var, int *dir)
+ {
+@@ -682,18 +682,24 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *oss_params,
+ struct snd_pcm_hw_params *slave_params)
+ {
+- size_t s;
+- size_t oss_buffer_size, oss_period_size, oss_periods;
+- size_t min_period_size, max_period_size;
++ ssize_t s;
++ ssize_t oss_buffer_size;
++ ssize_t oss_period_size, oss_periods;
++ ssize_t min_period_size, max_period_size;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ size_t oss_frame_size;
+
+ oss_frame_size = snd_pcm_format_physical_width(params_format(oss_params)) *
+ params_channels(oss_params) / 8;
+
++ oss_buffer_size = snd_pcm_hw_param_value_max(slave_params,
++ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
++ NULL);
++ if (oss_buffer_size <= 0)
++ return -EINVAL;
+ oss_buffer_size = snd_pcm_plug_client_size(substream,
+- snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size;
+- if (!oss_buffer_size)
++ oss_buffer_size * oss_frame_size);
++ if (oss_buffer_size <= 0)
+ return -EINVAL;
+ oss_buffer_size = rounddown_pow_of_two(oss_buffer_size);
+ if (atomic_read(&substream->mmap_count)) {
+@@ -730,7 +736,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
+
+ min_period_size = snd_pcm_plug_client_size(substream,
+ snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
+- if (min_period_size) {
++ if (min_period_size > 0) {
+ min_period_size *= oss_frame_size;
+ min_period_size = roundup_pow_of_two(min_period_size);
+ if (oss_period_size < min_period_size)
+@@ -739,7 +745,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
+
+ max_period_size = snd_pcm_plug_client_size(substream,
+ snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
+- if (max_period_size) {
++ if (max_period_size > 0) {
+ max_period_size *= oss_frame_size;
+ max_period_size = rounddown_pow_of_two(max_period_size);
+ if (oss_period_size > max_period_size)
+@@ -752,7 +758,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
+ oss_periods = substream->oss.setup.periods;
+
+ s = snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL);
+- if (runtime->oss.maxfrags && s > runtime->oss.maxfrags)
++ if (s > 0 && runtime->oss.maxfrags && s > runtime->oss.maxfrags)
+ s = runtime->oss.maxfrags;
+ if (oss_periods > s)
+ oss_periods = s;
+@@ -878,8 +884,15 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
+ err = -EINVAL;
+ goto failure;
+ }
+- choose_rate(substream, sparams, runtime->oss.rate);
+- snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_CHANNELS, runtime->oss.channels, NULL);
++
++ err = choose_rate(substream, sparams, runtime->oss.rate);
++ if (err < 0)
++ goto failure;
++ err = snd_pcm_hw_param_near(substream, sparams,
++ SNDRV_PCM_HW_PARAM_CHANNELS,
++ runtime->oss.channels, NULL);
++ if (err < 0)
++ goto failure;
+
+ format = snd_pcm_oss_format_from(runtime->oss.format);
+
+@@ -1956,7 +1969,7 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
+ if (runtime->oss.subdivision || runtime->oss.fragshift)
+ return -EINVAL;
+ fragshift = val & 0xffff;
+- if (fragshift >= 31)
++ if (fragshift >= 25) /* should be large enough */
+ return -EINVAL;
+ runtime->oss.fragshift = fragshift;
+ runtime->oss.maxfrags = (val >> 16) & 0xffff;
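The pcm_oss.c hunk switches the size computations to signed ssize_t and the hw-param helper to int, then tests `> 0` rather than truthiness: a negative errno stored in a size_t becomes a huge positive value that sails straight past `if (!x)`. (It also tightens the fragshift bound to 25, keeping 1 << fragshift well inside later signed arithmetic.) The failure mode in miniature:

#include <stdio.h>
#include <errno.h>
#include <sys/types.h>

/* returns a size on success, or a negative errno */
static ssize_t max_buffer_size(int ok)
{
    return ok ? 4096 : -EINVAL;
}

int main(void)
{
    size_t wrong = (size_t)max_buffer_size(0);   /* -EINVAL wraps huge */
    ssize_t right = max_buffer_size(0);

    printf("unsigned view: %zu (passes an !x check)\n", wrong);
    if (right <= 0)                              /* signed check works */
        printf("caught error: %zd\n", right);
    return 0;
}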
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 9ce7457533c96..3599f4c85ebf7 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6503,22 +6503,26 @@ static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec,
+ /* for alc285_fixup_ideapad_s740_coef() */
+ #include "ideapad_s740_helper.c"
+
+-static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *codec,
+- const struct hda_fixup *fix,
+- int action)
++static const struct coef_fw alc256_fixup_set_coef_defaults_coefs[] = {
++ WRITE_COEF(0x10, 0x0020), WRITE_COEF(0x24, 0x0000),
++ WRITE_COEF(0x26, 0x0000), WRITE_COEF(0x29, 0x3000),
++ WRITE_COEF(0x37, 0xfe05), WRITE_COEF(0x45, 0x5089),
++ {}
++};
++
++static void alc256_fixup_set_coef_defaults(struct hda_codec *codec,
++ const struct hda_fixup *fix,
++ int action)
+ {
+ /*
+- * A certain other OS sets these coeffs to different values. On at least one TongFang
+- * barebone these settings might survive even a cold reboot. So to restore a clean slate the
+- * values are explicitly reset to default here. Without this, the external microphone is
+- * always in a plugged-in state, while the internal microphone is always in an unplugged
+- * state, breaking the ability to use the internal microphone.
+- */
+- alc_write_coef_idx(codec, 0x24, 0x0000);
+- alc_write_coef_idx(codec, 0x26, 0x0000);
+- alc_write_coef_idx(codec, 0x29, 0x3000);
+- alc_write_coef_idx(codec, 0x37, 0xfe05);
+- alc_write_coef_idx(codec, 0x45, 0x5089);
++ * A certain other OS sets these coeffs to different values. On at least
++ * one TongFang barebone these settings might survive even a cold
++ * reboot. So to restore a clean slate the values are explicitly reset
++ * to default here. Without this, the external microphone is always in a
++ * plugged-in state, while the internal microphone is always in an
++ * unplugged state, breaking the ability to use the internal microphone.
++ */
++ alc_process_coef_fw(codec, alc256_fixup_set_coef_defaults_coefs);
+ }
+
+ static const struct coef_fw alc233_fixup_no_audio_jack_coefs[] = {
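The rewritten fixup above folds the individual alc_write_coef_idx() calls into a coef_fw table driven by alc_process_coef_fw(), the pattern the rest of patch_realtek.c already uses: the register writes become data, and one loop applies them. A standalone sketch of the table-driven shape (write_coef() stands in for the hardware access):

#include <stdio.h>

struct reg_write { unsigned char idx; unsigned short val; };

/* table of writes, terminated by an all-zero entry, mirroring the
 * WRITE_COEF()/{} style above (names hypothetical) */
static const struct reg_write defaults[] = {
    { 0x10, 0x0020 }, { 0x24, 0x0000 },
    { 0x26, 0x0000 }, { 0x29, 0x3000 },
    { 0 }
};

static void write_coef(unsigned char idx, unsigned short val)
{
    printf("coef[0x%02x] = 0x%04x\n", idx, val);  /* stand-in for HW I/O */
}

static void apply(const struct reg_write *t)
{
    for (; t->idx || t->val; t++)
        write_coef(t->idx, t->val);
}

int main(void)
{
    apply(defaults);
    return 0;
}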
+@@ -6759,7 +6763,7 @@ enum {
+ ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
+ ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
+ ALC287_FIXUP_13S_GEN2_SPEAKERS,
+- ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
++ ALC256_FIXUP_SET_COEF_DEFAULTS,
+ ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+ ALC233_FIXUP_NO_AUDIO_JACK,
+ };
+@@ -8465,9 +8469,9 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE,
+ },
+- [ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS] = {
++ [ALC256_FIXUP_SET_COEF_DEFAULTS] = {
+ .type = HDA_FIXUP_FUNC,
+- .v.func = alc256_fixup_tongfang_reset_persistent_settings,
++ .v.func = alc256_fixup_set_coef_defaults,
+ },
+ [ALC245_FIXUP_HP_GPIO_LED] = {
+ .type = HDA_FIXUP_FUNC,
+@@ -8929,7 +8933,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+ SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
+ SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+- SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS),
++ SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
+ SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+@@ -10231,6 +10235,27 @@ static void alc671_fixup_hp_headset_mic2(struct hda_codec *codec,
+ }
+ }
+
++static void alc897_hp_automute_hook(struct hda_codec *codec,
++ struct hda_jack_callback *jack)
++{
++ struct alc_spec *spec = codec->spec;
++ int vref;
++
++ snd_hda_gen_hp_automute(codec, jack);
++ vref = spec->gen.hp_jack_present ? (PIN_HP | AC_PINCTL_VREF_100) : PIN_HP;
++ snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
++ vref);
++}
++
++static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ struct alc_spec *spec = codec->spec;
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++ spec->gen.hp_automute_hook = alc897_hp_automute_hook;
++ }
++}
++
+ static const struct coef_fw alc668_coefs[] = {
+ WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0),
+ WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80),
+@@ -10311,6 +10336,8 @@ enum {
+ ALC668_FIXUP_ASUS_NO_HEADSET_MIC,
+ ALC668_FIXUP_HEADSET_MIC,
+ ALC668_FIXUP_MIC_DET_COEF,
++ ALC897_FIXUP_LENOVO_HEADSET_MIC,
++ ALC897_FIXUP_HEADSET_MIC_PIN,
+ };
+
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -10717,6 +10744,19 @@ static const struct hda_fixup alc662_fixups[] = {
+ {}
+ },
+ },
++ [ALC897_FIXUP_LENOVO_HEADSET_MIC] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc897_fixup_lenovo_headset_mic,
++ },
++ [ALC897_FIXUP_HEADSET_MIC_PIN] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x1a, 0x03a11050 },
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC
++ },
+ };
+
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -10761,6 +10801,10 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
+ SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
++ SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
++ SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
++ SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
++ SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
+diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
+index d550c0705c28b..5ac2b1444694d 100644
+--- a/sound/soc/codecs/rt5682.c
++++ b/sound/soc/codecs/rt5682.c
+@@ -2840,6 +2840,8 @@ static int rt5682_register_dai_clks(struct snd_soc_component *component)
+
+ for (i = 0; i < RT5682_DAI_NUM_CLKS; ++i) {
+ struct clk_init_data init = { };
++ struct clk_parent_data parent_data;
++ const struct clk_hw *parent;
+
+ dai_clk_hw = &rt5682->dai_clks_hw[i];
+
+@@ -2847,17 +2849,17 @@ static int rt5682_register_dai_clks(struct snd_soc_component *component)
+ case RT5682_DAI_WCLK_IDX:
+ /* Make MCLK the parent of WCLK */
+ if (rt5682->mclk) {
+- init.parent_data = &(struct clk_parent_data){
++ parent_data = (struct clk_parent_data){
+ .fw_name = "mclk",
+ };
++ init.parent_data = &parent_data;
+ init.num_parents = 1;
+ }
+ break;
+ case RT5682_DAI_BCLK_IDX:
+ /* Make WCLK the parent of BCLK */
+- init.parent_hws = &(const struct clk_hw *){
+- &rt5682->dai_clks_hw[RT5682_DAI_WCLK_IDX]
+- };
++ parent = &rt5682->dai_clks_hw[RT5682_DAI_WCLK_IDX];
++ init.parent_hws = &parent;
+ init.num_parents = 1;
+ break;
+ default:
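The rt5682.c hunk exists because `&(struct clk_parent_data){ ... }` creates a compound literal whose storage ends with the enclosing block: stashing its address in `init.parent_data` inside the switch arm and using `init` afterwards dereferenced a dead object. Hoisting the data into locals declared at loop scope, as the patch does, keeps the storage alive across the use. The rule in miniature:

#include <stdio.h>

struct parent { const char *name; };
struct init   { const struct parent *parent_data; };

int main(void)
{
    struct init init = { 0 };
    struct parent parent_storage;          /* lives until end of main */

    /*
     * Buggy shape (do not do this):
     *     { init.parent_data = &(struct parent){ .name = "mclk" }; }
     *     ... use init ...   // the literal died at the closing brace
     */
    parent_storage = (struct parent){ .name = "mclk" };
    init.parent_data = &parent_storage;    /* outlives every use below */

    printf("parent: %s\n", init.parent_data->name);
    return 0;
}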
+diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
+index 4f568abd59e24..e63c6b723d76c 100644
+--- a/sound/soc/codecs/wcd934x.c
++++ b/sound/soc/codecs/wcd934x.c
+@@ -3256,6 +3256,9 @@ static int wcd934x_compander_set(struct snd_kcontrol *kc,
+ int value = ucontrol->value.integer.value[0];
+ int sel;
+
++ if (wcd->comp_enabled[comp] == value)
++ return 0;
++
+ wcd->comp_enabled[comp] = value;
+ sel = value ? WCD934X_HPH_GAIN_SRC_SEL_COMPANDER :
+ WCD934X_HPH_GAIN_SRC_SEL_REGISTER;
+@@ -3279,10 +3282,10 @@ static int wcd934x_compander_set(struct snd_kcontrol *kc,
+ case COMPANDER_8:
+ break;
+ default:
+- break;
++ return 0;
+ }
+
+- return 0;
++ return 1;
+ }
+
+ static int wcd934x_rx_hph_mode_get(struct snd_kcontrol *kc,
+@@ -3326,6 +3329,31 @@ static int slim_rx_mux_get(struct snd_kcontrol *kc,
+ return 0;
+ }
+
++static int slim_rx_mux_to_dai_id(int mux)
++{
++ int aif_id;
++
++ switch (mux) {
++ case 1:
++ aif_id = AIF1_PB;
++ break;
++ case 2:
++ aif_id = AIF2_PB;
++ break;
++ case 3:
++ aif_id = AIF3_PB;
++ break;
++ case 4:
++ aif_id = AIF4_PB;
++ break;
++ default:
++ aif_id = -1;
++ break;
++ }
++
++ return aif_id;
++}
++
+ static int slim_rx_mux_put(struct snd_kcontrol *kc,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+@@ -3333,43 +3361,59 @@ static int slim_rx_mux_put(struct snd_kcontrol *kc,
+ struct wcd934x_codec *wcd = dev_get_drvdata(w->dapm->dev);
+ struct soc_enum *e = (struct soc_enum *)kc->private_value;
+ struct snd_soc_dapm_update *update = NULL;
++ struct wcd934x_slim_ch *ch, *c;
+ u32 port_id = w->shift;
++ bool found = false;
++ int mux_idx;
++ int prev_mux_idx = wcd->rx_port_value[port_id];
++ int aif_id;
+
+- if (wcd->rx_port_value[port_id] == ucontrol->value.enumerated.item[0])
+- return 0;
++ mux_idx = ucontrol->value.enumerated.item[0];
+
+- wcd->rx_port_value[port_id] = ucontrol->value.enumerated.item[0];
++ if (mux_idx == prev_mux_idx)
++ return 0;
+
+- switch (wcd->rx_port_value[port_id]) {
++ switch(mux_idx) {
+ case 0:
+- list_del_init(&wcd->rx_chs[port_id].list);
+- break;
+- case 1:
+- list_add_tail(&wcd->rx_chs[port_id].list,
+- &wcd->dai[AIF1_PB].slim_ch_list);
+- break;
+- case 2:
+- list_add_tail(&wcd->rx_chs[port_id].list,
+- &wcd->dai[AIF2_PB].slim_ch_list);
+- break;
+- case 3:
+- list_add_tail(&wcd->rx_chs[port_id].list,
+- &wcd->dai[AIF3_PB].slim_ch_list);
++ aif_id = slim_rx_mux_to_dai_id(prev_mux_idx);
++ if (aif_id < 0)
++ return 0;
++
++ list_for_each_entry_safe(ch, c, &wcd->dai[aif_id].slim_ch_list, list) {
++ if (ch->port == port_id + WCD934X_RX_START) {
++ found = true;
++ list_del_init(&ch->list);
++ break;
++ }
++ }
++ if (!found)
++ return 0;
++
+ break;
+- case 4:
+- list_add_tail(&wcd->rx_chs[port_id].list,
+- &wcd->dai[AIF4_PB].slim_ch_list);
++ case 1 ... 4:
++ aif_id = slim_rx_mux_to_dai_id(mux_idx);
++ if (aif_id < 0)
++ return 0;
++
++ if (list_empty(&wcd->rx_chs[port_id].list)) {
++ list_add_tail(&wcd->rx_chs[port_id].list,
++ &wcd->dai[aif_id].slim_ch_list);
++ } else {
++ dev_err(wcd->dev ,"SLIM_RX%d PORT is busy\n", port_id);
++ return 0;
++ }
+ break;
++
+ default:
+- dev_err(wcd->dev, "Unknown AIF %d\n",
+- wcd->rx_port_value[port_id]);
++ dev_err(wcd->dev, "Unknown AIF %d\n", mux_idx);
+ goto err;
+ }
+
++ wcd->rx_port_value[port_id] = mux_idx;
+ snd_soc_dapm_mux_update_power(w->dapm, kc, wcd->rx_port_value[port_id],
+ e, update);
+
+- return 0;
++ return 1;
+ err:
+ return -EINVAL;
+ }
+@@ -3815,6 +3859,7 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc,
+ struct soc_mixer_control *mixer =
+ (struct soc_mixer_control *)kc->private_value;
+ int enable = ucontrol->value.integer.value[0];
++ struct wcd934x_slim_ch *ch, *c;
+ int dai_id = widget->shift;
+ int port_id = mixer->shift;
+
+@@ -3822,17 +3867,32 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc,
+ if (enable == wcd->tx_port_value[port_id])
+ return 0;
+
+- wcd->tx_port_value[port_id] = enable;
+-
+- if (enable)
+- list_add_tail(&wcd->tx_chs[port_id].list,
+- &wcd->dai[dai_id].slim_ch_list);
+- else
+- list_del_init(&wcd->tx_chs[port_id].list);
++ if (enable) {
++ if (list_empty(&wcd->tx_chs[port_id].list)) {
++ list_add_tail(&wcd->tx_chs[port_id].list,
++ &wcd->dai[dai_id].slim_ch_list);
++ } else {
++ dev_err(wcd->dev ,"SLIM_TX%d PORT is busy\n", port_id);
++ return 0;
++ }
++ } else {
++ bool found = false;
++
++ list_for_each_entry_safe(ch, c, &wcd->dai[dai_id].slim_ch_list, list) {
++ if (ch->port == port_id) {
++ found = true;
++ list_del_init(&wcd->tx_chs[port_id].list);
++ break;
++ }
++ }
++ if (!found)
++ return 0;
++ }
+
++ wcd->tx_port_value[port_id] = enable;
+ snd_soc_dapm_mixer_update_power(widget->dapm, kc, enable, update);
+
+- return 0;
++ return 1;
+ }
+
+ static const struct snd_kcontrol_new aif1_slim_cap_mixer[] = {
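The wcd934x hunks (and the wsa881x and q6routing ones that follow) all enforce the same ALSA kcontrol contract: a .put() handler returns 1 when it actually changed the value, 0 when the write was a no-op, and a negative errno on error, because the core emits a control-change notification only for a return of 1. Bailing out early on `old == new` also keeps no-op writes from touching the hardware or the channel lists. The contract in miniature:

#include <stdio.h>

static int current_val;

/* ALSA-style put(): 1 = changed, 0 = unchanged, <0 = error */
static int ctl_put(int new_val)
{
    if (new_val < 0 || new_val > 1)
        return -22;                /* -EINVAL */
    if (current_val == new_val)
        return 0;                  /* no notification, no HW access */
    current_val = new_val;
    /* ... program the hardware ... */
    return 1;                      /* caller raises the change event */
}

int main(void)
{
    printf("%d\n", ctl_put(1));    /* 1: changed */
    printf("%d\n", ctl_put(1));    /* 0: no-op */
    printf("%d\n", ctl_put(9));    /* -22: bad value */
    return 0;
}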
+diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
+index 2da4a5fa7a18d..564b78f3cdd0a 100644
+--- a/sound/soc/codecs/wsa881x.c
++++ b/sound/soc/codecs/wsa881x.c
+@@ -772,7 +772,8 @@ static int wsa881x_put_pa_gain(struct snd_kcontrol *kc,
+
+ usleep_range(1000, 1010);
+ }
+- return 0;
++
++ return 1;
+ }
+
+ static int wsa881x_get_port(struct snd_kcontrol *kcontrol,
+@@ -816,15 +817,22 @@ static int wsa881x_set_port(struct snd_kcontrol *kcontrol,
+ (struct soc_mixer_control *)kcontrol->private_value;
+ int portidx = mixer->reg;
+
+- if (ucontrol->value.integer.value[0])
++ if (ucontrol->value.integer.value[0]) {
++ if (data->port_enable[portidx])
++ return 0;
++
+ data->port_enable[portidx] = true;
+- else
++ } else {
++ if (!data->port_enable[portidx])
++ return 0;
++
+ data->port_enable[portidx] = false;
++ }
+
+ if (portidx == WSA881X_PORT_BOOST) /* Boost Switch */
+ wsa881x_boost_ctrl(comp, data->port_enable[portidx]);
+
+- return 0;
++ return 1;
+ }
+
+ static const char * const smart_boost_lvl_text[] = {
+diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
+index 243b8179e59df..18c90bb4922be 100644
+--- a/sound/soc/qcom/qdsp6/q6routing.c
++++ b/sound/soc/qcom/qdsp6/q6routing.c
+@@ -492,14 +492,16 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
+ struct session_data *session = &data->sessions[session_id];
+
+ if (ucontrol->value.integer.value[0]) {
++ if (session->port_id == be_id)
++ return 0;
++
+ session->port_id = be_id;
+ snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
+ } else {
+- if (session->port_id == be_id) {
+- session->port_id = -1;
++ if (session->port_id == -1 || session->port_id != be_id)
+ return 0;
+- }
+
++ session->port_id = -1;
+ snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
+ }
+
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index d489c1de3baec..823b6b8de942d 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -3016,11 +3016,11 @@ static const struct snd_djm_ctl snd_djm_ctls_750mk2[] = {
+
+
+ static const struct snd_djm_device snd_djm_devices[] = {
+- SND_DJM_DEVICE(250mk2),
+- SND_DJM_DEVICE(750),
+- SND_DJM_DEVICE(750mk2),
+- SND_DJM_DEVICE(850),
+- SND_DJM_DEVICE(900nxs2)
++ [SND_DJM_250MK2_IDX] = SND_DJM_DEVICE(250mk2),
++ [SND_DJM_750_IDX] = SND_DJM_DEVICE(750),
++ [SND_DJM_850_IDX] = SND_DJM_DEVICE(850),
++ [SND_DJM_900NXS2_IDX] = SND_DJM_DEVICE(900nxs2),
++ [SND_DJM_750MK2_IDX] = SND_DJM_DEVICE(750mk2),
+ };
+
+
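The mixer_quirks.c hunk pins each table slot to its enum index with designated initializers. The positional list and the enum had drifted out of step when the 750MK2 entry was introduced, so later devices were looked up under the wrong index; with `[INDEX] = ...` the source order is irrelevant. The idiom in miniature:

#include <stdio.h>

enum dev_idx {
    DEV_250MK2,
    DEV_750,
    DEV_850,
    DEV_900NXS2,
    DEV_750MK2,
    DEV_COUNT
};

static const char *const dev_name[DEV_COUNT] = {
    [DEV_250MK2]  = "250mk2",
    [DEV_750]     = "750",
    [DEV_750MK2]  = "750mk2",    /* listed out of order on purpose */
    [DEV_850]     = "850",
    [DEV_900NXS2] = "900nxs2",
};

int main(void)
{
    printf("%s\n", dev_name[DEV_850]);   /* "850", regardless of order */
    return 0;
}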
+diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
+index 3dd2f68366f95..88dd7db55d385 100644
+--- a/tools/build/Makefile.feature
++++ b/tools/build/Makefile.feature
+@@ -48,7 +48,6 @@ FEATURE_TESTS_BASIC := \
+ numa_num_possible_cpus \
+ libperl \
+ libpython \
+- libpython-version \
+ libslang \
+ libslang-include-subdir \
+ libtraceevent \
+diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
+index eff55d287db1f..e1e670014bd0c 100644
+--- a/tools/build/feature/Makefile
++++ b/tools/build/feature/Makefile
+@@ -32,7 +32,6 @@ FILES= \
+ test-numa_num_possible_cpus.bin \
+ test-libperl.bin \
+ test-libpython.bin \
+- test-libpython-version.bin \
+ test-libslang.bin \
+ test-libslang-include-subdir.bin \
+ test-libtraceevent.bin \
+@@ -223,9 +222,6 @@ $(OUTPUT)test-libperl.bin:
+ $(OUTPUT)test-libpython.bin:
+ $(BUILD) $(FLAGS_PYTHON_EMBED)
+
+-$(OUTPUT)test-libpython-version.bin:
+- $(BUILD)
+-
+ $(OUTPUT)test-libbfd.bin:
+ $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
+
+diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
+index 0b243ce842be3..5ffafb967b6e4 100644
+--- a/tools/build/feature/test-all.c
++++ b/tools/build/feature/test-all.c
+@@ -14,10 +14,6 @@
+ # include "test-libpython.c"
+ #undef main
+
+-#define main main_test_libpython_version
+-# include "test-libpython-version.c"
+-#undef main
+-
+ #define main main_test_libperl
+ # include "test-libperl.c"
+ #undef main
+@@ -177,7 +173,6 @@
+ int main(int argc, char *argv[])
+ {
+ main_test_libpython();
+- main_test_libpython_version();
+ main_test_libperl();
+ main_test_hello();
+ main_test_libelf();
+diff --git a/tools/build/feature/test-libpython-version.c b/tools/build/feature/test-libpython-version.c
+deleted file mode 100644
+index 47714b942d4d3..0000000000000
+--- a/tools/build/feature/test-libpython-version.c
++++ /dev/null
+@@ -1,11 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-#include <Python.h>
+-
+-#if PY_VERSION_HEX >= 0x03000000
+- #error
+-#endif
+-
+-int main(void)
+-{
+- return 0;
+-}
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index 14e3e8d702a02..3c077f61d676d 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -271,8 +271,6 @@ endif
+
+ FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
+ FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
+-FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
+-FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
+
+ FEATURE_CHECK_LDFLAGS-libaio = -lrt
+
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index 5ab631702769b..b0034ee4bba50 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -1204,61 +1204,69 @@ out_no_progress:
+
+ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
+ {
++ enum intel_pt_sample_type type = decoder->state.type;
+ bool ret = false;
+
++ decoder->state.type &= ~INTEL_PT_BRANCH;
++
+ if (decoder->set_fup_tx_flags) {
+ decoder->set_fup_tx_flags = false;
+ decoder->tx_flags = decoder->fup_tx_flags;
+- decoder->state.type = INTEL_PT_TRANSACTION;
++ decoder->state.type |= INTEL_PT_TRANSACTION;
+ if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX)
+ decoder->state.type |= INTEL_PT_BRANCH;
+- decoder->state.from_ip = decoder->ip;
+- decoder->state.to_ip = 0;
+ decoder->state.flags = decoder->fup_tx_flags;
+- return true;
++ ret = true;
+ }
+ if (decoder->set_fup_ptw) {
+ decoder->set_fup_ptw = false;
+- decoder->state.type = INTEL_PT_PTW;
++ decoder->state.type |= INTEL_PT_PTW;
+ decoder->state.flags |= INTEL_PT_FUP_IP;
+- decoder->state.from_ip = decoder->ip;
+- decoder->state.to_ip = 0;
+ decoder->state.ptw_payload = decoder->fup_ptw_payload;
+- return true;
++ ret = true;
+ }
+ if (decoder->set_fup_mwait) {
+ decoder->set_fup_mwait = false;
+- decoder->state.type = INTEL_PT_MWAIT_OP;
+- decoder->state.from_ip = decoder->ip;
+- decoder->state.to_ip = 0;
++ decoder->state.type |= INTEL_PT_MWAIT_OP;
+ decoder->state.mwait_payload = decoder->fup_mwait_payload;
+ ret = true;
+ }
+ if (decoder->set_fup_pwre) {
+ decoder->set_fup_pwre = false;
+ decoder->state.type |= INTEL_PT_PWR_ENTRY;
+- decoder->state.type &= ~INTEL_PT_BRANCH;
+- decoder->state.from_ip = decoder->ip;
+- decoder->state.to_ip = 0;
+ decoder->state.pwre_payload = decoder->fup_pwre_payload;
+ ret = true;
+ }
+ if (decoder->set_fup_exstop) {
+ decoder->set_fup_exstop = false;
+ decoder->state.type |= INTEL_PT_EX_STOP;
+- decoder->state.type &= ~INTEL_PT_BRANCH;
+ decoder->state.flags |= INTEL_PT_FUP_IP;
+- decoder->state.from_ip = decoder->ip;
+- decoder->state.to_ip = 0;
+ ret = true;
+ }
+ if (decoder->set_fup_bep) {
+ decoder->set_fup_bep = false;
+ decoder->state.type |= INTEL_PT_BLK_ITEMS;
+- decoder->state.type &= ~INTEL_PT_BRANCH;
++ ret = true;
++ }
++ if (decoder->overflow) {
++ decoder->overflow = false;
++ if (!ret && !decoder->pge) {
++ if (decoder->hop) {
++ decoder->state.type = 0;
++ decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
++ }
++ decoder->pge = true;
++ decoder->state.type |= INTEL_PT_BRANCH | INTEL_PT_TRACE_BEGIN;
++ decoder->state.from_ip = 0;
++ decoder->state.to_ip = decoder->ip;
++ return true;
++ }
++ }
++ if (ret) {
+ decoder->state.from_ip = decoder->ip;
+ decoder->state.to_ip = 0;
+- ret = true;
++ } else {
++ decoder->state.type = type;
+ }
+ return ret;
+ }
+@@ -1607,7 +1615,16 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
+ intel_pt_clear_tx_flags(decoder);
+ intel_pt_set_nr(decoder);
+ decoder->timestamp_insn_cnt = 0;
+- decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
++ decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
++ decoder->state.from_ip = decoder->ip;
++ decoder->ip = 0;
++ decoder->pge = false;
++ decoder->set_fup_tx_flags = false;
++ decoder->set_fup_ptw = false;
++ decoder->set_fup_mwait = false;
++ decoder->set_fup_pwre = false;
++ decoder->set_fup_exstop = false;
++ decoder->set_fup_bep = false;
+ decoder->overflow = true;
+ return -EOVERFLOW;
+ }
+@@ -2665,6 +2682,8 @@ static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder);
+ /* Hop mode: Ignore TNT, do not walk code, but get ip from FUPs and TIPs */
+ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, int *err)
+ {
++ *err = 0;
++
+ /* Leap from PSB to PSB, getting ip from FUP within PSB+ */
+ if (decoder->leap && !decoder->in_psb && decoder->packet.type != INTEL_PT_PSB) {
+ *err = intel_pt_scan_for_psb(decoder);
+@@ -2677,6 +2696,7 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
+ return HOP_IGNORE;
+
+ case INTEL_PT_TIP_PGD:
++ decoder->pge = false;
+ if (!decoder->packet.count) {
+ intel_pt_set_nr(decoder);
+ return HOP_IGNORE;
+@@ -2704,18 +2724,21 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
+ if (!decoder->packet.count)
+ return HOP_IGNORE;
+ intel_pt_set_ip(decoder);
+- if (intel_pt_fup_event(decoder))
+- return HOP_RETURN;
+- if (!decoder->branch_enable)
++ if (decoder->set_fup_mwait || decoder->set_fup_pwre)
++ *no_tip = true;
++ if (!decoder->branch_enable || !decoder->pge)
+ *no_tip = true;
+ if (*no_tip) {
+ decoder->state.type = INTEL_PT_INSTRUCTION;
+ decoder->state.from_ip = decoder->ip;
+ decoder->state.to_ip = 0;
++ intel_pt_fup_event(decoder);
+ return HOP_RETURN;
+ }
++ intel_pt_fup_event(decoder);
++ decoder->state.type |= INTEL_PT_INSTRUCTION | INTEL_PT_BRANCH;
+ *err = intel_pt_walk_fup_tip(decoder);
+- if (!*err)
++ if (!*err && decoder->state.to_ip)
+ decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
+ return HOP_RETURN;
+
+@@ -2896,7 +2919,7 @@ static bool intel_pt_psb_with_fup(struct intel_pt_decoder *decoder, int *err)
+ {
+ struct intel_pt_psb_info data = { .fup = false };
+
+- if (!decoder->branch_enable || !decoder->pge)
++ if (!decoder->branch_enable)
+ return false;
+
+ intel_pt_pkt_lookahead(decoder, intel_pt_psb_lookahead_cb, &data);
+@@ -2923,6 +2946,7 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
+ if (err)
+ return err;
+ next:
++ err = 0;
+ if (decoder->cyc_threshold) {
+ if (decoder->sample_cyc && last_packet_type != INTEL_PT_CYC)
+ decoder->sample_cyc = false;
+@@ -2961,6 +2985,7 @@ next:
+
+ case INTEL_PT_TIP_PGE: {
+ decoder->pge = true;
++ decoder->overflow = false;
+ intel_pt_mtc_cyc_cnt_pge(decoder);
+ intel_pt_set_nr(decoder);
+ if (decoder->packet.count == 0) {
+@@ -2998,7 +3023,7 @@ next:
+ break;
+ }
+ intel_pt_set_last_ip(decoder);
+- if (!decoder->branch_enable) {
++ if (!decoder->branch_enable || !decoder->pge) {
+ decoder->ip = decoder->last_ip;
+ if (intel_pt_fup_event(decoder))
+ return 0;
+@@ -3466,10 +3491,10 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
+ decoder->set_fup_pwre = false;
+ decoder->set_fup_exstop = false;
+ decoder->set_fup_bep = false;
++ decoder->overflow = false;
+
+ if (!decoder->branch_enable) {
+ decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+- decoder->overflow = false;
+ decoder->state.type = 0; /* Do not have a sample */
+ return 0;
+ }
+@@ -3484,7 +3509,6 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
+ decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
+ else
+ decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+- decoder->overflow = false;
+
+ decoder->state.from_ip = 0;
+ decoder->state.to_ip = decoder->ip;
+@@ -3606,7 +3630,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
+ }
+
+ decoder->have_last_ip = true;
+- decoder->pkt_state = INTEL_PT_STATE_NO_IP;
++ decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+
+ err = intel_pt_walk_psb(decoder);
+ if (err)
+@@ -3703,7 +3727,8 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
+
+ if (err) {
+ decoder->state.err = intel_pt_ext_err(err);
+- decoder->state.from_ip = decoder->ip;
++ if (err != -EOVERFLOW)
++ decoder->state.from_ip = decoder->ip;
+ intel_pt_update_sample_time(decoder);
+ decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
+ intel_pt_set_nr(decoder);
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index 6f852b305e92b..824bceb063bfe 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -2510,6 +2510,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
+ ptq->sync_switch = false;
+ intel_pt_next_tid(pt, ptq);
+ }
++ ptq->timestamp = state->est_timestamp;
+ if (pt->synth_opts.errors) {
+ err = intel_ptq_synth_error(ptq, state);
+ if (err)
+diff --git a/tools/perf/util/smt.c b/tools/perf/util/smt.c
+index 20bacd5972ade..34f1b1b1176c7 100644
+--- a/tools/perf/util/smt.c
++++ b/tools/perf/util/smt.c
+@@ -15,7 +15,7 @@ int smt_on(void)
+ if (cached)
+ return cached_result;
+
+- if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) > 0)
++ if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) >= 0)
+ goto done;
+
+ ncpu = sysconf(_SC_NPROCESSORS_CONF);
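The smt.c hunk fixes an inverted success test: sysfs__read_int() follows the usual 0-on-success / negative-on-error convention, so `> 0` classified every successful read of devices/system/cpu/smt/active as a failure and always fell through to the slower per-CPU topology scan. The convention in miniature:

#include <stdio.h>

/* stand-in for sysfs__read_int(): 0 on success, negative on error */
static int read_int(const char *path, int *val)
{
    (void)path;
    *val = 1;
    return 0;
}

int main(void)
{
    int active;

    if (read_int("devices/system/cpu/smt/active", &active) >= 0)
        printf("smt active: %d\n", active);   /* taken: ret == 0 */
    else
        printf("fallback path\n");
    return 0;
}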
+diff --git a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
+index bfb97383e6b5a..b4ec228eb95d0 100644
+--- a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
++++ b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
+@@ -35,7 +35,7 @@
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+- "XDP pkt read, pkt_data' > pkt_end, good access",
++ "XDP pkt read, pkt_data' > pkt_end, corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+@@ -87,6 +87,41 @@
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
++{
++ "XDP pkt read, pkt_data' > pkt_end, corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
++ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .errstr = "R1 offset is outside of the packet",
++ .result = REJECT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
+ {
+ "XDP pkt read, pkt_end > pkt_data', good access",
+ .insns = {
+@@ -106,16 +141,16 @@
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+- "XDP pkt read, pkt_end > pkt_data', bad access 1",
++ "XDP pkt read, pkt_end > pkt_data', corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+@@ -142,6 +177,42 @@
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
++{
++ "XDP pkt read, pkt_end > pkt_data', corner case, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_end > pkt_data', corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
+ {
+ "XDP pkt read, pkt_data' < pkt_end, good access",
+ .insns = {
+@@ -161,16 +232,16 @@
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+- "XDP pkt read, pkt_data' < pkt_end, bad access 1",
++ "XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+@@ -198,7 +269,43 @@
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+- "XDP pkt read, pkt_end < pkt_data', good access",
++ "XDP pkt read, pkt_data' < pkt_end, corner case, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_data' < pkt_end, corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_end < pkt_data', corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+@@ -250,6 +357,41 @@
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
++{
++ "XDP pkt read, pkt_end < pkt_data', corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
++ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_end < pkt_data', corner case -1, bad access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .errstr = "R1 offset is outside of the packet",
++ .result = REJECT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
+ {
+ "XDP pkt read, pkt_data' >= pkt_end, good access",
+ .insns = {
+@@ -268,15 +410,15 @@
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+- "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
++ "XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+@@ -304,7 +446,41 @@
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+- "XDP pkt read, pkt_end >= pkt_data', good access",
++ "XDP pkt read, pkt_data' >= pkt_end, corner case, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_end >= pkt_data', corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+@@ -359,7 +535,44 @@
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+- "XDP pkt read, pkt_data' <= pkt_end, good access",
++ "XDP pkt read, pkt_end >= pkt_data', corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
++ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .errstr = "R1 offset is outside of the packet",
++ .result = REJECT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_data' <= pkt_end, corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+@@ -413,6 +626,43 @@
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
++{
++ "XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
++ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .errstr = "R1 offset is outside of the packet",
++ .result = REJECT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
+ {
+ "XDP pkt read, pkt_end <= pkt_data', good access",
+ .insns = {
+@@ -431,15 +681,15 @@
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+- "XDP pkt read, pkt_end <= pkt_data', bad access 1",
++ "XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+@@ -467,7 +717,41 @@
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+- "XDP pkt read, pkt_meta' > pkt_data, good access",
++ "XDP pkt read, pkt_end <= pkt_data', corner case, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_end <= pkt_data', corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++ offsetof(struct xdp_md, data_end)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_meta' > pkt_data, corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+@@ -519,6 +803,41 @@
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
++{
++ "XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
++ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .errstr = "R1 offset is outside of the packet",
++ .result = REJECT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
+ {
+ "XDP pkt read, pkt_data > pkt_meta', good access",
+ .insns = {
+@@ -538,16 +857,16 @@
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+- "XDP pkt read, pkt_data > pkt_meta', bad access 1",
++ "XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+@@ -574,6 +893,42 @@
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
++{
++ "XDP pkt read, pkt_data > pkt_meta', corner case, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_data > pkt_meta', corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
+ {
+ "XDP pkt read, pkt_meta' < pkt_data, good access",
+ .insns = {
+@@ -593,16 +948,16 @@
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+- "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
++ "XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+@@ -630,7 +985,43 @@
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+- "XDP pkt read, pkt_data < pkt_meta', good access",
++ "XDP pkt read, pkt_meta' < pkt_data, corner case, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_data < pkt_meta', corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+@@ -682,6 +1073,41 @@
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
++{
++ "XDP pkt read, pkt_data < pkt_meta', corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
++ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .errstr = "R1 offset is outside of the packet",
++ .result = REJECT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
+ {
+ "XDP pkt read, pkt_meta' >= pkt_data, good access",
+ .insns = {
+@@ -700,15 +1126,15 @@
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+- "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
++ "XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+@@ -736,7 +1162,41 @@
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+- "XDP pkt read, pkt_data >= pkt_meta', good access",
++ "XDP pkt read, pkt_meta' >= pkt_data, corner case, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_data >= pkt_meta', corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+@@ -791,7 +1251,44 @@
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+- "XDP pkt read, pkt_meta' <= pkt_data, good access",
++ "XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
++ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .errstr = "R1 offset is outside of the packet",
++ .result = REJECT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_meta' <= pkt_data, corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+@@ -845,6 +1342,43 @@
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
++{
++ "XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
++ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
++ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .errstr = "R1 offset is outside of the packet",
++ .result = REJECT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
+ {
+ "XDP pkt read, pkt_data <= pkt_meta', good access",
+ .insns = {
+@@ -863,15 +1397,15 @@
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+- "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
++ "XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+@@ -898,3 +1432,37 @@
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
++{
++ "XDP pkt read, pkt_data <= pkt_meta', corner case, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
++ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
++{
++ "XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct xdp_md, data_meta)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_XDP,
++ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
++},
+diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
+index 010b59b139176..35314277fb586 100644
+--- a/tools/testing/selftests/kvm/include/kvm_util.h
++++ b/tools/testing/selftests/kvm/include/kvm_util.h
+@@ -69,6 +69,15 @@ enum vm_guest_mode {
+
+ #endif
+
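++/*
++ * x86 overrides this to avoid the reserved AMD HyperTransport region;
++ * other architectures can use every frame implied by the PA width.
++ */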
++#if defined(__x86_64__)
++unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
++#else
++static inline unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
++{
++ return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
++}
++#endif
++
+ #define MIN_PAGE_SIZE (1U << MIN_PAGE_SHIFT)
+ #define PTES_PER_MIN_PAGE ptes_per_page(MIN_PAGE_SIZE)
+
+diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
+index 10a8ed691c669..c439fb653fde2 100644
+--- a/tools/testing/selftests/kvm/lib/kvm_util.c
++++ b/tools/testing/selftests/kvm/lib/kvm_util.c
+@@ -307,7 +307,7 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
+ (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
+
+ /* Limit physical addresses to PA-bits. */
+- vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
++ vm->max_gfn = vm_compute_max_gfn(vm);
+
+ /* Allocate and setup memory for guest. */
+ vm->vpages_mapped = sparsebit_alloc();
+diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
+index 28cb881f440d0..da73b97e1e6dc 100644
+--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
++++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
+@@ -1433,3 +1433,71 @@ struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpui
+
+ return cpuid;
+ }
++
++#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
++#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
++#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
++
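++/* CPUID.01H:EAX family: base family in bits 11:8, widened by the
++ * extended family in bits 27:20 when the base family is 0xf. */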
++static inline unsigned x86_family(unsigned int eax)
++{
++ unsigned int x86;
++
++ x86 = (eax >> 8) & 0xf;
++
++ if (x86 == 0xf)
++ x86 += (eax >> 20) & 0xff;
++
++ return x86;
++}
++
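++/*
++ * Compute the highest usable guest frame number. On AMD parts this must
++ * stay below the reserved HyperTransport region near the top of the
++ * guest physical address space.
++ */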
++unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
++{
++ const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
++ unsigned long ht_gfn, max_gfn, max_pfn;
++ uint32_t eax, ebx, ecx, edx, max_ext_leaf;
++
++ max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
++
++ /* Avoid reserved HyperTransport region on AMD processors. */
++ eax = ecx = 0;
++ cpuid(&eax, &ebx, &ecx, &edx);
++ if (ebx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx ||
++ ecx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx ||
++ edx != X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
++ return max_gfn;
++
++ /* On parts with <40 physical address bits, the area is fully hidden */
++ if (vm->pa_bits < 40)
++ return max_gfn;
++
++ /* Before family 17h, the HyperTransport area is just below 1T. */
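++	/* (1 << 28) 4KiB frames == 1TiB; assumes the 4KiB x86 page size */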
++ ht_gfn = (1 << 28) - num_ht_pages;
++ eax = 1;
++ cpuid(&eax, &ebx, &ecx, &edx);
++ if (x86_family(eax) < 0x17)
++ goto done;
++
++ /*
++ * Otherwise it's at the top of the physical address space, possibly
++ * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use
++ * the old conservative value if MAXPHYADDR is not enumerated.
++ */
++ eax = 0x80000000;
++ cpuid(&eax, &ebx, &ecx, &edx);
++ max_ext_leaf = eax;
++ if (max_ext_leaf < 0x80000008)
++ goto done;
++
++ eax = 0x80000008;
++ cpuid(&eax, &ebx, &ecx, &edx);
++ max_pfn = (1ULL << ((eax & 0xff) - vm->page_shift)) - 1;
++ if (max_ext_leaf >= 0x8000001f) {
++ eax = 0x8000001f;
++ cpuid(&eax, &ebx, &ecx, &edx);
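++		/* (ebx >> 6) & 0x3f extracts EBX[11:6], the MAXPHYADDR reduction */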
++ max_pfn >>= (ebx >> 6) & 0x3f;
++ }
++
++ ht_gfn = max_pfn - num_ht_pages;
++done:
++ return min(max_gfn, ht_gfn - 1);
++}
+diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
+index 5abe92d55b696..996af1ae3d3dd 100755
+--- a/tools/testing/selftests/net/fib_tests.sh
++++ b/tools/testing/selftests/net/fib_tests.sh
+@@ -444,24 +444,63 @@ fib_rp_filter_test()
+ setup
+
+ set -e
++ ip netns add ns2
++ ip netns set ns2 auto
++
++ ip -netns ns2 link set dev lo up
++
++ $IP link add name veth1 type veth peer name veth2
++ $IP link set dev veth2 netns ns2
++ $IP address add 192.0.2.1/24 dev veth1
++ ip -netns ns2 address add 192.0.2.1/24 dev veth2
++ $IP link set dev veth1 up
++ ip -netns ns2 link set dev veth2 up
++
+ $IP link set dev lo address 52:54:00:6a:c7:5e
+- $IP link set dummy0 address 52:54:00:6a:c7:5e
+- $IP link add dummy1 type dummy
+- $IP link set dummy1 address 52:54:00:6a:c7:5e
+- $IP link set dev dummy1 up
++ $IP link set dev veth1 address 52:54:00:6a:c7:5e
++ ip -netns ns2 link set dev lo address 52:54:00:6a:c7:5e
++ ip -netns ns2 link set dev veth2 address 52:54:00:6a:c7:5e
++
++ # 1. (ns2) redirect lo's egress to veth2's egress
++ ip netns exec ns2 tc qdisc add dev lo parent root handle 1: fq_codel
++ ip netns exec ns2 tc filter add dev lo parent 1: protocol arp basic \
++ action mirred egress redirect dev veth2
++ ip netns exec ns2 tc filter add dev lo parent 1: protocol ip basic \
++ action mirred egress redirect dev veth2
++
++ # 2. (ns1) redirect veth1's ingress to lo's ingress
++ $NS_EXEC tc qdisc add dev veth1 ingress
++ $NS_EXEC tc filter add dev veth1 ingress protocol arp basic \
++ action mirred ingress redirect dev lo
++ $NS_EXEC tc filter add dev veth1 ingress protocol ip basic \
++ action mirred ingress redirect dev lo
++
++ # 3. (ns1) redirect lo's egress to veth1's egress
++ $NS_EXEC tc qdisc add dev lo parent root handle 1: fq_codel
++ $NS_EXEC tc filter add dev lo parent 1: protocol arp basic \
++ action mirred egress redirect dev veth1
++ $NS_EXEC tc filter add dev lo parent 1: protocol ip basic \
++ action mirred egress redirect dev veth1
++
++ # 4. (ns2) redirect veth2's ingress to lo's ingress
++ ip netns exec ns2 tc qdisc add dev veth2 ingress
++ ip netns exec ns2 tc filter add dev veth2 ingress protocol arp basic \
++ action mirred ingress redirect dev lo
++ ip netns exec ns2 tc filter add dev veth2 ingress protocol ip basic \
++ action mirred ingress redirect dev lo
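++
++	# with the four redirects above, locally destined packets travel
++	# across the veth pair and re-enter each namespace on a real
++	# interface, which is what the strict rp_filter checks below
++	# exercise.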
++
+ $NS_EXEC sysctl -qw net.ipv4.conf.all.rp_filter=1
+ $NS_EXEC sysctl -qw net.ipv4.conf.all.accept_local=1
+ $NS_EXEC sysctl -qw net.ipv4.conf.all.route_localnet=1
+-
+- $NS_EXEC tc qd add dev dummy1 parent root handle 1: fq_codel
+- $NS_EXEC tc filter add dev dummy1 parent 1: protocol arp basic action mirred egress redirect dev lo
+- $NS_EXEC tc filter add dev dummy1 parent 1: protocol ip basic action mirred egress redirect dev lo
++ ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=1
++ ip netns exec ns2 sysctl -qw net.ipv4.conf.all.accept_local=1
++ ip netns exec ns2 sysctl -qw net.ipv4.conf.all.route_localnet=1
+ set +e
+
+- run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 198.51.100.1"
++ run_cmd "ip netns exec ns2 ping -w1 -c1 192.0.2.1"
+ log_test $? 0 "rp_filter passes local packets"
+
+- run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 127.0.0.1"
++ run_cmd "ip netns exec ns2 ping -w1 -c1 127.0.0.1"
+ log_test $? 0 "rp_filter passes loopback packets"
+
+ cleanup
+diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
+index 8748199ac1098..ffca314897c4c 100644
+--- a/tools/testing/selftests/netfilter/Makefile
++++ b/tools/testing/selftests/netfilter/Makefile
+@@ -5,7 +5,8 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
+ conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
+ nft_concat_range.sh nft_conntrack_helper.sh \
+ nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
+- ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh
++ ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
++ conntrack_vrf.sh
+
+ LDLIBS = -lmnl
+ TEST_GEN_FILES = nf-queue
+diff --git a/tools/testing/selftests/netfilter/conntrack_vrf.sh b/tools/testing/selftests/netfilter/conntrack_vrf.sh
+new file mode 100644
+index 0000000000000..8b5ea92345882
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/conntrack_vrf.sh
+@@ -0,0 +1,241 @@
++#!/bin/sh
++
++# This script demonstrates the interaction of conntrack and vrf.
++# The vrf driver calls the netfilter hooks again, with oif/iif
++# pointing at the VRF device.
++#
++# For ingress, this means the first iteration has the iifname of the
++# lower/real device; in this script, that's veth0.
++# The second iteration has iifname set to the vrf device, tvrf in this
++# script.
++#
++# For egress, this is reversed: the first iteration has the vrf device,
++# the second iteration is done with the lower/real/veth0 device.
++#
++# test_ct_zone_in demonstrates an unexpected change of nftables
++# behavior caused by commit 09e856d54bda5f28 "vrf: Reset skb conntrack
++# connection on VRF rcv"
++#
++# It was possible to assign a conntrack zone to a packet (or mark it for
++# `notracking`) in the prerouting chain before conntrack, based on the
++# real iif.
++#
++# After the change, that zone assignment is lost and the zone is instead
++# assigned based on the VRF master interface (in case such a rule
++# exists), so it is impossible to distinguish packets based on the
++# original interface.
++#
++# test_masquerade_vrf and test_masquerade_veth demonstrate the problem
++# that the commit mentioned above was supposed to fix, to make sure
++# that any fix to test case 1 won't break masquerade again.
++
++ksft_skip=4
++
++IP0=172.30.30.1
++IP1=172.30.30.2
++PFXL=30
++ret=0
++
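++# random suffix so concurrent runs get unique namespace names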
++sfx=$(mktemp -u "XXXXXXXX")
++ns0="ns0-$sfx"
++ns1="ns1-$sfx"
++
++cleanup()
++{
++ ip netns pids $ns0 | xargs kill 2>/dev/null
++ ip netns pids $ns1 | xargs kill 2>/dev/null
++
++ ip netns del $ns0 $ns1
++}
++
++nft --version > /dev/null 2>&1
++if [ $? -ne 0 ];then
++ echo "SKIP: Could not run test without nft tool"
++ exit $ksft_skip
++fi
++
++ip -Version > /dev/null 2>&1
++if [ $? -ne 0 ];then
++ echo "SKIP: Could not run test without ip tool"
++ exit $ksft_skip
++fi
++
++ip netns add "$ns0"
++if [ $? -ne 0 ];then
++ echo "SKIP: Could not create net namespace $ns0"
++ exit $ksft_skip
++fi
++ip netns add "$ns1"
++
++trap cleanup EXIT
++
++ip netns exec $ns0 sysctl -q -w net.ipv4.conf.default.rp_filter=0
++ip netns exec $ns0 sysctl -q -w net.ipv4.conf.all.rp_filter=0
++ip netns exec $ns0 sysctl -q -w net.ipv4.conf.all.rp_filter=0
++
++ip link add veth0 netns "$ns0" type veth peer name veth0 netns "$ns1" > /dev/null 2>&1
++if [ $? -ne 0 ];then
++ echo "SKIP: Could not add veth device"
++ exit $ksft_skip
++fi
++
++ip -net $ns0 li add tvrf type vrf table 9876
++if [ $? -ne 0 ];then
++ echo "SKIP: Could not add vrf device"
++ exit $ksft_skip
++fi
++
++ip -net $ns0 li set lo up
++
++ip -net $ns0 li set veth0 master tvrf
++ip -net $ns0 li set tvrf up
++ip -net $ns0 li set veth0 up
++ip -net $ns1 li set veth0 up
++
++ip -net $ns0 addr add $IP0/$PFXL dev veth0
++ip -net $ns1 addr add $IP1/$PFXL dev veth0
++
++ip netns exec $ns1 iperf3 -s > /dev/null 2>&1&
++if [ $? -ne 0 ];then
++ echo "SKIP: Could not start iperf3"
++ exit $ksft_skip
++fi
++
++# test vrf ingress handling.
++# The incoming connection should be placed in conntrack zone 1,
++# as decided by the first iteration of the ruleset.
++test_ct_zone_in()
++{
++ip netns exec $ns0 nft -f - <<EOF
++table testct {
++ chain rawpre {
++ type filter hook prerouting priority raw;
++
++ iif { veth0, tvrf } counter meta nftrace set 1
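++		# the first hook traversal (iif veth0) must win and pin the
++		# flow to zone 1; the tvrf rule only matches on the second
++		# traversal through the vrf device.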
++ iif veth0 counter ct zone set 1 counter return
++ iif tvrf counter ct zone set 2 counter return
++ ip protocol icmp counter
++ notrack counter
++ }
++
++ chain rawout {
++ type filter hook output priority raw;
++
++ oif veth0 counter ct zone set 1 counter return
++ oif tvrf counter ct zone set 2 counter return
++ notrack counter
++ }
++}
++EOF
++ ip netns exec $ns1 ping -W 1 -c 1 -I veth0 $IP0 > /dev/null
++
++ # should be in zone 1, not zone 2
++ count=$(ip netns exec $ns0 conntrack -L -s $IP1 -d $IP0 -p icmp --zone 1 2>/dev/null | wc -l)
++ if [ $count -eq 1 ]; then
++ echo "PASS: entry found in conntrack zone 1"
++ else
++ echo "FAIL: entry not found in conntrack zone 1"
++ count=$(ip netns exec $ns0 conntrack -L -s $IP1 -d $IP0 -p icmp --zone 2 2> /dev/null | wc -l)
++ if [ $count -eq 1 ]; then
++ echo "FAIL: entry found in zone 2 instead"
++ else
++ echo "FAIL: entry not in zone 1 or 2, dumping table"
++ ip netns exec $ns0 conntrack -L
++ ip netns exec $ns0 nft list ruleset
++ fi
++ fi
++}
++
++# add masq rule that gets evaluated with oif set to the vrf device.
++# This tests the first iteration of the packet through conntrack,
++# where oifname is the vrf device.
++test_masquerade_vrf()
++{
++ local qdisc=$1
++
++ if [ "$qdisc" != "default" ]; then
++ tc -net $ns0 qdisc add dev tvrf root $qdisc
++ fi
++
++ ip netns exec $ns0 conntrack -F 2>/dev/null
++
++ip netns exec $ns0 nft -f - <<EOF
++flush ruleset
++table ip nat {
++ chain rawout {
++ type filter hook output priority raw;
++
++ oif tvrf ct state untracked counter
++ }
++ chain postrouting2 {
++ type filter hook postrouting priority mangle;
++
++ oif tvrf ct state untracked counter
++ }
++ chain postrouting {
++ type nat hook postrouting priority 0;
++		# NB: masquerade should always be combined with an 'oif(name)'
++		# match; its absence is intentional here, we want to exercise
++		# double-snat.
++ ip saddr 172.30.30.0/30 counter masquerade random
++ }
++}
++EOF
++ ip netns exec $ns0 ip vrf exec tvrf iperf3 -t 1 -c $IP1 >/dev/null
++ if [ $? -ne 0 ]; then
++ echo "FAIL: iperf3 connect failure with masquerade + sport rewrite on vrf device"
++ ret=1
++ return
++ fi
++
++ # must also check that nat table was evaluated on second (lower device) iteration.
++ ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' &&
++ ip netns exec $ns0 nft list table ip nat |grep -q 'untracked counter packets [1-9]'
++ if [ $? -eq 0 ]; then
++ echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device ($qdisc qdisc)"
++ else
++ echo "FAIL: vrf rules have unexpected counter value"
++ ret=1
++ fi
++
++ if [ "$qdisc" != "default" ]; then
++ tc -net $ns0 qdisc del dev tvrf root
++ fi
++}
++
++# add masq rule that gets evaluated with oif set to the veth device.
++# This tests the second iteration of the packet through conntrack,
++# where oifname is the lower device (veth0 in this case).
++test_masquerade_veth()
++{
++ ip netns exec $ns0 conntrack -F 2>/dev/null
++ip netns exec $ns0 nft -f - <<EOF
++flush ruleset
++table ip nat {
++ chain postrouting {
++ type nat hook postrouting priority 0;
++ meta oif veth0 ip saddr 172.30.30.0/30 counter masquerade random
++ }
++}
++EOF
++ ip netns exec $ns0 ip vrf exec tvrf iperf3 -t 1 -c $IP1 > /dev/null
++ if [ $? -ne 0 ]; then
++ echo "FAIL: iperf3 connect failure with masquerade + sport rewrite on veth device"
++ ret=1
++ return
++ fi
++
++ # must also check that nat table was evaluated on second (lower device) iteration.
++ ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2'
++ if [ $? -eq 0 ]; then
++ echo "PASS: iperf3 connect with masquerade + sport rewrite on veth device"
++ else
++ echo "FAIL: vrf masq rule has unexpected counter value"
++ ret=1
++ fi
++}
++
++test_ct_zone_in
++test_masquerade_vrf "default"
++test_masquerade_vrf "pfifo"
++test_masquerade_veth
++
++exit $ret