| author | Mike Pagano <mpagano@gentoo.org> | 2020-12-21 08:26:48 -0500 |
|---|---|---|
| committer | Mike Pagano <mpagano@gentoo.org> | 2020-12-21 08:26:48 -0500 |
| commit | d4f2e5cd2f7ef9fea6d8978707a5862a4c13c5ba (patch) | |
| tree | 3dfa3e8a6fa5ba0df6b62e67107d03b907ebb4c6 | |
| parent | Linux patch 5.4.84 (diff) | |
| download | linux-patches-d4f2e5cd2f7ef9fea6d8978707a5862a4c13c5ba.tar.gz linux-patches-d4f2e5cd2f7ef9fea6d8978707a5862a4c13c5ba.tar.bz2 linux-patches-d4f2e5cd2f7ef9fea6d8978707a5862a4c13c5ba.zip | |
Linux patch 5.4.85
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
| -rw-r--r-- | 0000_README | 4 |
| -rw-r--r-- | 1084_linux-5.4.85.patch | 1293 |
2 files changed, 1297 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 9a5258b0..377ed610 100644 --- a/0000_README +++ b/0000_README @@ -379,6 +379,10 @@ Patch: 1083_linux-5.4.84.patch From: http://www.kernel.org Desc: Linux 5.4.84 +Patch: 1084_linux-5.4.85.patch +From: http://www.kernel.org +Desc: Linux 5.4.85 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1084_linux-5.4.85.patch b/1084_linux-5.4.85.patch new file mode 100644 index 00000000..aaa46de0 --- /dev/null +++ b/1084_linux-5.4.85.patch @@ -0,0 +1,1293 @@ +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index fea15cd49fbc7..74ba077e99e56 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -5151,6 +5151,7 @@ + device); + j = NO_REPORT_LUNS (don't use report luns + command, uas only); ++ k = NO_SAME (do not use WRITE_SAME, uas only) + l = NOT_LOCKABLE (don't try to lock and + unlock ejectable media, not on uas); + m = MAX_SECTORS_64 (don't transfer more +diff --git a/Documentation/virt/kvm/mmu.txt b/Documentation/virt/kvm/mmu.txt +index dadb29e8738fe..ec072c6bc03f8 100644 +--- a/Documentation/virt/kvm/mmu.txt ++++ b/Documentation/virt/kvm/mmu.txt +@@ -420,7 +420,7 @@ If the generation number of the spte does not equal the global generation + number, it will ignore the cached MMIO information and handle the page + fault through the slow path. + +-Since only 19 bits are used to store generation-number on mmio spte, all ++Since only 18 bits are used to store generation-number on mmio spte, all + pages are zapped when there is an overflow. + + Unfortunately, a single memory access might access kvm_memslots(kvm) multiple +diff --git a/Makefile b/Makefile +index fe0ab15cfd835..a2a2546fcda80 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 84 ++SUBLEVEL = 85 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h +index 17095435c875c..499cb2e727a09 100644 +--- a/arch/x86/kernel/cpu/resctrl/internal.h ++++ b/arch/x86/kernel/cpu/resctrl/internal.h +@@ -276,7 +276,6 @@ struct rftype { + * struct mbm_state - status for each MBM counter in each domain + * @chunks: Total data moved (multiply by rdt_group.mon_scale to get bytes) + * @prev_msr Value of IA32_QM_CTR for this RMID last time we read it +- * @chunks_bw Total local data moved. 
Used for bandwidth calculation + * @prev_bw_msr:Value of previous IA32_QM_CTR for bandwidth counting + * @prev_bw The most recent bandwidth in MBps + * @delta_bw Difference between the current and previous bandwidth +@@ -285,7 +284,6 @@ struct rftype { + struct mbm_state { + u64 chunks; + u64 prev_msr; +- u64 chunks_bw; + u64 prev_bw_msr; + u32 prev_bw; + u32 delta_bw; +diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c +index 0cf4f87f60126..50f683ecd2c6c 100644 +--- a/arch/x86/kernel/cpu/resctrl/monitor.c ++++ b/arch/x86/kernel/cpu/resctrl/monitor.c +@@ -280,8 +280,6 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr) + return; + + chunks = mbm_overflow_count(m->prev_bw_msr, tval); +- m->chunks_bw += chunks; +- m->chunks = m->chunks_bw; + cur_bw = (chunks * r->mon_scale) >> 20; + + if (m->delta_comp) +@@ -451,15 +449,14 @@ static void mbm_update(struct rdt_domain *d, int rmid) + } + if (is_mbm_local_enabled()) { + rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; ++ __mon_event_count(rmid, &rr); + + /* + * Call the MBA software controller only for the + * control groups and when user has enabled + * the software controller explicitly. + */ +- if (!is_mba_sc(NULL)) +- __mon_event_count(rmid, &rr); +- else ++ if (is_mba_sc(NULL)) + mbm_bw_count(rmid, &rr); + } + } +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c +index b90e8fd2f6ced..47c27c6e38426 100644 +--- a/arch/x86/kvm/mmu.c ++++ b/arch/x86/kvm/mmu.c +@@ -407,11 +407,11 @@ static inline bool is_access_track_spte(u64 spte) + } + + /* +- * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of ++ * Due to limited space in PTEs, the MMIO generation is a 18 bit subset of + * the memslots generation and is derived as follows: + * + * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11 +- * Bits 9-18 of the MMIO generation are propagated to spte bits 52-61 ++ * Bits 9-17 of the MMIO generation are propagated to spte bits 54-62 + * + * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in + * the MMIO generation number, as doing so would require stealing a bit from +@@ -420,18 +420,29 @@ static inline bool is_access_track_spte(u64 spte) + * requires a full MMU zap). The flag is instead explicitly queried when + * checking for MMIO spte cache hits. 
+ */ +-#define MMIO_SPTE_GEN_MASK GENMASK_ULL(17, 0) + + #define MMIO_SPTE_GEN_LOW_START 3 + #define MMIO_SPTE_GEN_LOW_END 11 +-#define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \ +- MMIO_SPTE_GEN_LOW_START) + + #define MMIO_SPTE_GEN_HIGH_START PT64_SECOND_AVAIL_BITS_SHIFT + #define MMIO_SPTE_GEN_HIGH_END 62 ++ ++#define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \ ++ MMIO_SPTE_GEN_LOW_START) + #define MMIO_SPTE_GEN_HIGH_MASK GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \ + MMIO_SPTE_GEN_HIGH_START) + ++#define MMIO_SPTE_GEN_LOW_BITS (MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1) ++#define MMIO_SPTE_GEN_HIGH_BITS (MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1) ++ ++/* remember to adjust the comment above as well if you change these */ ++static_assert(MMIO_SPTE_GEN_LOW_BITS == 9 && MMIO_SPTE_GEN_HIGH_BITS == 9); ++ ++#define MMIO_SPTE_GEN_LOW_SHIFT (MMIO_SPTE_GEN_LOW_START - 0) ++#define MMIO_SPTE_GEN_HIGH_SHIFT (MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS) ++ ++#define MMIO_SPTE_GEN_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0) ++ + static u64 generation_mmio_spte_mask(u64 gen) + { + u64 mask; +@@ -439,8 +450,8 @@ static u64 generation_mmio_spte_mask(u64 gen) + WARN_ON(gen & ~MMIO_SPTE_GEN_MASK); + BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK); + +- mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK; +- mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK; ++ mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK; ++ mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK; + return mask; + } + +@@ -448,8 +459,8 @@ static u64 get_mmio_spte_generation(u64 spte) + { + u64 gen; + +- gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START; +- gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START; ++ gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT; ++ gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT; + return gen; + } + +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +index fcb52efec0753..89121d7ce3e6f 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c ++++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +@@ -141,8 +141,8 @@ static const struct { + { ENETC_PM0_R255, "MAC rx 128-255 byte packets" }, + { ENETC_PM0_R511, "MAC rx 256-511 byte packets" }, + { ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" }, +- { ENETC_PM0_R1518, "MAC rx 1024-1518 byte packets" }, +- { ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" }, ++ { ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" }, ++ { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" }, + { ENETC_PM0_ROVR, "MAC rx oversized packets" }, + { ENETC_PM0_RJBR, "MAC rx jabber packets" }, + { ENETC_PM0_RFRG, "MAC rx fragment packets" }, +@@ -161,9 +161,13 @@ static const struct { + { ENETC_PM0_TBCA, "MAC tx broadcast frames" }, + { ENETC_PM0_TPKT, "MAC tx packets" }, + { ENETC_PM0_TUND, "MAC tx undersized packets" }, ++ { ENETC_PM0_T64, "MAC tx 64 byte packets" }, + { ENETC_PM0_T127, "MAC tx 65-127 byte packets" }, ++ { ENETC_PM0_T255, "MAC tx 128-255 byte packets" }, ++ { ENETC_PM0_T511, "MAC tx 256-511 byte packets" }, + { ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" }, +- { ENETC_PM0_T1518, "MAC tx 1024-1518 byte packets" }, ++ { ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" }, ++ { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet 
packets" }, + { ENETC_PM0_TCNP, "MAC tx control packets" }, + { ENETC_PM0_TDFR, "MAC tx deferred packets" }, + { ENETC_PM0_TMCOL, "MAC tx multiple collisions" }, +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h +index 88276299f4473..7428f62408a20 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h ++++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h +@@ -239,8 +239,8 @@ enum enetc_bdr_type {TX, RX}; + #define ENETC_PM0_R255 0x8180 + #define ENETC_PM0_R511 0x8188 + #define ENETC_PM0_R1023 0x8190 +-#define ENETC_PM0_R1518 0x8198 +-#define ENETC_PM0_R1519X 0x81A0 ++#define ENETC_PM0_R1522 0x8198 ++#define ENETC_PM0_R1523X 0x81A0 + #define ENETC_PM0_ROVR 0x81A8 + #define ENETC_PM0_RJBR 0x81B0 + #define ENETC_PM0_RFRG 0x81B8 +@@ -259,9 +259,13 @@ enum enetc_bdr_type {TX, RX}; + #define ENETC_PM0_TBCA 0x8250 + #define ENETC_PM0_TPKT 0x8260 + #define ENETC_PM0_TUND 0x8268 ++#define ENETC_PM0_T64 0x8270 + #define ENETC_PM0_T127 0x8278 ++#define ENETC_PM0_T255 0x8280 ++#define ENETC_PM0_T511 0x8288 + #define ENETC_PM0_T1023 0x8290 +-#define ENETC_PM0_T1518 0x8298 ++#define ENETC_PM0_T1522 0x8298 ++#define ENETC_PM0_T1523X 0x82A0 + #define ENETC_PM0_TCNP 0x82C0 + #define ENETC_PM0_TDFR 0x82D0 + #define ENETC_PM0_TMCOL 0x82D8 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h +index 38b79321c4c44..de69ebf688577 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h +@@ -35,8 +35,6 @@ + + #define HCLGE_DBG_DFX_SSU_2_OFFSET 12 + +-#pragma pack(1) +- + struct hclge_qos_pri_map_cmd { + u8 pri0_tc : 4, + pri1_tc : 4; +@@ -85,8 +83,6 @@ struct hclge_dbg_reg_type_info { + struct hclge_dbg_reg_common_msg reg_msg; + }; + +-#pragma pack() +- + static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = { + {false, "Reserved"}, + {true, "BP_CPU_STATE"}, +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +index 70fd246840e21..b5eb116249dda 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +@@ -1383,8 +1383,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev) + } + + priv->port_stats.tx_timeout++; +- en_dbg(DRV, priv, "Scheduling watchdog\n"); +- queue_work(mdev->workqueue, &priv->watchdog_task); ++ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) { ++ en_dbg(DRV, priv, "Scheduling port restart\n"); ++ queue_work(mdev->workqueue, &priv->restart_task); ++ } + } + + +@@ -1738,6 +1740,7 @@ int mlx4_en_start_port(struct net_device *dev) + mlx4_en_deactivate_cq(priv, cq); + goto tx_err; + } ++ clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state); + if (t != TX_XDP) { + tx_ring->tx_queue = netdev_get_tx_queue(dev, i); + tx_ring->recycle_ring = NULL; +@@ -1834,6 +1837,7 @@ int mlx4_en_start_port(struct net_device *dev) + local_bh_enable(); + } + ++ clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state); + netif_tx_start_all_queues(dev); + netif_device_attach(dev); + +@@ -2004,7 +2008,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) + static void mlx4_en_restart(struct work_struct *work) + { + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, +- watchdog_task); ++ restart_task); + struct mlx4_en_dev *mdev = priv->mdev; + struct net_device *dev = priv->dev; + +@@ -2386,7 +2390,7 @@ static 
int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) + if (netif_running(dev)) { + mutex_lock(&mdev->state_lock); + if (!mdev->device_up) { +- /* NIC is probably restarting - let watchdog task reset ++ /* NIC is probably restarting - let restart task reset + * the port */ + en_dbg(DRV, priv, "Change MTU called with card down!?\n"); + } else { +@@ -2395,7 +2399,9 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) + if (err) { + en_err(priv, "Failed restarting port:%d\n", + priv->port); +- queue_work(mdev->workqueue, &priv->watchdog_task); ++ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, ++ &priv->state)) ++ queue_work(mdev->workqueue, &priv->restart_task); + } + } + mutex_unlock(&mdev->state_lock); +@@ -2865,7 +2871,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) + if (err) { + en_err(priv, "Failed starting port %d for XDP change\n", + priv->port); +- queue_work(mdev->workqueue, &priv->watchdog_task); ++ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) ++ queue_work(mdev->workqueue, &priv->restart_task); + } + } + +@@ -3263,7 +3270,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, + priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); + spin_lock_init(&priv->stats_lock); + INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); +- INIT_WORK(&priv->watchdog_task, mlx4_en_restart); ++ INIT_WORK(&priv->restart_task, mlx4_en_restart); + INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); + INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); + INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c +index 191ead7a7fa59..605c079d48417 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c +@@ -392,6 +392,35 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) + return cnt; + } + ++static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe, ++ u16 cqe_index, struct mlx4_en_tx_ring *ring) ++{ ++ struct mlx4_en_dev *mdev = priv->mdev; ++ struct mlx4_en_tx_info *tx_info; ++ struct mlx4_en_tx_desc *tx_desc; ++ u16 wqe_index; ++ int desc_size; ++ ++ en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n", ++ ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome); ++ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe), ++ false); ++ ++ wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask; ++ tx_info = &ring->tx_info[wqe_index]; ++ desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE; ++ en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn, ++ wqe_index, desc_size); ++ tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE); ++ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false); ++ ++ if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) ++ return; ++ ++ en_err(priv, "Scheduling port restart\n"); ++ queue_work(mdev->workqueue, &priv->restart_task); ++} ++ + bool mlx4_en_process_tx_cq(struct net_device *dev, + struct mlx4_en_cq *cq, int napi_budget) + { +@@ -438,13 +467,10 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, + dma_rmb(); + + if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == +- MLX4_CQE_OPCODE_ERROR)) { +- struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe; +- +- en_err(priv, "CQE error - 
vendor syndrome: 0x%x syndrome: 0x%x\n", +- cqe_err->vendor_err_syndrome, +- cqe_err->syndrome); +- } ++ MLX4_CQE_OPCODE_ERROR)) ++ if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state)) ++ mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index, ++ ring); + + /* Skip over last polled CQE */ + new_index = be16_to_cpu(cqe->wqe_index) & size_mask; +diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +index 630f15977f091..a2f69c6f0c79f 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h ++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +@@ -271,6 +271,10 @@ struct mlx4_en_page_cache { + } buf[MLX4_EN_CACHE_SIZE]; + }; + ++enum { ++ MLX4_EN_TX_RING_STATE_RECOVERING, ++}; ++ + struct mlx4_en_priv; + + struct mlx4_en_tx_ring { +@@ -317,6 +321,7 @@ struct mlx4_en_tx_ring { + * Only queue_stopped might be used if BQL is not properly working. + */ + unsigned long queue_stopped; ++ unsigned long state; + struct mlx4_hwq_resources sp_wqres; + struct mlx4_qp sp_qp; + struct mlx4_qp_context sp_context; +@@ -530,6 +535,10 @@ struct mlx4_en_stats_bitmap { + struct mutex mutex; /* for mutual access to stats bitmap */ + }; + ++enum { ++ MLX4_EN_STATE_FLAG_RESTARTING, ++}; ++ + struct mlx4_en_priv { + struct mlx4_en_dev *mdev; + struct mlx4_en_port_profile *prof; +@@ -595,7 +604,7 @@ struct mlx4_en_priv { + struct mlx4_en_cq *rx_cq[MAX_RX_RINGS]; + struct mlx4_qp drop_qp; + struct work_struct rx_mode_task; +- struct work_struct watchdog_task; ++ struct work_struct restart_task; + struct work_struct linkstate_task; + struct delayed_work stats_task; + struct delayed_work service_task; +@@ -643,6 +652,7 @@ struct mlx4_en_priv { + u32 pflags; + u8 rss_key[MLX4_EN_RSS_KEY_SIZE]; + u8 rss_hash_fn; ++ unsigned long state; + }; + + enum mlx4_en_wol { +diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c +index 3a0b289d97719..eedec13460787 100644 +--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c ++++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c +@@ -780,7 +780,9 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev, + + wol->supported = 0; + wol->wolopts = 0; +- phy_ethtool_get_wol(netdev->phydev, wol); ++ ++ if (netdev->phydev) ++ phy_ethtool_get_wol(netdev->phydev, wol); + + wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | + WAKE_MAGIC | WAKE_PHY | WAKE_ARP; +@@ -809,9 +811,8 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev, + + device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts); + +- phy_ethtool_set_wol(netdev->phydev, wol); +- +- return 0; ++ return netdev->phydev ? 
phy_ethtool_set_wol(netdev->phydev, wol) ++ : -ENETDOWN; + } + #endif /* CONFIG_PM */ + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +index d1d6ba9cdccdd..2788d4c5b1926 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +@@ -29,7 +29,6 @@ + #define PRG_ETH0_EXT_RMII_MODE 4 + + /* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */ +-#define PRG_ETH0_CLK_M250_SEL_SHIFT 4 + #define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4) + + #define PRG_ETH0_TXDLY_SHIFT 5 +@@ -143,8 +142,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) + } + + clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0; +- clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT; +- clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK; ++ clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK); ++ clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >> ++ clk_configs->m250_mux.shift; + clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parent_names, + MUX_CLK_NUM_PARENTS, &clk_mux_ops, + &clk_configs->m250_mux.hw); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 189cdb7633671..18c5a9bb6759c 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -1441,6 +1441,19 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) + stmmac_free_tx_buffer(priv, queue, i); + } + ++/** ++ * stmmac_free_tx_skbufs - free TX skb buffers ++ * @priv: private structure ++ */ ++static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) ++{ ++ u32 tx_queue_cnt = priv->plat->tx_queues_to_use; ++ u32 queue; ++ ++ for (queue = 0; queue < tx_queue_cnt; queue++) ++ dma_free_tx_skbufs(priv, queue); ++} ++ + /** + * free_dma_rx_desc_resources - free RX dma desc resources + * @priv: private structure +@@ -2745,9 +2758,6 @@ static int stmmac_release(struct net_device *dev) + struct stmmac_priv *priv = netdev_priv(dev); + u32 chan; + +- if (priv->eee_enabled) +- del_timer_sync(&priv->eee_ctrl_timer); +- + /* Stop and disconnect the PHY */ + phylink_stop(priv->phylink); + phylink_disconnect_phy(priv->phylink); +@@ -2764,6 +2774,11 @@ static int stmmac_release(struct net_device *dev) + if (priv->lpi_irq > 0) + free_irq(priv->lpi_irq, dev); + ++ if (priv->eee_enabled) { ++ priv->tx_path_in_lpi_mode = false; ++ del_timer_sync(&priv->eee_ctrl_timer); ++ } ++ + /* Stop TX/RX DMA and clear the descriptors */ + stmmac_stop_all_dma(priv); + +@@ -4748,6 +4763,11 @@ int stmmac_suspend(struct device *dev) + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + del_timer_sync(&priv->tx_queue[chan].txtimer); + ++ if (priv->eee_enabled) { ++ priv->tx_path_in_lpi_mode = false; ++ del_timer_sync(&priv->eee_ctrl_timer); ++ } ++ + /* Stop TX/RX DMA */ + stmmac_stop_all_dma(priv); + +@@ -4846,6 +4866,7 @@ int stmmac_resume(struct device *dev) + + stmmac_reset_queues_param(priv); + ++ stmmac_free_tx_skbufs(priv); + stmmac_clear_descriptors(priv); + + stmmac_hw_setup(ndev, false); +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c +index eb480204cdbeb..5b8451c58aa4c 100644 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c +@@ -1425,9 +1425,7 @@ static int temac_probe(struct platform_device *pdev) + of_node_put(dma_np); 
+ } else if (pdata) { + /* 2nd memory resource specifies DMA registers */ +- res = platform_get_resource(pdev, IORESOURCE_MEM, 1); +- lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start, +- resource_size(res)); ++ lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(lp->sdma_regs)) { + dev_err(&pdev->dev, + "could not map DMA registers\n"); +diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c +index 0c7d746c03304..14dfb92783456 100644 +--- a/drivers/net/vrf.c ++++ b/drivers/net/vrf.c +@@ -1038,11 +1038,17 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, + int orig_iif = skb->skb_iif; + bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); + bool is_ndisc = ipv6_ndisc_frame(skb); ++ bool is_ll_src; + + /* loopback, multicast & non-ND link-local traffic; do not push through +- * packet taps again. Reset pkt_type for upper layers to process skb ++ * packet taps again. Reset pkt_type for upper layers to process skb. ++ * for packets with lladdr src, however, skip so that the dst can be ++ * determine at input using original ifindex in the case that daddr ++ * needs strict + */ +- if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) { ++ is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL; ++ if (skb->pkt_type == PACKET_LOOPBACK || ++ (need_strict && !is_ndisc && !is_ll_src)) { + skb->dev = vrf_dev; + skb->skb_iif = vrf_dev->ifindex; + IP6CB(skb)->flags |= IP6SKB_L3SLAVE; +diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c +index f2c6d9d3bb28f..efe793a2fc65d 100644 +--- a/drivers/tty/serial/8250/8250_omap.c ++++ b/drivers/tty/serial/8250/8250_omap.c +@@ -170,11 +170,6 @@ static void omap_8250_mdr1_errataset(struct uart_8250_port *up, + struct omap8250_priv *priv) + { + u8 timeout = 255; +- u8 old_mdr1; +- +- old_mdr1 = serial_in(up, UART_OMAP_MDR1); +- if (old_mdr1 == priv->mdr1) +- return; + + serial_out(up, UART_OMAP_MDR1, priv->mdr1); + udelay(2); +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index b55c3a699fc65..c1592403222f5 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -342,6 +342,9 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x06a3, 0x0006), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + ++ /* Agfa SNAPSCAN 1212U */ ++ { USB_DEVICE(0x06bd, 0x0001), .driver_info = USB_QUIRK_RESET_RESUME }, ++ + /* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */ + { USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME }, + +diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c +index 4c9d1e49d5ed1..a49c27b2ba16d 100644 +--- a/drivers/usb/gadget/udc/dummy_hcd.c ++++ b/drivers/usb/gadget/udc/dummy_hcd.c +@@ -2733,7 +2733,7 @@ static int __init init(void) + { + int retval = -ENOMEM; + int i; +- struct dummy *dum[MAX_NUM_UDC]; ++ struct dummy *dum[MAX_NUM_UDC] = {}; + + if (usb_disabled()) + return -ENODEV; +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c +index 933936abb6fb7..1a274f8a5bf11 100644 +--- a/drivers/usb/host/xhci-hub.c ++++ b/drivers/usb/host/xhci-hub.c +@@ -1705,6 +1705,10 @@ retry: + hcd->state = HC_STATE_SUSPENDED; + bus_state->next_statechange = jiffies + msecs_to_jiffies(10); + spin_unlock_irqrestore(&xhci->lock, flags); ++ ++ if (bus_state->bus_suspended) ++ usleep_range(5000, 10000); ++ + return 0; + } + +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index 3c90c14390d60..d08b0079eecb1 100644 
+--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -45,6 +45,7 @@ + #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 + #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI 0x15b5 + #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI 0x15b6 ++#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_XHCI 0x15c1 + #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI 0x15db + #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI 0x15d4 + #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI 0x15e9 +@@ -220,6 +221,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + if (pdev->vendor == PCI_VENDOR_ID_INTEL && + (pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI || ++ pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI || +diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig +index 9b632ab24f033..df7404c526c8b 100644 +--- a/drivers/usb/misc/sisusbvga/Kconfig ++++ b/drivers/usb/misc/sisusbvga/Kconfig +@@ -16,7 +16,7 @@ config USB_SISUSBVGA + + config USB_SISUSBVGA_CON + bool "Text console and mode switching support" if USB_SISUSBVGA +- depends on VT ++ depends on VT && BROKEN + select FONT_8x16 + ---help--- + Say Y here if you want a VGA text console via the USB dongle or +diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c +index 1ec1baa7604ed..678903d1ce4da 100644 +--- a/drivers/usb/storage/uas.c ++++ b/drivers/usb/storage/uas.c +@@ -867,6 +867,9 @@ static int uas_slave_configure(struct scsi_device *sdev) + if (devinfo->flags & US_FL_NO_READ_CAPACITY_16) + sdev->no_read_capacity_16 = 1; + ++ /* Some disks cannot handle WRITE_SAME */ ++ if (devinfo->flags & US_FL_NO_SAME) ++ sdev->no_write_same = 1; + /* + * Some disks return the total number of blocks in response + * to READ CAPACITY rather than the highest block number. +diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h +index dcdfcdfd2ad13..749c69be091cc 100644 +--- a/drivers/usb/storage/unusual_uas.h ++++ b/drivers/usb/storage/unusual_uas.h +@@ -35,12 +35,15 @@ UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999, + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES), + +-/* Reported-by: Julian Groß <julian.g@posteo.de> */ ++/* ++ * Initially Reported-by: Julian Groß <julian.g@posteo.de> ++ * Further reports David C. 
Partridge <david.partridge@perdrix.co.uk> ++ */ + UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, + "LaCie", + "2Big Quadra USB3", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, +- US_FL_NO_REPORT_OPCODES), ++ US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME), + + /* + * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI +diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c +index 9a79cd9762f31..2349dfa3b1762 100644 +--- a/drivers/usb/storage/usb.c ++++ b/drivers/usb/storage/usb.c +@@ -541,6 +541,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags) + case 'j': + f |= US_FL_NO_REPORT_LUNS; + break; ++ case 'k': ++ f |= US_FL_NO_SAME; ++ break; + case 'l': + f |= US_FL_NOT_LOCKABLE; + break; +diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h +index 000a5954b2e89..a7f7ebdd3069e 100644 +--- a/include/linux/usb_usual.h ++++ b/include/linux/usb_usual.h +@@ -84,6 +84,8 @@ + /* Cannot handle REPORT_LUNS */ \ + US_FLAG(ALWAYS_SYNC, 0x20000000) \ + /* lies about caching, so always sync */ \ ++ US_FLAG(NO_SAME, 0x40000000) \ ++ /* Cannot handle WRITE_SAME */ \ + + #define US_FLAG(name, value) US_FL_##name = value , + enum { US_DO_ALL_FLAGS }; +diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h +index a71b6e3b03ebc..83ee45fa634b9 100644 +--- a/include/uapi/linux/ptrace.h ++++ b/include/uapi/linux/ptrace.h +@@ -81,7 +81,8 @@ struct seccomp_metadata { + + struct ptrace_syscall_info { + __u8 op; /* PTRACE_SYSCALL_INFO_* */ +- __u32 arch __attribute__((__aligned__(sizeof(__u32)))); ++ __u8 pad[3]; ++ __u32 arch; + __u64 instruction_pointer; + __u64 stack_pointer; + union { +diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c +index 168479a7d61b8..be0ca3306be8c 100644 +--- a/kernel/sched/membarrier.c ++++ b/kernel/sched/membarrier.c +@@ -30,6 +30,23 @@ static void ipi_mb(void *info) + smp_mb(); /* IPIs should be serializing but paranoid. */ + } + ++static void ipi_sync_core(void *info) ++{ ++ /* ++ * The smp_mb() in membarrier after all the IPIs is supposed to ++ * ensure that memory on remote CPUs that occur before the IPI ++ * become visible to membarrier()'s caller -- see scenario B in ++ * the big comment at the top of this file. ++ * ++ * A sync_core() would provide this guarantee, but ++ * sync_core_before_usermode() might end up being deferred until ++ * after membarrier()'s smp_mb(). ++ */ ++ smp_mb(); /* IPIs should be serializing but paranoid. 
*/ ++ ++ sync_core_before_usermode(); ++} ++ + static void ipi_sync_rq_state(void *info) + { + struct mm_struct *mm = (struct mm_struct *) info; +@@ -134,6 +151,7 @@ static int membarrier_private_expedited(int flags) + int cpu; + cpumask_var_t tmpmask; + struct mm_struct *mm = current->mm; ++ smp_call_func_t ipi_func = ipi_mb; + + if (flags & MEMBARRIER_FLAG_SYNC_CORE) { + if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE)) +@@ -141,6 +159,7 @@ static int membarrier_private_expedited(int flags) + if (!(atomic_read(&mm->membarrier_state) & + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY)) + return -EPERM; ++ ipi_func = ipi_sync_core; + } else { + if (!(atomic_read(&mm->membarrier_state) & + MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)) +@@ -181,7 +200,7 @@ static int membarrier_private_expedited(int flags) + rcu_read_unlock(); + + preempt_disable(); +- smp_call_function_many(tmpmask, ipi_mb, NULL, 1); ++ smp_call_function_many(tmpmask, ipi_func, NULL, 1); + preempt_enable(); + + free_cpumask_var(tmpmask); +diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c +index b99efa42e81dc..0dd8984a261da 100644 +--- a/net/bridge/br_device.c ++++ b/net/bridge/br_device.c +@@ -168,6 +168,9 @@ static int br_dev_open(struct net_device *dev) + br_stp_enable_bridge(br); + br_multicast_open(br); + ++ if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) ++ br_multicast_join_snoopers(br); ++ + return 0; + } + +@@ -188,6 +191,9 @@ static int br_dev_stop(struct net_device *dev) + br_stp_disable_bridge(br); + br_multicast_stop(br); + ++ if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) ++ br_multicast_leave_snoopers(br); ++ + netif_stop_queue(dev); + + return 0; +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c +index ee2902b51d45a..066cd3c59cfdb 100644 +--- a/net/bridge/br_multicast.c ++++ b/net/bridge/br_multicast.c +@@ -1848,7 +1848,7 @@ static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br) + } + #endif + +-static void br_multicast_join_snoopers(struct net_bridge *br) ++void br_multicast_join_snoopers(struct net_bridge *br) + { + br_ip4_multicast_join_snoopers(br); + br_ip6_multicast_join_snoopers(br); +@@ -1879,7 +1879,7 @@ static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br) + } + #endif + +-static void br_multicast_leave_snoopers(struct net_bridge *br) ++void br_multicast_leave_snoopers(struct net_bridge *br) + { + br_ip4_multicast_leave_snoopers(br); + br_ip6_multicast_leave_snoopers(br); +@@ -1898,9 +1898,6 @@ static void __br_multicast_open(struct net_bridge *br, + + void br_multicast_open(struct net_bridge *br) + { +- if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) +- br_multicast_join_snoopers(br); +- + __br_multicast_open(br, &br->ip4_own_query); + #if IS_ENABLED(CONFIG_IPV6) + __br_multicast_open(br, &br->ip6_own_query); +@@ -1916,9 +1913,6 @@ void br_multicast_stop(struct net_bridge *br) + del_timer_sync(&br->ip6_other_query.timer); + del_timer_sync(&br->ip6_own_query.timer); + #endif +- +- if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) +- br_multicast_leave_snoopers(br); + } + + void br_multicast_dev_del(struct net_bridge *br) +@@ -2049,6 +2043,7 @@ static void br_multicast_start_querier(struct net_bridge *br, + int br_multicast_toggle(struct net_bridge *br, unsigned long val) + { + struct net_bridge_port *port; ++ bool change_snoopers = false; + + spin_lock_bh(&br->multicast_lock); + if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val) +@@ -2057,7 +2052,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val) + 
br_mc_disabled_update(br->dev, val); + br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val); + if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) { +- br_multicast_leave_snoopers(br); ++ change_snoopers = true; + goto unlock; + } + +@@ -2068,9 +2063,30 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val) + list_for_each_entry(port, &br->port_list, list) + __br_multicast_enable_port(port); + ++ change_snoopers = true; ++ + unlock: + spin_unlock_bh(&br->multicast_lock); + ++ /* br_multicast_join_snoopers has the potential to cause ++ * an MLD Report/Leave to be delivered to br_multicast_rcv, ++ * which would in turn call br_multicast_add_group, which would ++ * attempt to acquire multicast_lock. This function should be ++ * called after the lock has been released to avoid deadlocks on ++ * multicast_lock. ++ * ++ * br_multicast_leave_snoopers does not have the problem since ++ * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and ++ * returns without calling br_multicast_ipv4/6_rcv if it's not ++ * enabled. Moved both functions out just for symmetry. ++ */ ++ if (change_snoopers) { ++ if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) ++ br_multicast_join_snoopers(br); ++ else ++ br_multicast_leave_snoopers(br); ++ } ++ + return 0; + } + +diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h +index cecb4223440e7..7615c2210e0da 100644 +--- a/net/bridge/br_private.h ++++ b/net/bridge/br_private.h +@@ -665,6 +665,8 @@ void br_multicast_del_port(struct net_bridge_port *port); + void br_multicast_enable_port(struct net_bridge_port *port); + void br_multicast_disable_port(struct net_bridge_port *port); + void br_multicast_init(struct net_bridge *br); ++void br_multicast_join_snoopers(struct net_bridge *br); ++void br_multicast_leave_snoopers(struct net_bridge *br); + void br_multicast_open(struct net_bridge *br); + void br_multicast_stop(struct net_bridge *br); + void br_multicast_dev_del(struct net_bridge *br); +@@ -792,6 +794,14 @@ static inline void br_multicast_init(struct net_bridge *br) + { + } + ++static inline void br_multicast_join_snoopers(struct net_bridge *br) ++{ ++} ++ ++static inline void br_multicast_leave_snoopers(struct net_bridge *br) ++{ ++} ++ + static inline void br_multicast_open(struct net_bridge *br) + { + } +diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c +index 48413b5eb61fc..9257292bd1aed 100644 +--- a/net/bridge/br_vlan.c ++++ b/net/bridge/br_vlan.c +@@ -260,8 +260,10 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags, + } + + masterv = br_vlan_get_master(br, v->vid, extack); +- if (!masterv) ++ if (!masterv) { ++ err = -ENOMEM; + goto out_filt; ++ } + v->brvlan = masterv; + if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) { + v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats); +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index ed2ab03cf971c..da994f7e3def9 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -835,7 +835,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, + if (has_gw && has_via) { + NL_SET_ERR_MSG(extack, + "Nexthop configuration can not contain both GATEWAY and VIA"); +- goto errout; ++ return -EINVAL; + } + + return 0; +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 54fd6bc5adcca..adace90f49fac 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -446,7 +446,6 @@ void tcp_init_buffer_space(struct sock *sk) + if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) + tcp_sndbuf_expand(sk); + +- tp->rcvq_space.space = min_t(u32, 
tp->rcv_wnd, TCP_INIT_CWND * tp->advmss); + tcp_mstamp_refresh(tp); + tp->rcvq_space.time = tp->tcp_mstamp; + tp->rcvq_space.seq = tp->copied_seq; +@@ -470,6 +469,8 @@ void tcp_init_buffer_space(struct sock *sk) + + tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); + tp->snd_cwnd_stamp = tcp_jiffies32; ++ tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd, ++ (u32)TCP_INIT_CWND * tp->advmss); + } + + /* 4. Recalculate window clamp after socket hit its memory bounds. */ +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 4407193bd7029..5e311e6a31d51 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -1654,7 +1654,8 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) + * window, and remember whether we were cwnd-limited then. + */ + if (!before(tp->snd_una, tp->max_packets_seq) || +- tp->packets_out > tp->max_packets_out) { ++ tp->packets_out > tp->max_packets_out || ++ is_cwnd_limited) { + tp->max_packets_out = tp->packets_out; + tp->max_packets_seq = tp->snd_nxt; + tp->is_cwnd_limited = is_cwnd_limited; +@@ -2476,6 +2477,10 @@ repair: + else + tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED); + ++ is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); ++ if (likely(sent_pkts || is_cwnd_limited)) ++ tcp_cwnd_validate(sk, is_cwnd_limited); ++ + if (likely(sent_pkts)) { + if (tcp_in_cwnd_reduction(sk)) + tp->prr_out += sent_pkts; +@@ -2483,8 +2488,6 @@ repair: + /* Send one loss probe per tail loss episode. */ + if (push_one != 2) + tcp_schedule_loss_probe(sk, false); +- is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); +- tcp_cwnd_validate(sk, is_cwnd_limited); + return false; + } + return !tp->packets_out && !tcp_write_queue_empty(sk); +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c +index 5d016bbdf16e8..c7ff200d0bd41 100644 +--- a/net/ipv4/udp.c ++++ b/net/ipv4/udp.c +@@ -2117,7 +2117,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + __skb_pull(skb, skb_transport_offset(skb)); + ret = udp_queue_rcv_one_skb(sk, skb); + if (ret > 0) +- ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret); ++ ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret); + } + return 0; + } +diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c +index aca608ae313fe..1708b64d41094 100644 +--- a/net/mac80211/mesh_pathtbl.c ++++ b/net/mac80211/mesh_pathtbl.c +@@ -60,6 +60,7 @@ static struct mesh_table *mesh_table_alloc(void) + atomic_set(&newtbl->entries, 0); + spin_lock_init(&newtbl->gates_lock); + spin_lock_init(&newtbl->walk_lock); ++ rhashtable_init(&newtbl->rhead, &mesh_rht_params); + + return newtbl; + } +@@ -775,9 +776,6 @@ int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) + goto free_path; + } + +- rhashtable_init(&tbl_path->rhead, &mesh_rht_params); +- rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params); +- + sdata->u.mesh.mesh_paths = tbl_path; + sdata->u.mesh.mpp_paths = tbl_mpp; + +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c +index f57c610d75237..46004e329a24a 100644 +--- a/sound/core/oss/pcm_oss.c ++++ b/sound/core/oss/pcm_oss.c +@@ -1934,11 +1934,15 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int + static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsigned int val) + { + struct snd_pcm_runtime *runtime; ++ int fragshift; + + runtime = substream->runtime; + if (runtime->oss.subdivision || runtime->oss.fragshift) + return -EINVAL; +- runtime->oss.fragshift = val & 0xffff; ++ fragshift = val & 
0xffff; ++ if (fragshift >= 31) ++ return -EINVAL; ++ runtime->oss.fragshift = fragshift; + runtime->oss.maxfrags = (val >> 16) & 0xffff; + if (runtime->oss.fragshift < 4) /* < 16 */ + runtime->oss.fragshift = 4; +diff --git a/sound/usb/format.c b/sound/usb/format.c +index 1f9ea513230a6..9e9d4c10dfac6 100644 +--- a/sound/usb/format.c ++++ b/sound/usb/format.c +@@ -40,6 +40,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, + case UAC_VERSION_1: + default: { + struct uac_format_type_i_discrete_descriptor *fmt = _fmt; ++ if (format >= 64) ++ return 0; /* invalid format */ + sample_width = fmt->bBitResolution; + sample_bytes = fmt->bSubframeSize; + format = 1ULL << format; +diff --git a/sound/usb/stream.c b/sound/usb/stream.c +index d01edd5da6cf8..c5cbba9fdf0da 100644 +--- a/sound/usb/stream.c ++++ b/sound/usb/stream.c +@@ -193,16 +193,16 @@ static int usb_chmap_ctl_get(struct snd_kcontrol *kcontrol, + struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); + struct snd_usb_substream *subs = info->private_data; + struct snd_pcm_chmap_elem *chmap = NULL; +- int i; ++ int i = 0; + +- memset(ucontrol->value.integer.value, 0, +- sizeof(ucontrol->value.integer.value)); + if (subs->cur_audiofmt) + chmap = subs->cur_audiofmt->chmap; + if (chmap) { + for (i = 0; i < chmap->channels; i++) + ucontrol->value.integer.value[i] = chmap->map[i]; + } ++ for (; i < subs->channels_max; i++) ++ ucontrol->value.integer.value[i] = 0; + return 0; + } + +diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl +index 6c4f21db5fbad..fe587a9594634 100755 +--- a/tools/testing/ktest/ktest.pl ++++ b/tools/testing/ktest/ktest.pl +@@ -4197,7 +4197,12 @@ sub do_send_mail { + $mail_command =~ s/\$SUBJECT/$subject/g; + $mail_command =~ s/\$MESSAGE/$message/g; + +- run_command $mail_command; ++ my $ret = run_command $mail_command; ++ if (!$ret && defined($file)) { ++ # try again without the file ++ $message .= "\n\n*** FAILED TO SEND LOG ***\n\n"; ++ do_send_email($subject, $message); ++ } + } + + sub send_email { +diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc +index a753c73d869ab..0f60087583d8f 100644 +--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc ++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc +@@ -11,16 +11,12 @@ grep -A10 "fetcharg:" README | grep -q '\[u\]<offset>' || exit_unsupported + :;: "user-memory access syntax and ustring working on user memory";: + echo 'p:myevent do_sys_open path=+0($arg2):ustring path2=+u0($arg2):string' \ + > kprobe_events +-echo 'p:myevent2 do_sys_openat2 path=+0($arg2):ustring path2=+u0($arg2):string' \ +- >> kprobe_events + + grep myevent kprobe_events | \ + grep -q 'path=+0($arg2):ustring path2=+u0($arg2):string' + echo 1 > events/kprobes/myevent/enable +-echo 1 > events/kprobes/myevent2/enable + echo > /dev/null + echo 0 > events/kprobes/myevent/enable +-echo 0 > events/kprobes/myevent2/enable + + grep myevent trace | grep -q 'path="/dev/null" path2="/dev/null"' + +diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh +index 9fd3a0b97f0db..38133da2973d4 100755 +--- a/tools/testing/selftests/net/fcnal-test.sh ++++ b/tools/testing/selftests/net/fcnal-test.sh +@@ -239,6 +239,28 @@ setup_cmd_nsb() + fi + } + ++setup_cmd_nsc() ++{ ++ local cmd="$*" ++ local rc ++ ++ run_cmd_nsc ${cmd} ++ rc=$? 
++ if [ $rc -ne 0 ]; then ++ # show user the command if not done so already ++ if [ "$VERBOSE" = "0" ]; then ++ echo "setup command: $cmd" ++ fi ++ echo "failed. stopping tests" ++ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then ++ echo ++ echo "hit enter to continue" ++ read a ++ fi ++ exit $rc ++ fi ++} ++ + # set sysctl values in NS-A + set_sysctl() + { +@@ -447,6 +469,36 @@ setup() + sleep 1 + } + ++setup_lla_only() ++{ ++ # make sure we are starting with a clean slate ++ kill_procs ++ cleanup 2>/dev/null ++ ++ log_debug "Configuring network namespaces" ++ set -e ++ ++ create_ns ${NSA} "-" "-" ++ create_ns ${NSB} "-" "-" ++ create_ns ${NSC} "-" "-" ++ connect_ns ${NSA} ${NSA_DEV} "-" "-" \ ++ ${NSB} ${NSB_DEV} "-" "-" ++ connect_ns ${NSA} ${NSA_DEV2} "-" "-" \ ++ ${NSC} ${NSC_DEV} "-" "-" ++ ++ NSA_LINKIP6=$(get_linklocal ${NSA} ${NSA_DEV}) ++ NSB_LINKIP6=$(get_linklocal ${NSB} ${NSB_DEV}) ++ NSC_LINKIP6=$(get_linklocal ${NSC} ${NSC_DEV}) ++ ++ create_vrf ${NSA} ${VRF} ${VRF_TABLE} "-" "-" ++ ip -netns ${NSA} link set dev ${NSA_DEV} vrf ${VRF} ++ ip -netns ${NSA} link set dev ${NSA_DEV2} vrf ${VRF} ++ ++ set +e ++ ++ sleep 1 ++} ++ + ################################################################################ + # IPv4 + +@@ -3329,10 +3381,53 @@ use_case_br() + setup_cmd_nsb ip li del vlan100 2>/dev/null + } + ++# VRF only. ++# ns-A device is connected to both ns-B and ns-C on a single VRF but only has ++# LLA on the interfaces ++use_case_ping_lla_multi() ++{ ++ setup_lla_only ++ # only want reply from ns-A ++ setup_cmd_nsb sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1 ++ setup_cmd_nsc sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1 ++ ++ log_start ++ run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV} ++ log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Pre cycle, ping out ns-B" ++ ++ run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV} ++ log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Pre cycle, ping out ns-C" ++ ++ # cycle/flap the first ns-A interface ++ setup_cmd ip link set ${NSA_DEV} down ++ setup_cmd ip link set ${NSA_DEV} up ++ sleep 1 ++ ++ log_start ++ run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV} ++ log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-B" ++ run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV} ++ log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-C" ++ ++ # cycle/flap the second ns-A interface ++ setup_cmd ip link set ${NSA_DEV2} down ++ setup_cmd ip link set ${NSA_DEV2} up ++ sleep 1 ++ ++ log_start ++ run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV} ++ log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-B" ++ run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV} ++ log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-C" ++} ++ + use_cases() + { + log_section "Use cases" ++ log_subsection "Device enslaved to bridge" + use_case_br ++ log_subsection "Ping LLA with multiple interfaces" ++ use_case_ping_lla_multi + } + + ################################################################################ |