-rw-r--r--  0000_README                |    4
-rw-r--r--  1205_linux-4.19.206.patch  | 1086
2 files changed, 1090 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 6a39170b..1c6d7ee9 100644
--- a/0000_README
+++ b/0000_README
@@ -859,6 +859,10 @@ Patch:  1204_linux-4.19.205.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.205
 
+Patch:  1205_linux-4.19.206.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.206
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
diff --git a/1205_linux-4.19.206.patch b/1205_linux-4.19.206.patch
new file mode 100644
index 00000000..8997401d
--- /dev/null
+++ b/1205_linux-4.19.206.patch
@@ -0,0 +1,1086 @@
+diff --git a/Makefile b/Makefile
+index abc35829f47ba..3a3eea3ab10a5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 205
++SUBLEVEL = 206
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
+index f35ed578e007e..4d823d3f65bb3 100644
+--- a/arch/arc/kernel/vmlinux.lds.S
++++ b/arch/arc/kernel/vmlinux.lds.S
+@@ -92,6 +92,8 @@ SECTIONS
+ 		CPUIDLE_TEXT
+ 		LOCK_TEXT
+ 		KPROBES_TEXT
++		IRQENTRY_TEXT
++		SOFTIRQENTRY_TEXT
+ 		*(.fixup)
+ 		*(.gnu.warning)
+ 	}
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 762baba4ecd51..0cb82172c06cf 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4557,7 +4557,16 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
+ void
+ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+ {
+-	bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
++	/*
++	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
++	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
++	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
++	 * The iTLB multi-hit workaround can be toggled at any time, so assume
++	 * NX can be used by any non-nested shadow MMU to avoid having to reset
++	 * MMU contexts. Note, KVM forces EFER.NX=1 when TDP is disabled.
++	 */
++	bool uses_nx = context->nx || !tdp_enabled ||
++		context->base_role.smep_andnot_wp;
+ 	struct rsvd_bits_validate *shadow_zero_check;
+ 	int i;
+ 
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 04383f14c74a9..8f444b375761c 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -4074,22 +4074,21 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
+ 	if (UFDCS->rawcmd == 1)
+ 		UFDCS->rawcmd = 2;
+ 
+-	if (mode & (FMODE_READ|FMODE_WRITE)) {
+-		UDRS->last_checked = 0;
+-		clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+-		check_disk_change(bdev);
+-		if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+-			goto out;
+-		if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
++	if (!(mode & FMODE_NDELAY)) {
++		if (mode & (FMODE_READ|FMODE_WRITE)) {
++			UDRS->last_checked = 0;
++			clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
++			check_disk_change(bdev);
++			if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
++				goto out;
++			if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
++				goto out;
++		}
++		res = -EROFS;
++		if ((mode & FMODE_WRITE) &&
++		    !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
+ 			goto out;
+ 	}
+-
+-	res = -EROFS;
+-
+-	if ((mode & FMODE_WRITE) &&
+-	    !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
+-		goto out;
+-
+ 	mutex_unlock(&open_lock);
+ 	mutex_unlock(&floppy_mutex);
+ 	return 0;
+diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
+index ab8847c7dd964..87e13bcd7a670 100644
+--- a/drivers/gpu/drm/drm_ioc32.c
++++ b/drivers/gpu/drm/drm_ioc32.c
+@@ -855,8 +855,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
+ 	req.request.sequence = req32.request.sequence;
+ 	req.request.signal = req32.request.signal;
+ 	err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
+-	if (err)
+-		return err;
+ 
+ 	req32.reply.type = req.reply.type;
+ 	req32.reply.sequence = req.reply.sequence;
+@@ -865,7 +863,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
+ 	if (copy_to_user(argp, &req32, sizeof(req32)))
+ 		return -EFAULT;
+ 
+-	return 0;
++	return err;
+ }
+ 
+ #if defined(CONFIG_X86)
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+index 818d21bd28d31..1d2837c5a8f29 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+@@ -419,7 +419,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
+ 	return ret;
+ }
+ 
+-static void
++void
+ nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
+ {
+ 	struct nvkm_dp *dp = nvkm_dp(outp);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
+index 495f665a0ee68..12d6ff4cfa95d 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
+@@ -32,6 +32,7 @@ struct nvkm_dp {
+ 
+ int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *,
+ 		struct nvkm_outp **);
++void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
+ 
+ /* DPCD Receiver Capabilities */
+ #define DPCD_RC00_DPCD_REV 0x00000
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+index c62030c96fba0..4b1c72fd8f039 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+@@ -22,6 +22,7 @@
+  * Authors: Ben Skeggs
+  */
+ #include "outp.h"
++#include "dp.h"
+ #include "ior.h"
+ 
+ #include <subdev/bios.h>
+@@ -216,6 +217,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
+ 	if (!ior->arm.head || ior->arm.proto != proto) {
+ 		OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
+ 			 ior->arm.proto, proto);
++
++		/* The EFI GOP driver on Ampere can leave unused DP links routed,
++		 * which we don't expect. The DisableLT IED script *should* get
++		 * us back to where we need to be.
++		 */
++		if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP)
++			nvkm_dp_disable(outp, ior);
++
+ 		return;
+ 	}
+ 
+diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
+index 291c12f588b58..38258de75a94c 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -3055,6 +3055,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
+ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+ {
+ 	int i;
++	struct sdma_desc *descp;
+ 
+ 	/* Handle last descriptor */
+ 	if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
+@@ -3075,12 +3076,10 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+ 	if (unlikely(tx->num_desc == MAX_DESC))
+ 		goto enomem;
+ 
+-	tx->descp = kmalloc_array(
+-			MAX_DESC,
+-			sizeof(struct sdma_desc),
+-			GFP_ATOMIC);
+-	if (!tx->descp)
++	descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC);
++	if (!descp)
+ 		goto enomem;
++	tx->descp = descp;
+ 
+ 	/* reserve last descriptor for coalescing */
+ 	tx->desc_limit = MAX_DESC - 1;
+diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
+index e95358269525d..d4e6b40f0ed4e 100644
+--- a/drivers/net/can/usb/esd_usb2.c
++++ b/drivers/net/can/usb/esd_usb2.c
+@@ -236,8 +236,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
+ 	if (id == ESD_EV_CAN_ERROR_EXT) {
+ 		u8 state = msg->msg.rx.data[0];
+ 		u8 ecc = msg->msg.rx.data[1];
+-		u8 txerr = msg->msg.rx.data[2];
+-		u8 rxerr = msg->msg.rx.data[3];
++		u8 rxerr = msg->msg.rx.data[2];
++		u8 txerr = msg->msg.rx.data[3];
+ 
+ 		skb = alloc_can_err_skb(priv->netdev, &cf);
+ 		if (skb == NULL) {
+diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
+index 0f2ad50f3bd78..7f37e7cb687ef 100644
+--- a/drivers/net/ethernet/apm/xgene-v2/main.c
++++ b/drivers/net/ethernet/apm/xgene-v2/main.c
+@@ -691,11 +691,13 @@ static int xge_probe(struct platform_device *pdev)
+ 	ret = register_netdev(ndev);
+ 	if (ret) {
+ 		netdev_err(ndev, "Failed to register netdev\n");
+-		goto err;
++		goto err_mdio_remove;
+ 	}
+ 
+ 	return 0;
+ 
++err_mdio_remove:
++	xge_mdio_remove(ndev);
+ err:
+ 	free_netdev(ndev);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+index a75d7c826fc2b..dd935cd1fb44f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+@@ -204,21 +204,12 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
+ 	u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
+ 	struct hclge_vport *vport = hclge_get_vport(h);
+ 	struct hclge_dev *hdev = vport->back;
+-	u8 i, j, pfc_map, *prio_tc;
+ 	int ret;
++	u8 i;
+ 
+ 	memset(pfc, 0, sizeof(*pfc));
+ 	pfc->pfc_cap = hdev->pfc_max;
+-	prio_tc = hdev->tm_info.prio_tc;
+-	pfc_map = hdev->tm_info.hw_pfc_map;
+-
+-	/* Pfc setting is based on TC */
+-	for (i = 0; i < hdev->tm_info.num_tc; i++) {
+-		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
+-			if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
+-				pfc->pfc_en |= BIT(j);
+-		}
+-	}
++	pfc->pfc_en = hdev->tm_info.pfc_en;
+ 
+ 	ret = hclge_pfc_tx_stats_get(hdev, requests);
+ 	if (ret)
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index 7998a73b6a0fa..fbad77450725a 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -995,6 +995,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
+ {
+ 	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
+ 	    link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
++	u16 max_ltr_enc_d = 0;	/* maximum LTR decoded by platform */
++	u16 lat_enc_d = 0;	/* latency decoded */
+ 	u16 lat_enc = 0;	/* latency encoded */
+ 
+ 	if (link) {
+@@ -1048,7 +1050,17 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
+ 			     E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
+ 		max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
+ 
+-		if (lat_enc > max_ltr_enc)
++		lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) *
++			     (1U << (E1000_LTRV_SCALE_FACTOR *
++			     ((lat_enc & E1000_LTRV_SCALE_MASK)
++			     >> E1000_LTRV_SCALE_SHIFT)));
++
++		max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) *
++				 (1U << (E1000_LTRV_SCALE_FACTOR *
++				 ((max_ltr_enc & E1000_LTRV_SCALE_MASK)
++				 >> E1000_LTRV_SCALE_SHIFT)));
++
++		if (lat_enc_d > max_ltr_enc_d)
+ 			lat_enc = max_ltr_enc;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
+index 1502895eb45dd..e757896287eba 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
+@@ -274,8 +274,11 @@
+ 
+ /* Latency Tolerance Reporting */
+ #define E1000_LTRV			0x000F8
++#define E1000_LTRV_VALUE_MASK		0x000003FF
+ #define E1000_LTRV_SCALE_MAX		5
+ #define E1000_LTRV_SCALE_FACTOR		5
++#define E1000_LTRV_SCALE_SHIFT		10
++#define E1000_LTRV_SCALE_MASK		0x00001C00
+ #define E1000_LTRV_REQ_SHIFT		15
+ #define E1000_LTRV_NOSNOOP_SHIFT	16
+ #define E1000_LTRV_SEND			(1 << 30)
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index fda5dd8c71ebd..382d010e1294e 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -100,7 +100,7 @@
+ #define      MVNETA_DESC_SWAP                    BIT(6)
+ #define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
+ #define MVNETA_PORT_STATUS                       0x2444
+-#define      MVNETA_TX_IN_PRGRS                  BIT(1)
++#define      MVNETA_TX_IN_PRGRS                  BIT(0)
+ #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
+ #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
+ #define MVNETA_SERDES_CFG			 0x24A0
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+index 2847509a183d0..cb3569ac85f77 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+@@ -354,6 +354,9 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+ 	unsigned long flags;
+ 	int rc = -EINVAL;
+ 
++	if (!p_ll2_conn)
++		return rc;
++
+ 	spin_lock_irqsave(&p_tx->lock, flags);
+ 	if (p_tx->b_completing_packet) {
+ 		rc = -EBUSY;
+@@ -527,7 +530,16 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
+ 	unsigned long flags = 0;
+ 	int rc = 0;
+ 
++	if (!p_ll2_conn)
++		return rc;
++
+ 	spin_lock_irqsave(&p_rx->lock, flags);
++
++	if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
++		spin_unlock_irqrestore(&p_rx->lock, flags);
++		return 0;
++	}
++
+ 	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
+ 	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+ 
+@@ -848,6 +860,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+ 	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
+ 	int rc;
+ 
++	if (!p_ll2_conn)
++		return 0;
++
+ 	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
+ 		return 0;
+ 
+@@ -871,6 +886,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+ 	u16 new_idx = 0, num_bds = 0;
+ 	int rc;
+ 
++	if (!p_ll2_conn)
++		return 0;
++
+ 	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+ 		return 0;
+ 
+@@ -1628,6 +1646,8 @@ int qed_ll2_post_rx_buffer(void *cxt,
+ 	if (!p_ll2_conn)
+ 		return -EINVAL;
+ 	p_rx = &p_ll2_conn->rx_queue;
++	if (!p_rx->set_prod_addr)
++		return -EIO;
+ 
+ 	spin_lock_irqsave(&p_rx->lock, flags);
+ 	if (!list_empty(&p_rx->free_descq))
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+index 909422d939033..3392982ff3743 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+@@ -1244,8 +1244,7 @@ qed_rdma_create_qp(void *rdma_cxt,
+ 
+ 	if (!rdma_cxt || !in_params || !out_params ||
+ 	    !p_hwfn->p_rdma_info->active) {
+-		DP_ERR(p_hwfn->cdev,
+-		       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
++		pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+ 		       rdma_cxt, in_params, out_params);
+ 		return NULL;
+ 	}
+diff --git a/drivers/opp/of.c b/drivers/opp/of.c
+index d64a13d7881b9..a53123356697f 100644
+--- a/drivers/opp/of.c
++++ b/drivers/opp/of.c
+@@ -423,8 +423,9 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
+ 		}
+ 	}
+ 
+-	/* There should be one of more OPP defined */
+-	if (WARN_ON(!count)) {
++	/* There should be one or more OPPs defined */
++	if (!count) {
++		dev_err(dev, "%s: no supported OPPs", __func__);
+ 		ret = -ENOENT;
+ 		goto put_opp_table;
+ 	}
+diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
+index ce6c7dd7bc126..076b8a3f8e7a5 100644
+--- a/drivers/tty/vt/vt_ioctl.c
++++ b/drivers/tty/vt/vt_ioctl.c
+@@ -484,16 +484,19 @@ int vt_ioctl(struct tty_struct *tty,
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+-		/* FIXME: this needs the console lock extending */
+-		if (vc->vc_mode == (unsigned char) arg)
++		console_lock();
++		if (vc->vc_mode == (unsigned char) arg) {
++			console_unlock();
+ 			break;
++		}
+ 		vc->vc_mode = (unsigned char) arg;
+-		if (console != fg_console)
++		if (console != fg_console) {
++			console_unlock();
+ 			break;
++		}
+ 		/*
+ 		 * explicitly blank/unblank the screen if switching modes
+ 		 */
+-		console_lock();
+ 		if (arg == KD_TEXT)
+ 			do_unblank_screen(1);
+ 		else
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index c93bed41d988a..d61b7aa5d8e5d 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -894,19 +894,19 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
+ 
+ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
+ {
+-	struct dwc3_trb		*tmp;
+ 	u8			trbs_left;
+ 
+ 	/*
+-	 * If enqueue & dequeue are equal than it is either full or empty.
+-	 *
+-	 * One way to know for sure is if the TRB right before us has HWO bit
+-	 * set or not. If it has, then we're definitely full and can't fit any
+-	 * more transfers in our ring.
++	 * If the enqueue & dequeue are equal then the TRB ring is either full
++	 * or empty. It's considered full when there are DWC3_TRB_NUM-1 of TRBs
++	 * pending to be processed by the driver.
+ 	 */
+ 	if (dep->trb_enqueue == dep->trb_dequeue) {
+-		tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
+-		if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
++		/*
++		 * If there is any request remained in the started_list at
++		 * this point, that means there is no TRB available.
++		 */
++		if (!list_empty(&dep->started_list))
+ 			return 0;
+ 
+ 		return DWC3_TRB_NUM - 1;
+@@ -1805,10 +1805,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+ 
+ 		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
+ 				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
+-		if (ret == 0) {
+-			dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
+-			return -ETIMEDOUT;
+-		}
++		if (ret == 0)
++			dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
+ 	}
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+@@ -1946,6 +1944,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
+ 	/* begin to receive SETUP packets */
+ 	dwc->ep0state = EP0_SETUP_PHASE;
+ 	dwc->link_state = DWC3_LINK_STATE_SS_DIS;
++	dwc->delayed_status = false;
+ 	dwc3_ep0_out_start(dwc);
+ 
+ 	dwc3_gadget_enable_irq(dwc);
+diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
+index 0cb0c638fd131..168303f21bf45 100644
+--- a/drivers/usb/gadget/function/u_audio.c
++++ b/drivers/usb/gadget/function/u_audio.c
+@@ -349,8 +349,6 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
+ 	if (!prm->ep_enabled)
+ 		return;
+ 
+-	prm->ep_enabled = false;
+-
+ 	audio_dev = uac->audio_dev;
+ 	params = &audio_dev->params;
+ 
+@@ -368,11 +366,12 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
+ 		}
+ 	}
+ 
++	prm->ep_enabled = false;
++
+ 	if (usb_ep_disable(ep))
+ 		dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
+ }
+ 
+-
+ int u_audio_start_capture(struct g_audio *audio_dev)
+ {
+ 	struct snd_uac_chip *uac = audio_dev->uac;
+diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
+index e6dce35ca1aa9..c87cb25e70ec5 100644
+--- a/drivers/usb/serial/ch341.c
++++ b/drivers/usb/serial/ch341.c
+@@ -625,7 +625,6 @@ static struct usb_serial_driver ch341_device = {
+ 		.owner	= THIS_MODULE,
+ 		.name	= "ch341-uart",
+ 	},
+-	.bulk_in_size      = 512,
+ 	.id_table          = id_table,
+ 	.num_ports         = 1,
+ 	.open              = ch341_open,
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index e6103a27e4403..d08b799c91fc6 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2074,6 +2074,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(4) | RSVD(5) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),	/* Fibocom NL678 series */
+ 	  .driver_info = RSVD(6) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) },	/* Fibocom FG150 Diag */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) },		/* Fibocom FG150 AT */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },			/* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },			/* LongSung M5710 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },			/* GosunCn GM500 RNDIS */
+diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
+index 59c61744dcc16..97aa9b87e5722 100644
+--- a/drivers/vhost/vringh.c
++++ b/drivers/vhost/vringh.c
+@@ -330,7 +330,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
+ 			iov = wiov;
+ 		else {
+ 			iov = riov;
+-			if (unlikely(wiov && wiov->i)) {
++			if (unlikely(wiov && wiov->used)) {
+ 				vringh_bad("Readable desc %p after writable",
+ 					   &descs[i]);
+ 				err = -EINVAL;
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index 84845275dbef0..de04c097d67c7 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -991,6 +991,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
+ 			goto done;
+ 	}
+ 
++	/* bitfill_aligned() assumes that it's at least 8x8 */
++	if (var->xres < 8 || var->yres < 8)
++		return -EINVAL;
++
+ 	ret = info->fbops->fb_check_var(var, info);
+ 
+ 	if (ret)
+diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
+index 45b04bc91f248..b7cc63f556eea 100644
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -579,6 +579,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
+ 	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ 	struct device *dev = get_device(&vp_dev->vdev.dev);
+ 
++	/*
++	 * Device is marked broken on surprise removal so that virtio upper
++	 * layers can abort any ongoing operation.
++	 */
++	if (!pci_device_is_present(pci_dev))
++		virtio_break_device(&vp_dev->vdev);
++
+ 	pci_disable_sriov(pci_dev);
+ 
+ 	unregister_virtio_device(&vp_dev->vdev);
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index df7980aef927a..0cc0cfd3a3cb7 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -1197,7 +1197,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq)
+ {
+ 	struct vring_virtqueue *vq = to_vvq(_vq);
+ 
+-	return vq->broken;
++	return READ_ONCE(vq->broken);
+ }
+ EXPORT_SYMBOL_GPL(virtqueue_is_broken);
+ 
+@@ -1211,7 +1211,9 @@ void virtio_break_device(struct virtio_device *dev)
+ 
+ 	list_for_each_entry(_vq, &dev->vqs, list) {
+ 		struct vring_virtqueue *vq = to_vvq(_vq);
+-		vq->broken = true;
++
++		/* Pairs with READ_ONCE() in virtqueue_is_broken. */
++		WRITE_ONCE(vq->broken, true);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(virtio_break_device);
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index 117f9380069a8..7c84762cb59e5 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -77,6 +77,14 @@ struct sock_reuseport;
+ 
+ /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
+ 
++#define BPF_ALU_REG(CLASS, OP, DST, SRC)			\
++	((struct bpf_insn) {					\
++		.code  = CLASS | BPF_OP(OP) | BPF_X,		\
++		.dst_reg = DST,					\
++		.src_reg = SRC,					\
++		.off   = 0,					\
++		.imm   = 0 })
++
+ #define BPF_ALU64_REG(OP, DST, SRC)				\
+ 	((struct bpf_insn) {					\
+ 		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
+@@ -123,6 +131,14 @@ struct sock_reuseport;
+ 
+ /* Short form of mov, dst_reg = src_reg */
+ 
++#define BPF_MOV_REG(CLASS, DST, SRC)				\
++	((struct bpf_insn) {					\
++		.code  = CLASS | BPF_MOV | BPF_X,		\
++		.dst_reg = DST,					\
++		.src_reg = SRC,					\
++		.off   = 0,					\
++		.imm   = 0 })
++
+ #define BPF_MOV64_REG(DST, SRC)					\
+ 	((struct bpf_insn) {					\
+ 		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
+@@ -157,6 +173,14 @@ struct sock_reuseport;
+ 		.off   = 0,					\
+ 		.imm   = IMM })
+ 
++#define BPF_RAW_REG(insn, DST, SRC)				\
++	((struct bpf_insn) {					\
++		.code  = (insn).code,				\
++		.dst_reg = DST,					\
++		.src_reg = SRC,					\
++		.off   = (insn).off,				\
++		.imm   = (insn).imm })
++
+ /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
+ #define BPF_LD_IMM64(DST, IMM)					\
+ 	BPF_LD_IMM64_RAW(DST, 0, IMM)
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index ca5f053c6b66a..fbd689c15974e 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3594,6 +3594,10 @@ int netdev_rx_handler_register(struct net_device *dev,
+ void netdev_rx_handler_unregister(struct net_device *dev);
+ 
+ bool dev_valid_name(const char *name);
++static inline bool is_socket_ioctl_cmd(unsigned int cmd)
++{
++	return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
++}
+ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
+ 		bool *need_copyout);
+ int dev_ifconf(struct net *net, struct ifconf *, int);
+diff --git a/include/linux/once.h b/include/linux/once.h
+index 9225ee6d96c75..ae6f4eb41cbe7 100644
+--- a/include/linux/once.h
++++ b/include/linux/once.h
+@@ -7,7 +7,7 @@
+ 
+ bool __do_once_start(bool *done, unsigned long *flags);
+ void __do_once_done(bool *done, struct static_key_true *once_key,
+-		    unsigned long *flags);
++		    unsigned long *flags, struct module *mod);
+ 
+ /* Call a function exactly once. The idea of DO_ONCE() is to perform
+  * a function call such as initialization of random seeds, etc, only
+@@ -46,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
+ 		if (unlikely(___ret)) {				\
+ 			func(__VA_ARGS__);			\
+ 			__do_once_done(&___done, &___once_key,	\
+-				       &___flags);		\
++				       &___flags, THIS_MODULE);	\
+ 		}						\
+ 	}							\
+ 	___ret;							\
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 36be400c3e657..d2b6d2459aad4 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -705,9 +705,6 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
+ 	 * below.
+ 	 *
+ 	 * Constant blinding is only used by JITs, not in the interpreter.
+-	 * The interpreter uses AX in some occasions as a local temporary
+-	 * register e.g. in DIV or MOD instructions.
+-	 *
+ 	 * In restricted circumstances, the verifier can also use the AX
+ 	 * register for rewrites as long as they do not interfere with
+ 	 * the above cases!
+@@ -1057,6 +1054,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
+ #undef BPF_INSN_3_LBL
+ #undef BPF_INSN_2_LBL
+ 	u32 tail_call_cnt = 0;
++	u64 tmp;
+ 
+ #define CONT	 ({ insn++; goto select_insn; })
+ #define CONT_JMP ({ insn++; goto select_insn; })
+@@ -1117,36 +1115,36 @@ select_insn:
+ 		(*(s64 *) &DST) >>= IMM;
+ 		CONT;
+ 	ALU64_MOD_X:
+-		div64_u64_rem(DST, SRC, &AX);
+-		DST = AX;
++		div64_u64_rem(DST, SRC, &tmp);
++		DST = tmp;
+ 		CONT;
+ 	ALU_MOD_X:
+-		AX = (u32) DST;
+-		DST = do_div(AX, (u32) SRC);
++		tmp = (u32) DST;
++		DST = do_div(tmp, (u32) SRC);
+ 		CONT;
+ 	ALU64_MOD_K:
+-		div64_u64_rem(DST, IMM, &AX);
+-		DST = AX;
++		div64_u64_rem(DST, IMM, &tmp);
++		DST = tmp;
+ 		CONT;
+ 	ALU_MOD_K:
+-		AX = (u32) DST;
+-		DST = do_div(AX, (u32) IMM);
++		tmp = (u32) DST;
++		DST = do_div(tmp, (u32) IMM);
+ 		CONT;
+ 	ALU64_DIV_X:
+ 		DST = div64_u64(DST, SRC);
+ 		CONT;
+ 	ALU_DIV_X:
+-		AX = (u32) DST;
+-		do_div(AX, (u32) SRC);
+-		DST = (u32) AX;
++		tmp = (u32) DST;
++		do_div(tmp, (u32) SRC);
++		DST = (u32) tmp;
+ 		CONT;
+ 	ALU64_DIV_K:
+ 		DST = div64_u64(DST, IMM);
+ 		CONT;
+ 	ALU_DIV_K:
+-		AX = (u32) DST;
+-		do_div(AX, (u32) IMM);
+-		DST = (u32) AX;
++		tmp = (u32) DST;
++		do_div(tmp, (u32) IMM);
++		DST = (u32) tmp;
+ 		CONT;
+ 	ALU_END_TO_BE:
+ 		switch (IMM) {
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 2bf83305e5aba..abdc9eca463c5 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -6177,28 +6177,29 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
+ 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+ 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
+ 			struct bpf_insn mask_and_div[] = {
+-				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+-				/* Rx div 0 -> 0 */
+-				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
+-				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
++				BPF_MOV_REG(BPF_CLASS(insn->code), BPF_REG_AX, insn->src_reg),
++				/* [R,W]x div 0 -> 0 */
++				BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, 2),
++				BPF_RAW_REG(*insn, insn->dst_reg, BPF_REG_AX),
+ 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+-				*insn,
++				BPF_ALU_REG(BPF_CLASS(insn->code), BPF_XOR, insn->dst_reg, insn->dst_reg),
+ 			};
+ 			struct bpf_insn mask_and_mod[] = {
+-				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+-				/* Rx mod 0 -> Rx */
+-				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
+-				*insn,
++				BPF_MOV_REG(BPF_CLASS(insn->code), BPF_REG_AX, insn->src_reg),
++				BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, 1 + (is64 ? 0 : 1)),
++				BPF_RAW_REG(*insn, insn->dst_reg, BPF_REG_AX),
++				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
+ 			};
+ 			struct bpf_insn *patchlet;
+ 
+ 			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
+ 			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+-				patchlet = mask_and_div + (is64 ? 1 : 0);
+-				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
++				patchlet = mask_and_div;
++				cnt = ARRAY_SIZE(mask_and_div);
+ 			} else {
+-				patchlet = mask_and_mod + (is64 ? 1 : 0);
+-				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
++				patchlet = mask_and_mod;
++				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 2 : 0);
+ 			}
+ 
+ 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
+diff --git a/lib/once.c b/lib/once.c
+index 8b7d6235217ee..59149bf3bfb4a 100644
+--- a/lib/once.c
++++ b/lib/once.c
+@@ -3,10 +3,12 @@
+ #include <linux/spinlock.h>
+ #include <linux/once.h>
+ #include <linux/random.h>
++#include <linux/module.h>
+ 
+ struct once_work {
+ 	struct work_struct work;
+ 	struct static_key_true *key;
++	struct module *module;
+ };
+ 
+ static void once_deferred(struct work_struct *w)
+@@ -16,10 +18,11 @@ static void once_deferred(struct work_struct *w)
+ 	work = container_of(w, struct once_work, work);
+ 	BUG_ON(!static_key_enabled(work->key));
+ 	static_branch_disable(work->key);
++	module_put(work->module);
+ 	kfree(work);
+ }
+ 
+-static void once_disable_jump(struct static_key_true *key)
++static void once_disable_jump(struct static_key_true *key, struct module *mod)
+ {
+ 	struct once_work *w;
+ 
+@@ -29,6 +32,8 @@ static void once_disable_jump(struct static_key_true *key)
+ 
+ 	INIT_WORK(&w->work, once_deferred);
+ 	w->key = key;
++	w->module = mod;
++	__module_get(mod);
+ 	schedule_work(&w->work);
+ }
+ 
+@@ -53,11 +58,11 @@ bool __do_once_start(bool *done, unsigned long *flags)
+ EXPORT_SYMBOL(__do_once_start);
+ 
+ void __do_once_done(bool *done, struct static_key_true *once_key,
+-		    unsigned long *flags)
++		    unsigned long *flags, struct module *mod)
+ 	__releases(once_lock)
+ {
+ 	*done = true;
+ 	spin_unlock_irqrestore(&once_lock, *flags);
+-	once_disable_jump(once_key);
++	once_disable_jump(once_key, mod);
+ }
+ EXPORT_SYMBOL(__do_once_done);
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index de6f89511a216..a8a37d1128201 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -449,6 +449,8 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
+ 
+ static int gre_handle_offloads(struct sk_buff *skb, bool csum)
+ {
++	if (csum && skb_checksum_start(skb) < skb->data)
++		return -EINVAL;
+ 	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
+ }
+ 
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index c5590d36b7750..a38caf317dbb4 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -70,10 +70,9 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+ 
+ struct conntrack_gc_work {
+ 	struct delayed_work	dwork;
+-	u32			last_bucket;
++	u32			next_bucket;
+ 	bool			exiting;
+ 	bool			early_drop;
+-	long			next_gc_run;
+ };
+ 
+ static __read_mostly struct kmem_cache *nf_conntrack_cachep;
+@@ -81,12 +80,8 @@ static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
+ static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
+ static __read_mostly bool nf_conntrack_locks_all;
+ 
+-/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
+-#define GC_MAX_BUCKETS_DIV	128u
+-/* upper bound of full table scan */
+-#define GC_MAX_SCAN_JIFFIES	(16u * HZ)
+-/* desired ratio of entries found to be expired */
+-#define GC_EVICT_RATIO	50u
++#define GC_SCAN_INTERVAL	(120u * HZ)
++#define GC_SCAN_MAX_DURATION	msecs_to_jiffies(10)
+ 
+ static struct conntrack_gc_work conntrack_gc_work;
+ 
+@@ -1198,17 +1193,13 @@ static void nf_ct_offload_timeout(struct nf_conn *ct)
+ 
+ static void gc_worker(struct work_struct *work)
+ {
+-	unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
+-	unsigned int i, goal, buckets = 0, expired_count = 0;
+-	unsigned int nf_conntrack_max95 = 0;
++	unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
++	unsigned int i, hashsz, nf_conntrack_max95 = 0;
++	unsigned long next_run = GC_SCAN_INTERVAL;
+ 	struct conntrack_gc_work *gc_work;
+-	unsigned int ratio, scanned = 0;
+-	unsigned long next_run;
+-
+ 	gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
+ 
+-	goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
+-	i = gc_work->last_bucket;
++	i = gc_work->next_bucket;
+ 	if (gc_work->early_drop)
+ 		nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
+ 
+@@ -1216,22 +1207,21 @@ static void gc_worker(struct work_struct *work)
+ 		struct nf_conntrack_tuple_hash *h;
+ 		struct hlist_nulls_head *ct_hash;
+ 		struct hlist_nulls_node *n;
+-		unsigned int hashsz;
+ 		struct nf_conn *tmp;
+ 
+-		i++;
+ 		rcu_read_lock();
+ 
+ 		nf_conntrack_get_ht(&ct_hash, &hashsz);
+-		if (i >= hashsz)
+-			i = 0;
++		if (i >= hashsz) {
++			rcu_read_unlock();
++			break;
++		}
+ 
+ 		hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
+ 			struct net *net;
+ 
+ 			tmp = nf_ct_tuplehash_to_ctrack(h);
+ 
+-			scanned++;
+ 			if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
+ 				nf_ct_offload_timeout(tmp);
+ 				continue;
+@@ -1239,7 +1229,6 @@ static void gc_worker(struct work_struct *work)
+ 
+ 			if (nf_ct_is_expired(tmp)) {
+ 				nf_ct_gc_expired(tmp);
+-				expired_count++;
+ 				continue;
+ 			}
+ 
+@@ -1271,7 +1260,14 @@ static void gc_worker(struct work_struct *work)
+ 		 */
+ 		rcu_read_unlock();
+ 		cond_resched();
+-	} while (++buckets < goal);
++		i++;
++
++		if (time_after(jiffies, end_time) && i < hashsz) {
++			gc_work->next_bucket = i;
++			next_run = 0;
++			break;
++		}
++	} while (i < hashsz);
+ 
+ 	if (gc_work->exiting)
+ 		return;
+@@ -1282,40 +1278,17 @@ static void gc_worker(struct work_struct *work)
+ 	 *
+ 	 * This worker is only here to reap expired entries when system went
+ 	 * idle after a busy period.
+-	 *
+-	 * The heuristics below are supposed to balance conflicting goals:
+-	 *
+-	 * 1. Minimize time until we notice a stale entry
+-	 * 2. Maximize scan intervals to not waste cycles
+-	 *
+-	 * Normally, expire ratio will be close to 0.
+-	 *
+-	 * As soon as a sizeable fraction of the entries have expired
+-	 * increase scan frequency.
+ 	 */
+-	ratio = scanned ? expired_count * 100 / scanned : 0;
+-	if (ratio > GC_EVICT_RATIO) {
+-		gc_work->next_gc_run = min_interval;
+-	} else {
+-		unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
+-
+-		BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
+-
+-		gc_work->next_gc_run += min_interval;
+-		if (gc_work->next_gc_run > max)
+-			gc_work->next_gc_run = max;
++	if (next_run) {
++		gc_work->early_drop = false;
++		gc_work->next_bucket = 0;
+ 	}
+-
+-	next_run = gc_work->next_gc_run;
+-	gc_work->last_bucket = i;
+-	gc_work->early_drop = false;
+ 	queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
+ }
+ 
+ static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
+ {
+ 	INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
+-	gc_work->next_gc_run = HZ;
+ 	gc_work->exiting = false;
+ }
+ 
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index 1e2772913957d..128d0a48478d1 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -321,7 +321,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
+ 		goto err;
+ 	}
+ 
+-	if (len != ALIGN(size, 4) + hdrlen)
++	if (!size || len != ALIGN(size, 4) + hdrlen)
+ 		goto err;
+ 
+ 	if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA)
+diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
+index 6431a023ac895..46988c009a3e2 100644
+--- a/net/rds/ib_frmr.c
++++ b/net/rds/ib_frmr.c
+@@ -111,9 +111,9 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
+ 		cpu_relax();
+ 	}
+ 
+-	ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
++	ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
+ 				&off, PAGE_SIZE);
+-	if (unlikely(ret != ibmr->sg_len))
++	if (unlikely(ret != ibmr->sg_dma_len))
+ 		return ret < 0 ? ret : -EINVAL;
+ 
+ 	/* Perform a WR for the fast_reg_mr. Each individual page
+diff --git a/net/socket.c b/net/socket.c
+index f14bca00ff010..e5cc9f2b981ed 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1030,7 +1030,7 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
+ 		rtnl_unlock();
+ 		if (!err && copy_to_user(argp, &ifc, sizeof(struct ifconf)))
+ 			err = -EFAULT;
+-	} else {
++	} else if (is_socket_ioctl_cmd(cmd)) {
+ 		struct ifreq ifr;
+ 		bool need_copyout;
+ 		if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
+@@ -1039,6 +1039,8 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
+ 		if (!err && need_copyout)
+ 			if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
+ 				return -EFAULT;
++	} else {
++		err = -ENOTTY;
+ 	}
+ 	return err;
+ }
+@@ -3064,6 +3066,8 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
+ 	struct ifreq ifreq;
+ 	u32 data32;
+ 
++	if (!is_socket_ioctl_cmd(cmd))
++		return -ENOTTY;
+ 	if (copy_from_user(ifreq.ifr_name, u_ifreq32->ifr_name, IFNAMSIZ))
+ 		return -EFAULT;
+ 	if (get_user(data32, &u_ifreq32->ifr_data))