author     Alice Ferrazzi <alicef@gentoo.org>  2021-04-28 21:05:13 +0900
committer  Alice Ferrazzi <alicef@gentoo.org>  2021-04-28 21:05:27 +0900
commit     96af2504c6ae2a1e698861b8847b14b7ace48889 (patch)
tree       218122affe1f032dcc294eb35819ab77f0ef0df6
parent     Linux patch 5.11.16 (diff)
download   linux-patches-96af2504c6ae2a1e698861b8847b14b7ace48889.tar.gz
           linux-patches-96af2504c6ae2a1e698861b8847b14b7ace48889.tar.bz2
           linux-patches-96af2504c6ae2a1e698861b8847b14b7ace48889.zip
Linux patch 5.11.17 (tag: 5.11-20)
Signed-off-by: Alice Ferrazzi <alicef@gentoo.org>
-rw-r--r--  0000_README              |    4
-rw-r--r--  1016_linux-5.11.17.patch | 2076
2 files changed, 2080 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index e06ab594..c4f4eb4b 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch: 1015_linux-5.11.16.patch
From: http://www.kernel.org
Desc: Linux 5.11.16
+Patch: 1016_linux-5.11.17.patch
+From: http://www.kernel.org
+Desc: Linux 5.11.17
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1016_linux-5.11.17.patch b/1016_linux-5.11.17.patch
new file mode 100644
index 00000000..86f76ba0
--- /dev/null
+++ b/1016_linux-5.11.17.patch
@@ -0,0 +1,2076 @@
+diff --git a/Makefile b/Makefile
+index 124d8e2007765..d8367e1932324 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+
+diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
+index 9dcae1f2bc99f..c5b9da0d7e6ce 100644
+--- a/arch/arm/boot/dts/omap3.dtsi
++++ b/arch/arm/boot/dts/omap3.dtsi
+@@ -24,6 +24,9 @@
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
++ mmc0 = &mmc1;
++ mmc1 = &mmc2;
++ mmc2 = &mmc3;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+index a1f621b388fe7..358df6d926aff 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+@@ -10,5 +10,5 @@
+ };
+
+ &mmc0 {
+- cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 push-push switch */
++ broken-cd; /* card detect is broken on *some* boards */
+ };
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index 66aac2881ba84..85645b2b0c7ab 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -267,10 +267,12 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
+ if (!instruction_pointer(regs))
+ BUG();
+
+- if (kcb->kprobe_status == KPROBE_REENTER)
++ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+- else
++ } else {
++ kprobes_restore_local_irqflag(kcb, regs);
+ reset_current_kprobe();
++ }
+
+ break;
+ case KPROBE_HIT_ACTIVE:
+diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
+index 89dd2fcf38fa1..3b16d081b4d7f 100644
+--- a/arch/csky/Kconfig
++++ b/arch/csky/Kconfig
+@@ -292,7 +292,7 @@ config FORCE_MAX_ZONEORDER
+ int "Maximum zone order"
+ default "11"
+
+-config RAM_BASE
++config DRAM_BASE
+ hex "DRAM start addr (the same with memory-section in dts)"
+ default 0x0
+
+diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h
+index 9b98bf31d57ce..16878240ef9ac 100644
+--- a/arch/csky/include/asm/page.h
++++ b/arch/csky/include/asm/page.h
+@@ -28,7 +28,7 @@
+ #define SSEG_SIZE 0x20000000
+ #define LOWMEM_LIMIT (SSEG_SIZE * 2)
+
+-#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1))
++#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1))
+
+ #ifndef __ASSEMBLY__
+
+diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
+index c7311131156e8..ba3edb8a04b16 100644
+--- a/arch/ia64/mm/discontig.c
++++ b/arch/ia64/mm/discontig.c
+@@ -94,7 +94,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
+ * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
+ * called yet. Note that node 0 will also count all non-existent cpus.
+ */
+-static int __meminit early_nr_cpus_node(int node)
++static int early_nr_cpus_node(int node)
+ {
+ int cpu, n = 0;
+
+@@ -109,7 +109,7 @@ static int __meminit early_nr_cpus_node(int node)
+ * compute_pernodesize - compute size of pernode data
+ * @node: the node id.
+ */
+-static unsigned long __meminit compute_pernodesize(int node)
++static unsigned long compute_pernodesize(int node)
+ {
+ unsigned long pernodesize = 0, cpus;
+
+@@ -366,7 +366,7 @@ static void __init reserve_pernode_space(void)
+ }
+ }
+
+-static void __meminit scatter_node_data(void)
++static void scatter_node_data(void)
+ {
+ pg_data_t **dst;
+ int node;
+diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h
+index 7f5912af2a52e..21b1071e0a34a 100644
+--- a/arch/m68k/include/asm/page_mm.h
++++ b/arch/m68k/include/asm/page_mm.h
+@@ -167,7 +167,7 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
+ ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \
+ })
+ #else
+-#define ARCH_PFN_OFFSET (m68k_memory[0].addr)
++#define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT)
+ #include <asm-generic/memory_model.h>
+ #endif
+
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index f1ba197b10c0e..f0a215cf010c7 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -976,6 +976,7 @@ ENDPROC(ext_int_handler)
+ * Load idle PSW.
+ */
+ ENTRY(psw_idle)
++ stg %r14,(__SF_GPRS+8*8)(%r15)
+ stg %r3,__SF_EMPTY(%r15)
+ larl %r1,.Lpsw_idle_exit
+ stg %r1,__SF_EMPTY+8(%r15)
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index bfd42e0853ed6..6c88f245b33ac 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4400,7 +4400,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002),
+- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
++ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index 7bdb1821215db..3112186a4f4b2 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -1159,7 +1159,6 @@ enum {
+ SNBEP_PCI_QPI_PORT0_FILTER,
+ SNBEP_PCI_QPI_PORT1_FILTER,
+ BDX_PCI_QPI_PORT2_FILTER,
+- HSWEP_PCI_PCU_3,
+ };
+
+ static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+@@ -2816,22 +2815,33 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
+ NULL,
+ };
+
+-void hswep_uncore_cpu_init(void)
++#define HSWEP_PCU_DID 0x2fc0
++#define HSWEP_PCU_CAPID4_OFFET 0x94
++#define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3)
++
++static bool hswep_has_limit_sbox(unsigned int device)
+ {
+- int pkg = boot_cpu_data.logical_proc_id;
++ struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
++ u32 capid4;
++
++ if (!dev)
++ return false;
++
++ pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
++ if (!hswep_get_chop(capid4))
++ return true;
+
++ return false;
++}
++
++void hswep_uncore_cpu_init(void)
++{
+ if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+ hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+
+ /* Detect 6-8 core systems with only two SBOXes */
+- if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
+- u32 capid4;
+-
+- pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
+- 0x94, &capid4);
+- if (((capid4 >> 6) & 0x3) == 0)
+- hswep_uncore_sbox.num_boxes = 2;
+- }
++ if (hswep_has_limit_sbox(HSWEP_PCU_DID))
++ hswep_uncore_sbox.num_boxes = 2;
+
+ uncore_msr_uncores = hswep_msr_uncores;
+ }
+@@ -3094,11 +3104,6 @@ static const struct pci_device_id hswep_uncore_pci_ids[] = {
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ SNBEP_PCI_QPI_PORT1_FILTER),
+ },
+- { /* PCU.3 (for Capability registers) */
+- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
+- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+- HSWEP_PCI_PCU_3),
+- },
+ { /* end: all zeroes */ }
+ };
+
+@@ -3190,27 +3195,18 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
+ EVENT_CONSTRAINT_END
+ };
+
++#define BDX_PCU_DID 0x6fc0
++
+ void bdx_uncore_cpu_init(void)
+ {
+- int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
+-
+ if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+ bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+ uncore_msr_uncores = bdx_msr_uncores;
+
+- /* BDX-DE doesn't have SBOX */
+- if (boot_cpu_data.x86_model == 86) {
+- uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
+ /* Detect systems with no SBOXes */
+- } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
+- struct pci_dev *pdev;
+- u32 capid4;
+-
+- pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
+- pci_read_config_dword(pdev, 0x94, &capid4);
+- if (((capid4 >> 6) & 0x3) == 0)
+- bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
+- }
++ if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
++ uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
++
+ hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
+ }
+
+@@ -3431,11 +3427,6 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ BDX_PCI_QPI_PORT2_FILTER),
+ },
+- { /* PCU.3 (for Capability registers) */
+- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
+- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+- HSWEP_PCI_PCU_3),
+- },
+ { /* end: all zeroes */ }
+ };
+
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index a8f3af257e26c..b1deacbeb2669 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -337,7 +337,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
+ struct crash_memmap_data cmd;
+ struct crash_mem *cmem;
+
+- cmem = vzalloc(sizeof(struct crash_mem));
++ cmem = vzalloc(struct_size(cmem, ranges, 1));
+ if (!cmem)
+ return -ENOMEM;
+
+diff --git a/block/ioctl.c b/block/ioctl.c
+index ff241e663c018..8ba1ed8defd0b 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -89,6 +89,8 @@ static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
+ return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
++ if (bdev->bd_part_count)
++ return -EBUSY;
+
+ /*
+ * Reopen the device to revalidate the driver state and force a
+diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
+index 71827d9b0aa19..b7260749e8eee 100644
+--- a/drivers/dma/tegra20-apb-dma.c
++++ b/drivers/dma/tegra20-apb-dma.c
+@@ -723,7 +723,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
+ goto end;
+ }
+ if (!tdc->busy) {
+- err = pm_runtime_get_sync(tdc->tdma->dev);
++ err = pm_runtime_resume_and_get(tdc->tdma->dev);
+ if (err < 0) {
+ dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
+ goto end;
+@@ -818,7 +818,7 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ int err;
+
+- err = pm_runtime_get_sync(tdc->tdma->dev);
++ err = pm_runtime_resume_and_get(tdc->tdma->dev);
+ if (err < 0) {
+ dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
+ return;
+diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
+index 55df63dead8d3..70b29bd079c9f 100644
+--- a/drivers/dma/xilinx/xilinx_dpdma.c
++++ b/drivers/dma/xilinx/xilinx_dpdma.c
+@@ -839,6 +839,7 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
+ struct xilinx_dpdma_tx_desc *desc;
+ struct virt_dma_desc *vdesc;
+ u32 reg, channels;
++ bool first_frame;
+
+ lockdep_assert_held(&chan->lock);
+
+@@ -852,14 +853,6 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
+ chan->running = true;
+ }
+
+- if (chan->video_group)
+- channels = xilinx_dpdma_chan_video_group_ready(chan);
+- else
+- channels = BIT(chan->id);
+-
+- if (!channels)
+- return;
+-
+ vdesc = vchan_next_desc(&chan->vchan);
+ if (!vdesc)
+ return;
+@@ -884,13 +877,26 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
+ FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
+ upper_32_bits(sw_desc->dma_addr)));
+
+- if (chan->first_frame)
++ first_frame = chan->first_frame;
++ chan->first_frame = false;
++
++ if (chan->video_group) {
++ channels = xilinx_dpdma_chan_video_group_ready(chan);
++ /*
++ * Trigger the transfer only when all channels in the group are
++ * ready.
++ */
++ if (!channels)
++ return;
++ } else {
++ channels = BIT(chan->id);
++ }
++
++ if (first_frame)
+ reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
+ else
+ reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);
+
+- chan->first_frame = false;
+-
+ dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
+ }
+
+@@ -1042,13 +1048,14 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
+ */
+ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
+ {
+- struct xilinx_dpdma_tx_desc *active = chan->desc.active;
++ struct xilinx_dpdma_tx_desc *active;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ xilinx_dpdma_debugfs_desc_done_irq(chan);
+
++ active = chan->desc.active;
+ if (active)
+ vchan_cyclic_callback(&active->vdesc);
+ else
+diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
+index 41952bb818ad5..56152263ab38f 100644
+--- a/drivers/gpio/gpio-omap.c
++++ b/drivers/gpio/gpio-omap.c
+@@ -29,6 +29,7 @@
+ #define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
+
+ struct gpio_regs {
++ u32 sysconfig;
+ u32 irqenable1;
+ u32 irqenable2;
+ u32 wake_en;
+@@ -1069,6 +1070,7 @@ static void omap_gpio_init_context(struct gpio_bank *p)
+ const struct omap_gpio_reg_offs *regs = p->regs;
+ void __iomem *base = p->base;
+
++ p->context.sysconfig = readl_relaxed(base + regs->sysconfig);
+ p->context.ctrl = readl_relaxed(base + regs->ctrl);
+ p->context.oe = readl_relaxed(base + regs->direction);
+ p->context.wake_en = readl_relaxed(base + regs->wkup_en);
+@@ -1088,6 +1090,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
+ const struct omap_gpio_reg_offs *regs = bank->regs;
+ void __iomem *base = bank->base;
+
++ writel_relaxed(bank->context.sysconfig, base + regs->sysconfig);
+ writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
+ writel_relaxed(bank->context.ctrl, base + regs->ctrl);
+ writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
+@@ -1115,6 +1118,10 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
+
+ bank->saved_datain = readl_relaxed(base + bank->regs->datain);
+
++ /* Save sysconfig; its runtime value can differ from the init value */
++ if (bank->loses_context)
++ bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig);
++
+ if (!bank->enabled_non_wakeup_gpios)
+ goto update_gpio_context_count;
+
+@@ -1279,6 +1286,7 @@ out_unlock:
+
+ static const struct omap_gpio_reg_offs omap2_gpio_regs = {
+ .revision = OMAP24XX_GPIO_REVISION,
++ .sysconfig = OMAP24XX_GPIO_SYSCONFIG,
+ .direction = OMAP24XX_GPIO_OE,
+ .datain = OMAP24XX_GPIO_DATAIN,
+ .dataout = OMAP24XX_GPIO_DATAOUT,
+@@ -1302,6 +1310,7 @@ static const struct omap_gpio_reg_offs omap2_gpio_regs = {
+
+ static const struct omap_gpio_reg_offs omap4_gpio_regs = {
+ .revision = OMAP4_GPIO_REVISION,
++ .sysconfig = OMAP4_GPIO_SYSCONFIG,
+ .direction = OMAP4_GPIO_OE,
+ .datain = OMAP4_GPIO_DATAIN,
+ .dataout = OMAP4_GPIO_DATAOUT,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index b24cb44739132..8090c1e7a3bac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -3298,7 +3298,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
+ struct amdgpu_bo *root;
+ uint64_t value, flags;
+ struct amdgpu_vm *vm;
+- long r;
++ int r;
+
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+@@ -3347,6 +3347,12 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
+ value = 0;
+ }
+
++ r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
++ if (r) {
++ pr_debug("failed %d to reserve fence slot\n", r);
++ goto error_unlock;
++ }
++
+ r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
+ addr, flags, value, NULL, NULL,
+ NULL);
+@@ -3358,7 +3364,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
+ error_unlock:
+ amdgpu_bo_unreserve(root);
+ if (r < 0)
+- DRM_ERROR("Can't handle page fault (%ld)\n", r);
++ DRM_ERROR("Can't handle page fault (%d)\n", r);
+
+ error_unref:
+ amdgpu_bo_unref(&root);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index e7d6da05011ff..4f24663d81696 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -3280,7 +3280,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000280, 0x00000280),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07800000, 0x00800000),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x00001d00, 0x00000500),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x00001d00, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003c0000, 0x00280400),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index ad4afbc37d516..54fd48ee5f275 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3962,13 +3962,6 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+- /*
+- * The arbitrary tiling support for multiplane formats has not been hooked
+- * up.
+- */
+- if (info->num_planes > 1)
+- return false;
+-
+ /*
+ * For D swizzle the canonical modifier depends on the bpp, so check
+ * it here.
+@@ -3987,6 +3980,10 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
+ /* Per radeonsi comments 16/64 bpp are more complicated. */
+ if (info->cpp[0] != 4)
+ return false;
++ /* We support multi-planar formats, but not when combined with
++ * additional DCC metadata planes. */
++ if (info->num_planes > 1)
++ return false;
+ }
+
+ return true;
+diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
+index 3feaece13ade0..6b665931147df 100644
+--- a/drivers/hid/hid-alps.c
++++ b/drivers/hid/hid-alps.c
+@@ -761,6 +761,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
+
+ if (input_register_device(data->input2)) {
+ input_free_device(input2);
++ ret = -ENOENT;
+ goto exit;
+ }
+ }
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index 1dfe184ebf5a1..2ab22b9259418 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -1221,6 +1221,9 @@ static const struct hid_device_id asus_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD),
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
++ USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2),
++ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
+ QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 21e15627a4614..477baa30889cc 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -161,6 +161,7 @@ struct cp2112_device {
+ atomic_t read_avail;
+ atomic_t xfer_avail;
+ struct gpio_chip gc;
++ struct irq_chip irq;
+ u8 *in_out_buffer;
+ struct mutex lock;
+
+@@ -1175,16 +1176,6 @@ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
+ return 0;
+ }
+
+-static struct irq_chip cp2112_gpio_irqchip = {
+- .name = "cp2112-gpio",
+- .irq_startup = cp2112_gpio_irq_startup,
+- .irq_shutdown = cp2112_gpio_irq_shutdown,
+- .irq_ack = cp2112_gpio_irq_ack,
+- .irq_mask = cp2112_gpio_irq_mask,
+- .irq_unmask = cp2112_gpio_irq_unmask,
+- .irq_set_type = cp2112_gpio_irq_type,
+-};
+-
+ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
+ int pin)
+ {
+@@ -1339,8 +1330,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ dev->gc.can_sleep = 1;
+ dev->gc.parent = &hdev->dev;
+
++ dev->irq.name = "cp2112-gpio";
++ dev->irq.irq_startup = cp2112_gpio_irq_startup;
++ dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
++ dev->irq.irq_ack = cp2112_gpio_irq_ack;
++ dev->irq.irq_mask = cp2112_gpio_irq_mask;
++ dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
++ dev->irq.irq_set_type = cp2112_gpio_irq_type;
++ dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
++
+ girq = &dev->gc.irq;
+- girq->chip = &cp2112_gpio_irqchip;
++ girq->chip = &dev->irq;
+ /* The event comes from the outside so no parent handler */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
+index 85a054f1ce389..2a176f77b32e9 100644
+--- a/drivers/hid/hid-google-hammer.c
++++ b/drivers/hid/hid-google-hammer.c
+@@ -526,6 +526,8 @@ static void hammer_remove(struct hid_device *hdev)
+ }
+
+ static const struct hid_device_id hammer_devices[] = {
++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
++ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index b60279aaed438..09d0499865160 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -191,6 +191,7 @@
+ #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837
+ #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
+ #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD 0x1866
++#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2 0x19b6
+ #define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869
+
+ #define USB_VENDOR_ID_ATEN 0x0557
+@@ -488,6 +489,7 @@
+ #define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c
+ #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
+ #define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
++#define USB_DEVICE_ID_GOOGLE_DON 0x5050
+
+ #define USB_VENDOR_ID_GOTOP 0x08f2
+ #define USB_DEVICE_ID_SUPER_Q2 0x007f
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 6cda5935fc09c..2d70dc4bea654 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -2533,7 +2533,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
+ !wacom_wac->shared->is_touch_on) {
+ if (!wacom_wac->shared->touch_down)
+ return;
+- prox = 0;
++ prox = false;
+ }
+
+ wacom_wac->hid_data.num_received++;
+diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
+index b248966837b4c..7aad40b2aa736 100644
+--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
++++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
+@@ -412,7 +412,7 @@
+ | CN6XXX_INTR_M0UNWI_ERR \
+ | CN6XXX_INTR_M1UPB0_ERR \
+ | CN6XXX_INTR_M1UPWI_ERR \
+- | CN6XXX_INTR_M1UPB0_ERR \
++ | CN6XXX_INTR_M1UNB0_ERR \
+ | CN6XXX_INTR_M1UNWI_ERR \
+ | CN6XXX_INTR_INSTR_DB_OF_ERR \
+ | CN6XXX_INTR_SLIST_DB_OF_ERR \
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index f35b0b83fe85a..040edc6fc5609 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -891,6 +891,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be16 sport;
+ int err;
+
++ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
++ return -EINVAL;
++
+ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
+ geneve->cfg.info.key.tp_dst, sport);
+@@ -985,6 +988,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be16 sport;
+ int err;
+
++ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
++ return -EINVAL;
++
+ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
+ geneve->cfg.info.key.tp_dst, sport);
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index d18642a8144cf..4909405803d57 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -3104,7 +3104,7 @@ static void hso_free_interface(struct usb_interface *interface)
+ cancel_work_sync(&serial_table[i]->async_put_intf);
+ cancel_work_sync(&serial_table[i]->async_get_intf);
+ hso_serial_tty_unregister(serial);
+- kref_put(&serial_table[i]->ref, hso_serial_ref_free);
++ kref_put(&serial->parent->ref, hso_serial_ref_free);
+ }
+ }
+
+diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
+index 6f10e0998f1ce..94d19158efc18 100644
+--- a/drivers/net/xen-netback/xenbus.c
++++ b/drivers/net/xen-netback/xenbus.c
+@@ -824,11 +824,15 @@ static void connect(struct backend_info *be)
+ xenvif_carrier_on(be->vif);
+
+ unregister_hotplug_status_watch(be);
+- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
+- hotplug_status_changed,
+- "%s/%s", dev->nodename, "hotplug-status");
+- if (!err)
++ if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
++ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
++ NULL, hotplug_status_changed,
++ "%s/%s", dev->nodename,
++ "hotplug-status");
++ if (err)
++ goto err;
+ be->have_hotplug_status_watch = 1;
++ }
+
+ netif_tx_wake_all_queues(be->vif->dev);
+
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 9fc4433fece4f..20b477cd5a30a 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1604,8 +1604,8 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
+ unsigned i, pin;
+ #ifdef CONFIG_GPIOLIB
+ struct pinctrl_gpio_range *range;
+- unsigned int gpio_num;
+ struct gpio_chip *chip;
++ int gpio_num;
+ #endif
+
+ seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
+@@ -1625,7 +1625,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
+ seq_printf(s, "pin %d (%s) ", pin, desc->name);
+
+ #ifdef CONFIG_GPIOLIB
+- gpio_num = 0;
++ gpio_num = -1;
+ list_for_each_entry(range, &pctldev->gpio_ranges, node) {
+ if ((pin >= range->pin_base) &&
+ (pin < (range->pin_base + range->npins))) {
+@@ -1633,10 +1633,12 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
+ break;
+ }
+ }
+- chip = gpio_to_chip(gpio_num);
+- if (chip && chip->gpiodev && chip->gpiodev->base)
+- seq_printf(s, "%u:%s ", gpio_num -
+- chip->gpiodev->base, chip->label);
++ if (gpio_num >= 0)
++ chip = gpio_to_chip(gpio_num);
++ else
++ chip = NULL;
++ if (chip)
++ seq_printf(s, "%u:%s ", gpio_num - chip->gpiodev->base, chip->label);
+ else
+ seq_puts(s, "0:? ");
+ #endif
+diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
+index 7fdf4257df1ed..ad4b446d588e6 100644
+--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
++++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
+@@ -299,9 +299,9 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
+ static const struct intel_community lbg_communities[] = {
+ LBG_COMMUNITY(0, 0, 71),
+ LBG_COMMUNITY(1, 72, 132),
+- LBG_COMMUNITY(3, 133, 144),
+- LBG_COMMUNITY(4, 145, 180),
+- LBG_COMMUNITY(5, 181, 246),
++ LBG_COMMUNITY(3, 133, 143),
++ LBG_COMMUNITY(4, 144, 178),
++ LBG_COMMUNITY(5, 179, 246),
+ };
+
+ static const struct intel_pinctrl_soc_data lbg_soc_data = {
+diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
+index 1fd29f93ff6d6..5bdfb1565c14d 100644
+--- a/drivers/soc/qcom/qcom-geni-se.c
++++ b/drivers/soc/qcom/qcom-geni-se.c
+@@ -756,6 +756,9 @@ int geni_icc_get(struct geni_se *se, const char *icc_ddr)
+ int i, err;
+ const char *icc_names[] = {"qup-core", "qup-config", icc_ddr};
+
++ if (has_acpi_companion(se->dev))
++ return 0;
++
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ if (!icc_names[i])
+ continue;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index e79359326411a..bc035ba6e0105 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1637,12 +1637,13 @@ static int acm_resume(struct usb_interface *intf)
+ struct urb *urb;
+ int rv = 0;
+
+- acm_unpoison_urbs(acm);
+ spin_lock_irq(&acm->write_lock);
+
+ if (--acm->susp_count)
+ goto out;
+
++ acm_unpoison_urbs(acm);
++
+ if (tty_port_initialized(&acm->port)) {
+ rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
+
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index d300f799efcd1..aa656f57bf5b7 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -273,8 +273,10 @@ done:
+ mr->log_size = log_entity_size;
+ mr->nsg = nsg;
+ mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+- if (!mr->nent)
++ if (!mr->nent) {
++ err = -ENOMEM;
+ goto err_map;
++ }
+
+ err = create_direct_mr(mvdev, mr);
+ if (err)
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index e0a27e3362935..bfa4c6ef554e5 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -745,9 +745,11 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
+ const struct vdpa_config_ops *ops = vdpa->config;
+ int r = 0;
+
++ mutex_lock(&dev->mutex);
++
+ r = vhost_dev_check_owner(dev);
+ if (r)
+- return r;
++ goto unlock;
+
+ switch (msg->type) {
+ case VHOST_IOTLB_UPDATE:
+@@ -768,6 +770,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
+ r = -EINVAL;
+ break;
+ }
++unlock:
++ mutex_unlock(&dev->mutex);
+
+ return r;
+ }
+diff --git a/fs/coda/file.c b/fs/coda/file.c
+index 128d63df5bfb6..ef5ca22bfb3ea 100644
+--- a/fs/coda/file.c
++++ b/fs/coda/file.c
+@@ -175,10 +175,10 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
+ ret = call_mmap(vma->vm_file, vma);
+
+ if (ret) {
+- /* if call_mmap fails, our caller will put coda_file so we
+- * should drop the reference to the host_file that we got.
++ /* if call_mmap fails, our caller will put host_file so we
++ * should drop the reference to the coda_file that we got.
+ */
+- fput(host_file);
++ fput(coda_file);
+ kfree(cvm_ops);
+ } else {
+ /* here we add redirects for the open/close vm_operations */
+diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
+index 077d3ad343f68..7bf6ac142ff04 100644
+--- a/fs/overlayfs/file.c
++++ b/fs/overlayfs/file.c
+@@ -430,20 +430,11 @@ static int ovl_mmap(struct file *file, struct vm_area_struct *vma)
+ if (WARN_ON(file != vma->vm_file))
+ return -EIO;
+
+- vma->vm_file = get_file(realfile);
++ vma_set_file(vma, realfile);
+
+ old_cred = ovl_override_creds(file_inode(file)->i_sb);
+ ret = call_mmap(vma->vm_file, vma);
+ revert_creds(old_cred);
+-
+- if (ret) {
+- /* Drop reference count from new vm_file value */
+- fput(realfile);
+- } else {
+- /* Drop reference count from previous vm_file value */
+- fput(file);
+- }
+-
+ ovl_file_accessed(file);
+
+ return ret;
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 88b581b75d5be..b14c045320fbf 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1288,6 +1288,11 @@ static inline bool bpf_allow_ptr_leaks(void)
+ return perfmon_capable();
+ }
+
++static inline bool bpf_allow_uninit_stack(void)
++{
++ return perfmon_capable();
++}
++
+ static inline bool bpf_allow_ptr_to_map_access(void)
+ {
+ return perfmon_capable();
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index e941fe1484e57..57c11e5bec6cf 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -195,7 +195,7 @@ struct bpf_func_state {
+ * 0 = main function, 1 = first callee.
+ */
+ u32 frameno;
+- /* subprog number == index within subprog_stack_depth
++ /* subprog number == index within subprog_info
+ * zero == main subprog
+ */
+ u32 subprogno;
+@@ -401,6 +401,7 @@ struct bpf_verifier_env {
+ u32 used_map_cnt; /* number of used maps */
+ u32 id_gen; /* used to generate unique reg IDs */
+ bool allow_ptr_leaks;
++ bool allow_uninit_stack;
+ bool allow_ptr_to_map_access;
+ bool bpf_capable;
+ bool bypass_spec_v1;
+diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
+index 8b30b14b47d3f..f377817ce75c1 100644
+--- a/include/linux/platform_data/gpio-omap.h
++++ b/include/linux/platform_data/gpio-omap.h
+@@ -85,6 +85,7 @@
+ * omap2+ specific GPIO registers
+ */
+ #define OMAP24XX_GPIO_REVISION 0x0000
++#define OMAP24XX_GPIO_SYSCONFIG 0x0010
+ #define OMAP24XX_GPIO_IRQSTATUS1 0x0018
+ #define OMAP24XX_GPIO_IRQSTATUS2 0x0028
+ #define OMAP24XX_GPIO_IRQENABLE2 0x002c
+@@ -108,6 +109,7 @@
+ #define OMAP24XX_GPIO_SETDATAOUT 0x0094
+
+ #define OMAP4_GPIO_REVISION 0x0000
++#define OMAP4_GPIO_SYSCONFIG 0x0010
+ #define OMAP4_GPIO_EOI 0x0020
+ #define OMAP4_GPIO_IRQSTATUSRAW0 0x0024
+ #define OMAP4_GPIO_IRQSTATUSRAW1 0x0028
+@@ -148,6 +150,7 @@
+ #ifndef __ASSEMBLER__
+ struct omap_gpio_reg_offs {
+ u16 revision;
++ u16 sysconfig;
+ u16 direction;
+ u16 datain;
+ u16 dataout;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index c198d19fa1c89..d3a2f0cef76d1 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2271,12 +2271,14 @@ static void save_register_state(struct bpf_func_state *state,
+ state->stack[spi].slot_type[i] = STACK_SPILL;
+ }
+
+-/* check_stack_read/write functions track spill/fill of registers,
++/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
+ * stack boundary and alignment are checked in check_mem_access()
+ */
+-static int check_stack_write(struct bpf_verifier_env *env,
+- struct bpf_func_state *state, /* func where register points to */
+- int off, int size, int value_regno, int insn_idx)
++static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
++ /* stack frame we're writing to */
++ struct bpf_func_state *state,
++ int off, int size, int value_regno,
++ int insn_idx)
+ {
+ struct bpf_func_state *cur; /* state of the current function */
+ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
+@@ -2402,9 +2404,175 @@ static int check_stack_write(struct bpf_verifier_env *env,
+ return 0;
+ }
+
+-static int check_stack_read(struct bpf_verifier_env *env,
+- struct bpf_func_state *reg_state /* func where register points to */,
+- int off, int size, int value_regno)
++/* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
++ * known to contain a variable offset.
++ * This function checks whether the write is permitted and conservatively
++ * tracks the effects of the write, considering that each stack slot in the
++ * dynamic range is potentially written to.
++ *
++ * 'off' includes 'regno->off'.
++ * 'value_regno' can be -1, meaning that an unknown value is being written to
++ * the stack.
++ *
++ * Spilled pointers in range are not marked as written because we don't know
++ * what's going to be actually written. This means that read propagation for
++ * future reads cannot be terminated by this write.
++ *
++ * For privileged programs, uninitialized stack slots are considered
++ * initialized by this write (even though we don't know exactly what offsets
++ * are going to be written to). The idea is that we don't want the verifier to
++ * reject future reads that access slots written to through variable offsets.
++ */
++static int check_stack_write_var_off(struct bpf_verifier_env *env,
++ /* func where register points to */
++ struct bpf_func_state *state,
++ int ptr_regno, int off, int size,
++ int value_regno, int insn_idx)
++{
++ struct bpf_func_state *cur; /* state of the current function */
++ int min_off, max_off;
++ int i, err;
++ struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
++ bool writing_zero = false;
++ /* set if the fact that we're writing a zero is used to let any
++ * stack slots remain STACK_ZERO
++ */
++ bool zero_used = false;
++
++ cur = env->cur_state->frame[env->cur_state->curframe];
++ ptr_reg = &cur->regs[ptr_regno];
++ min_off = ptr_reg->smin_value + off;
++ max_off = ptr_reg->smax_value + off + size;
++ if (value_regno >= 0)
++ value_reg = &cur->regs[value_regno];
++ if (value_reg && register_is_null(value_reg))
++ writing_zero = true;
++
++ err = realloc_func_state(state, round_up(-min_off, BPF_REG_SIZE),
++ state->acquired_refs, true);
++ if (err)
++ return err;
++
++
++ /* Variable offset writes destroy any spilled pointers in range. */
++ for (i = min_off; i < max_off; i++) {
++ u8 new_type, *stype;
++ int slot, spi;
++
++ slot = -i - 1;
++ spi = slot / BPF_REG_SIZE;
++ stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
++
++ if (!env->allow_ptr_leaks
++ && *stype != NOT_INIT
++ && *stype != SCALAR_VALUE) {
++ /* Reject the write if there are spilled pointers in
++ * range. If we didn't reject here, the ptr status
++ * would be erased below (even though not all slots are
++ * actually overwritten), possibly opening the door to
++ * leaks.
++ */
++ verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
++ insn_idx, i);
++ return -EINVAL;
++ }
++
++ /* Erase all spilled pointers. */
++ state->stack[spi].spilled_ptr.type = NOT_INIT;
++
++ /* Update the slot type. */
++ new_type = STACK_MISC;
++ if (writing_zero && *stype == STACK_ZERO) {
++ new_type = STACK_ZERO;
++ zero_used = true;
++ }
++ /* If the slot is STACK_INVALID, we check whether it's OK to
++ * pretend that it will be initialized by this write. The slot
++ * might not actually be written to, and so if we mark it as
++ * initialized future reads might leak uninitialized memory.
++ * For privileged programs, we will accept such reads to slots
++ * that may or may not be written because, if we rejected
++ * them, the error would be too confusing.
++ */
++ if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
++ verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
++ insn_idx, i);
++ return -EINVAL;
++ }
++ *stype = new_type;
++ }
++ if (zero_used) {
++ /* backtracking doesn't work for STACK_ZERO yet. */
++ err = mark_chain_precision(env, value_regno);
++ if (err)
++ return err;
++ }
++ return 0;
++}
++
++/* When register 'dst_regno' is assigned some values from stack[min_off,
++ * max_off), we set the register's type according to the types of the
++ * respective stack slots. If all the stack values are known to be zeros, then
++ * so is the destination reg. Otherwise, the register is considered to be
++ * SCALAR. This function does not deal with register filling; the caller must
++ * ensure that all spilled registers in the stack range have been marked as
++ * read.
++ */
++static void mark_reg_stack_read(struct bpf_verifier_env *env,
++ /* func where src register points to */
++ struct bpf_func_state *ptr_state,
++ int min_off, int max_off, int dst_regno)
++{
++ struct bpf_verifier_state *vstate = env->cur_state;
++ struct bpf_func_state *state = vstate->frame[vstate->curframe];
++ int i, slot, spi;
++ u8 *stype;
++ int zeros = 0;
++
++ for (i = min_off; i < max_off; i++) {
++ slot = -i - 1;
++ spi = slot / BPF_REG_SIZE;
++ stype = ptr_state->stack[spi].slot_type;
++ if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
++ break;
++ zeros++;
++ }
++ if (zeros == max_off - min_off) {
++ /* any access_size read into register is zero extended,
++ * so the whole register == const_zero
++ */
++ __mark_reg_const_zero(&state->regs[dst_regno]);
++ /* backtracking doesn't support STACK_ZERO yet,
++ * so mark it precise here, so that later
++ * backtracking can stop here.
++ * Backtracking may not need this if this register
++ * doesn't participate in pointer adjustment.
++ * Forward propagation of precise flag is not
++ * necessary either. This mark is only to stop
++ * backtracking. Any register that contributed
++ * to const 0 was marked precise before spill.
++ */
++ state->regs[dst_regno].precise = true;
++ } else {
++ /* have read misc data from the stack */
++ mark_reg_unknown(env, state->regs, dst_regno);
++ }
++ state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
++}
++
++/* Read the stack at 'off' and put the results into the register indicated by
++ * 'dst_regno'. It handles reg filling if the addressed stack slot is a
++ * spilled reg.
++ *
++ * 'dst_regno' can be -1, meaning that the read value is not going to a
++ * register.
++ *
++ * The access is assumed to be within the current stack bounds.
++ */
++static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
++ /* func where src register points to */
++ struct bpf_func_state *reg_state,
++ int off, int size, int dst_regno)
+ {
+ struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
+@@ -2412,11 +2580,6 @@ static int check_stack_read(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg;
+ u8 *stype;
+
+- if (reg_state->allocated_stack <= slot) {
+- verbose(env, "invalid read from stack off %d+0 size %d\n",
+- off, size);
+- return -EACCES;
+- }
+ stype = reg_state->stack[spi].slot_type;
+ reg = &reg_state->stack[spi].spilled_ptr;
+
+@@ -2427,9 +2590,9 @@ static int check_stack_read(struct bpf_verifier_env *env,
+ verbose(env, "invalid size of register fill\n");
+ return -EACCES;
+ }
+- if (value_regno >= 0) {
+- mark_reg_unknown(env, state->regs, value_regno);
+- state->regs[value_regno].live |= REG_LIVE_WRITTEN;
++ if (dst_regno >= 0) {
++ mark_reg_unknown(env, state->regs, dst_regno);
++ state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
+ }
+ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+ return 0;
+@@ -2441,16 +2604,16 @@ static int check_stack_read(struct bpf_verifier_env *env,
+ }
+ }
+
+- if (value_regno >= 0) {
++ if (dst_regno >= 0) {
+ /* restore register state from stack */
+- state->regs[value_regno] = *reg;
++ state->regs[dst_regno] = *reg;
+ /* mark reg as written since spilled pointer state likely
+ * has its liveness marks cleared by is_state_visited()
+ * which resets stack/reg liveness for state transitions
+ */
+- state->regs[value_regno].live |= REG_LIVE_WRITTEN;
++ state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
+ } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
+- /* If value_regno==-1, the caller is asking us whether
++ /* If dst_regno==-1, the caller is asking us whether
+ * it is acceptable to use this value as a SCALAR_VALUE
+ * (e.g. for XADD).
+ * We must not allow unprivileged callers to do that
+@@ -2462,70 +2625,167 @@ static int check_stack_read(struct bpf_verifier_env *env,
+ }
+ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+ } else {
+- int zeros = 0;
++ u8 type;
+
+ for (i = 0; i < size; i++) {
+- if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
++ type = stype[(slot - i) % BPF_REG_SIZE];
++ if (type == STACK_MISC)
+ continue;
+- if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
+- zeros++;
++ if (type == STACK_ZERO)
+ continue;
+- }
+ verbose(env, "invalid read from stack off %d+%d size %d\n",
+ off, i, size);
+ return -EACCES;
+ }
+ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+- if (value_regno >= 0) {
+- if (zeros == size) {
+- /* any size read into register is zero extended,
+- * so the whole register == const_zero
+- */
+- __mark_reg_const_zero(&state->regs[value_regno]);
+- /* backtracking doesn't support STACK_ZERO yet,
+- * so mark it precise here, so that later
+- * backtracking can stop here.
+- * Backtracking may not need this if this register
+- * doesn't participate in pointer adjustment.
+- * Forward propagation of precise flag is not
+- * necessary either. This mark is only to stop
+- * backtracking. Any register that contributed
+- * to const 0 was marked precise before spill.
+- */
+- state->regs[value_regno].precise = true;
+- } else {
+- /* have read misc data from the stack */
+- mark_reg_unknown(env, state->regs, value_regno);
+- }
+- state->regs[value_regno].live |= REG_LIVE_WRITTEN;
+- }
++ if (dst_regno >= 0)
++ mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
+ }
+ return 0;
+ }
+
+-static int check_stack_access(struct bpf_verifier_env *env,
+- const struct bpf_reg_state *reg,
+- int off, int size)
++enum stack_access_src {
++ ACCESS_DIRECT = 1, /* the access is performed by an instruction */
++ ACCESS_HELPER = 2, /* the access is performed by a helper */
++};
++
++static int check_stack_range_initialized(struct bpf_verifier_env *env,
++ int regno, int off, int access_size,
++ bool zero_size_allowed,
++ enum stack_access_src type,
++ struct bpf_call_arg_meta *meta);
++
++static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
++{
++ return cur_regs(env) + regno;
++}
++
++/* Read the stack at 'ptr_regno + off' and put the result into the register
++ * 'dst_regno'.
++ * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
++ * but not its variable offset.
++ * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
++ *
++ * As opposed to check_stack_read_fixed_off, this function doesn't deal with
++ * filling registers (i.e. reads of spilled register cannot be detected when
++ * the offset is not fixed). We conservatively mark 'dst_regno' as containing
++ * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
++ * offset; for a fixed offset check_stack_read_fixed_off should be used
++ * instead.
++ */
++static int check_stack_read_var_off(struct bpf_verifier_env *env,
++ int ptr_regno, int off, int size, int dst_regno)
++{
++ /* The state of the source register. */
++ struct bpf_reg_state *reg = reg_state(env, ptr_regno);
++ struct bpf_func_state *ptr_state = func(env, reg);
++ int err;
++ int min_off, max_off;
++
++ /* Note that we pass a NULL meta, so raw access will not be permitted.
++ */
++ err = check_stack_range_initialized(env, ptr_regno, off, size,
++ false, ACCESS_DIRECT, NULL);
++ if (err)
++ return err;
++
++ min_off = reg->smin_value + off;
++ max_off = reg->smax_value + off;
++ mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
++ return 0;
++}
++
++/* check_stack_read dispatches to check_stack_read_fixed_off or
++ * check_stack_read_var_off.
++ *
++ * The caller must ensure that the offset falls within the allocated stack
++ * bounds.
++ *
++ * 'dst_regno' is a register which will receive the value from the stack. It
++ * can be -1, meaning that the read value is not going to a register.
++ */
++static int check_stack_read(struct bpf_verifier_env *env,
++ int ptr_regno, int off, int size,
++ int dst_regno)
+ {
+- /* Stack accesses must be at a fixed offset, so that we
+- * can determine what type of data were returned. See
+- * check_stack_read().
++ struct bpf_reg_state *reg = reg_state(env, ptr_regno);
++ struct bpf_func_state *state = func(env, reg);
++ int err;
++ /* Some accesses are only permitted with a static offset. */
++ bool var_off = !tnum_is_const(reg->var_off);
++
++ /* The offset is required to be static when reads don't go to a
++ * register, in order to not leak pointers (see
++ * check_stack_read_fixed_off).
+ */
+- if (!tnum_is_const(reg->var_off)) {
++ if (dst_regno < 0 && var_off) {
+ char tn_buf[48];
+
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+- verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
++ verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
+ tn_buf, off, size);
+ return -EACCES;
+ }
++ /* Variable offset is prohibited for unprivileged mode for simplicity
++ * since it requires corresponding support in Spectre masking for stack
++ * ALU. See also retrieve_ptr_limit().
++ */
++ if (!env->bypass_spec_v1 && var_off) {
++ char tn_buf[48];
+
+- if (off >= 0 || off < -MAX_BPF_STACK) {
+- verbose(env, "invalid stack off=%d size=%d\n", off, size);
++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
++ verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
++ ptr_regno, tn_buf);
+ return -EACCES;
+ }
+
+- return 0;
++ if (!var_off) {
++ off += reg->var_off.value;
++ err = check_stack_read_fixed_off(env, state, off, size,
++ dst_regno);
++ } else {
++ /* Variable offset stack reads need more conservative handling
++ * than fixed offset ones. Note that dst_regno >= 0 on this
++ * branch.
++ */
++ err = check_stack_read_var_off(env, ptr_regno, off, size,
++ dst_regno);
++ }
++ return err;
++}
++
++
++/* check_stack_write dispatches to check_stack_write_fixed_off or
++ * check_stack_write_var_off.
++ *
++ * 'ptr_regno' is the register used as a pointer into the stack.
++ * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
++ * 'value_regno' is the register whose value we're writing to the stack. It can
++ * be -1, meaning that we're not writing from a register.
++ *
++ * The caller must ensure that the offset falls within the maximum stack size.
++ */
++static int check_stack_write(struct bpf_verifier_env *env,
++ int ptr_regno, int off, int size,
++ int value_regno, int insn_idx)
++{
++ struct bpf_reg_state *reg = reg_state(env, ptr_regno);
++ struct bpf_func_state *state = func(env, reg);
++ int err;
++
++ if (tnum_is_const(reg->var_off)) {
++ off += reg->var_off.value;
++ err = check_stack_write_fixed_off(env, state, off, size,
++ value_regno, insn_idx);
++ } else {
++ /* Variable offset stack writes need more conservative handling
++ * than fixed offset ones.
++ */
++ err = check_stack_write_var_off(env, state,
++ ptr_regno, off, size,
++ value_regno, insn_idx);
++ }
++ return err;
+ }
+
+ static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
+@@ -2858,11 +3118,6 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
+ return -EACCES;
+ }
+
+-static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
+-{
+- return cur_regs(env) + regno;
+-}
+-
+ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
+ {
+ return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
+@@ -2981,8 +3236,8 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
+ break;
+ case PTR_TO_STACK:
+ pointer_desc = "stack ";
+- /* The stack spill tracking logic in check_stack_write()
+- * and check_stack_read() relies on stack accesses being
++ /* The stack spill tracking logic in check_stack_write_fixed_off()
++ * and check_stack_read_fixed_off() relies on stack accesses being
+ * aligned.
+ */
+ strict = true;
+@@ -3400,6 +3655,91 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
+ return 0;
+ }
+
++/* Check that the stack access at the given offset is within bounds. The
++ * maximum valid offset is -1.
++ *
++ * The minimum valid offset is -MAX_BPF_STACK for writes, and
++ * -state->allocated_stack for reads.
++ */
++static int check_stack_slot_within_bounds(int off,
++ struct bpf_func_state *state,
++ enum bpf_access_type t)
++{
++ int min_valid_off;
++
++ if (t == BPF_WRITE)
++ min_valid_off = -MAX_BPF_STACK;
++ else
++ min_valid_off = -state->allocated_stack;
++
++ if (off < min_valid_off || off > -1)
++ return -EACCES;
++ return 0;
++}
++
++/* Check that the stack access at 'regno + off' falls within the maximum stack
++ * bounds.
++ *
++ * 'off' includes `regno->offset`, but not its dynamic part (if any).
++ */
++static int check_stack_access_within_bounds(
++ struct bpf_verifier_env *env,
++ int regno, int off, int access_size,
++ enum stack_access_src src, enum bpf_access_type type)
++{
++ struct bpf_reg_state *regs = cur_regs(env);
++ struct bpf_reg_state *reg = regs + regno;
++ struct bpf_func_state *state = func(env, reg);
++ int min_off, max_off;
++ int err;
++ char *err_extra;
++
++ if (src == ACCESS_HELPER)
++ /* We don't know if helpers are reading or writing (or both). */
++ err_extra = " indirect access to";
++ else if (type == BPF_READ)
++ err_extra = " read from";
++ else
++ err_extra = " write to";
++
++ if (tnum_is_const(reg->var_off)) {
++ min_off = reg->var_off.value + off;
++ if (access_size > 0)
++ max_off = min_off + access_size - 1;
++ else
++ max_off = min_off;
++ } else {
++ if (reg->smax_value >= BPF_MAX_VAR_OFF ||
++ reg->smin_value <= -BPF_MAX_VAR_OFF) {
++ verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
++ err_extra, regno);
++ return -EACCES;
++ }
++ min_off = reg->smin_value + off;
++ if (access_size > 0)
++ max_off = reg->smax_value + off + access_size - 1;
++ else
++ max_off = min_off;
++ }
++
++ err = check_stack_slot_within_bounds(min_off, state, type);
++ if (!err)
++ err = check_stack_slot_within_bounds(max_off, state, type);
++
++ if (err) {
++ if (tnum_is_const(reg->var_off)) {
++ verbose(env, "invalid%s stack R%d off=%d size=%d\n",
++ err_extra, regno, off, access_size);
++ } else {
++ char tn_buf[48];
++
++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
++ verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
++ err_extra, regno, tn_buf, access_size);
++ }
++ }
++ return err;
++}
+
+ /* check whether memory at (regno + off) is accessible for t = (read | write)
+ * if t==write, value_regno is a register which value is stored into memory
+@@ -3515,8 +3855,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
+ }
+
+ } else if (reg->type == PTR_TO_STACK) {
+- off += reg->var_off.value;
+- err = check_stack_access(env, reg, off, size);
++ /* Basic bounds checks. */
++ err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
+ if (err)
+ return err;
+
+@@ -3525,12 +3865,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
+ if (err)
+ return err;
+
+- if (t == BPF_WRITE)
+- err = check_stack_write(env, state, off, size,
+- value_regno, insn_idx);
+- else
+- err = check_stack_read(env, state, off, size,
++ if (t == BPF_READ)
++ err = check_stack_read(env, regno, off, size,
+ value_regno);
++ else
++ err = check_stack_write(env, regno, off, size,
++ value_regno, insn_idx);
+ } else if (reg_is_pkt_pointer(reg)) {
+ if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
+ verbose(env, "cannot write into packet\n");
+@@ -3652,49 +3992,53 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
+ BPF_SIZE(insn->code), BPF_WRITE, -1, true);
+ }
+
+-static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
+- int off, int access_size,
+- bool zero_size_allowed)
++/* When register 'regno' is used to read the stack (either directly or through
++ * a helper function) make sure that it's within stack boundary and, depending
++ * on the access type, that all elements of the stack are initialized.
++ *
++ * 'off' includes 'regno->off', but not its dynamic part (if any).
++ *
++ * All registers that have been spilled on the stack in the slots within the
++ * read offsets are marked as read.
++ */
++static int check_stack_range_initialized(
++ struct bpf_verifier_env *env, int regno, int off,
++ int access_size, bool zero_size_allowed,
++ enum stack_access_src type, struct bpf_call_arg_meta *meta)
+ {
+ struct bpf_reg_state *reg = reg_state(env, regno);
++ struct bpf_func_state *state = func(env, reg);
++ int err, min_off, max_off, i, j, slot, spi;
++ char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
++ enum bpf_access_type bounds_check_type;
++ /* Some accesses can write anything into the stack, others are
++ * read-only.
++ */
++ bool clobber = false;
+
+- if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
+- access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
+- if (tnum_is_const(reg->var_off)) {
+- verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
+- regno, off, access_size);
+- } else {
+- char tn_buf[48];
+-
+- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+- verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
+- regno, tn_buf, access_size);
+- }
++ if (access_size == 0 && !zero_size_allowed) {
++ verbose(env, "invalid zero-sized read\n");
+ return -EACCES;
+ }
+- return 0;
+-}
+
+-/* when register 'regno' is passed into function that will read 'access_size'
+- * bytes from that pointer, make sure that it's within stack boundary
+- * and all elements of stack are initialized.
+- * Unlike most pointer bounds-checking functions, this one doesn't take an
+- * 'off' argument, so it has to add in reg->off itself.
+- */
+-static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
+- int access_size, bool zero_size_allowed,
+- struct bpf_call_arg_meta *meta)
+-{
+- struct bpf_reg_state *reg = reg_state(env, regno);
+- struct bpf_func_state *state = func(env, reg);
+- int err, min_off, max_off, i, j, slot, spi;
++ if (type == ACCESS_HELPER) {
++ /* The bounds checks for writes are more permissive than for
++ * reads. However, if raw_mode is not set, we'll do extra
++ * checks below.
++ */
++ bounds_check_type = BPF_WRITE;
++ clobber = true;
++ } else {
++ bounds_check_type = BPF_READ;
++ }
++ err = check_stack_access_within_bounds(env, regno, off, access_size,
++ type, bounds_check_type);
++ if (err)
++ return err;
++
+
+ if (tnum_is_const(reg->var_off)) {
+- min_off = max_off = reg->var_off.value + reg->off;
+- err = __check_stack_boundary(env, regno, min_off, access_size,
+- zero_size_allowed);
+- if (err)
+- return err;
++ min_off = max_off = reg->var_off.value + off;
+ } else {
+ /* Variable offset is prohibited for unprivileged mode for
+ * simplicity since it requires corresponding support in
+@@ -3705,8 +4049,8 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
+ char tn_buf[48];
+
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+- verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
+- regno, tn_buf);
++ verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
++ regno, err_extra, tn_buf);
+ return -EACCES;
+ }
+ /* Only initialized buffer on stack is allowed to be accessed
+@@ -3718,28 +4062,8 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
+ if (meta && meta->raw_mode)
+ meta = NULL;
+
+- if (reg->smax_value >= BPF_MAX_VAR_OFF ||
+- reg->smax_value <= -BPF_MAX_VAR_OFF) {
+- verbose(env, "R%d unbounded indirect variable offset stack access\n",
+- regno);
+- return -EACCES;
+- }
+- min_off = reg->smin_value + reg->off;
+- max_off = reg->smax_value + reg->off;
+- err = __check_stack_boundary(env, regno, min_off, access_size,
+- zero_size_allowed);
+- if (err) {
+- verbose(env, "R%d min value is outside of stack bound\n",
+- regno);
+- return err;
+- }
+- err = __check_stack_boundary(env, regno, max_off, access_size,
+- zero_size_allowed);
+- if (err) {
+- verbose(env, "R%d max value is outside of stack bound\n",
+- regno);
+- return err;
+- }
++ min_off = reg->smin_value + off;
++ max_off = reg->smax_value + off;
+ }
+
+ if (meta && meta->raw_mode) {
+@@ -3759,8 +4083,10 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
+ if (*stype == STACK_MISC)
+ goto mark;
+ if (*stype == STACK_ZERO) {
+- /* helper can write anything into the stack */
+- *stype = STACK_MISC;
++ if (clobber) {
++ /* helper can write anything into the stack */
++ *stype = STACK_MISC;
++ }
+ goto mark;
+ }
+
+@@ -3771,22 +4097,24 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
+ if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+ (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
+ env->allow_ptr_leaks)) {
+- __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
+- for (j = 0; j < BPF_REG_SIZE; j++)
+- state->stack[spi].slot_type[j] = STACK_MISC;
++ if (clobber) {
++ __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
++ for (j = 0; j < BPF_REG_SIZE; j++)
++ state->stack[spi].slot_type[j] = STACK_MISC;
++ }
+ goto mark;
+ }
+
+ err:
+ if (tnum_is_const(reg->var_off)) {
+- verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
+- min_off, i - min_off, access_size);
++ verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
++ err_extra, regno, min_off, i - min_off, access_size);
+ } else {
+ char tn_buf[48];
+
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+- verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
+- tn_buf, i - min_off, access_size);
++ verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
++ err_extra, regno, tn_buf, i - min_off, access_size);
+ }
+ return -EACCES;
+ mark:
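With a variable offset, the min_off and max_off computed above bound every offset the program could realize at runtime, and the loop ending at the mark: label must find an initialized byte at every position in [min_off, max_off + access_size). A simplified, self-contained model of that walk (values invented; slot numbering follows the kernel's slot = -i - 1 convention):

#include <stdio.h>

#define BPF_REG_SIZE 8

int main(void)
{
	int off = -16;            /* fixed part, includes reg->off */
	int smin = 0, smax = 8;   /* bounds of the unknown scalar  */
	int access_size = 8;

	int min_off = smin + off; /* -16 */
	int max_off = smax + off; /*  -8 */

	/* Every byte that any realization of the offset could touch
	 * must be initialized (STACK_MISC/STACK_ZERO/spilled reg). */
	for (int i = min_off; i < max_off + access_size; i++) {
		int slot = -i - 1;
		printf("byte at fp%+d -> stack slot %d\n",
		       i, slot / BPF_REG_SIZE);
	}
	return 0;
}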
+@@ -3835,8 +4163,10 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+ "rdwr",
+ &env->prog->aux->max_rdwr_access);
+ case PTR_TO_STACK:
+- return check_stack_boundary(env, regno, access_size,
+- zero_size_allowed, meta);
++ return check_stack_range_initialized(
++ env,
++ regno, reg->off, access_size,
++ zero_size_allowed, ACCESS_HELPER, meta);
+ default: /* scalar_value or invalid ptr */
+ /* Allow zero-byte read from NULL, regardless of pointer type */
+ if (zero_size_allowed && access_size == 0 &&
+@@ -5399,7 +5729,7 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+ bool off_is_neg = off_reg->smin_value < 0;
+ bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
+ (opcode == BPF_SUB && !off_is_neg);
+- u32 off, max = 0, ptr_limit = 0;
++ u32 max = 0, ptr_limit = 0;
+
+ if (!tnum_is_const(off_reg->var_off) &&
+ (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+@@ -5408,26 +5738,18 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+ switch (ptr_reg->type) {
+ case PTR_TO_STACK:
+ /* Offset 0 is out-of-bounds, but acceptable start for the
+- * left direction, see BPF_REG_FP.
++ * left direction, see BPF_REG_FP. Also, unknown scalar
++ * offset where we would need to deal with min/max bounds is
++ * currently prohibited for unprivileged.
+ */
+ max = MAX_BPF_STACK + mask_to_left;
+- /* Indirect variable offset stack access is prohibited in
+- * unprivileged mode so it's not handled here.
+- */
+- off = ptr_reg->off + ptr_reg->var_off.value;
+- if (mask_to_left)
+- ptr_limit = MAX_BPF_STACK + off;
+- else
+- ptr_limit = -off - 1;
++ ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
+ break;
+ case PTR_TO_MAP_VALUE:
+ max = ptr_reg->map_ptr->value_size;
+- if (mask_to_left) {
+- ptr_limit = ptr_reg->umax_value + ptr_reg->off;
+- } else {
+- off = ptr_reg->smin_value + ptr_reg->off;
+- ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
+- }
++ ptr_limit = (mask_to_left ?
++ ptr_reg->smin_value :
++ ptr_reg->umax_value) + ptr_reg->off;
+ break;
+ default:
+ return REASON_TYPE;
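The rewritten limits are easiest to check with concrete numbers: a stack pointer grows downward from the frame pointer, so the bytes that may safely be masked off equal the negated total offset, while a map-value pointer is bounded by smin_value (moving left) or umax_value (moving right) plus the fixed offset. An illustrative calculation (all values invented):

#include <stdio.h>

int main(void)
{
	/* PTR_TO_STACK: reg->off = -48, constant var_off = 0. */
	long stack_limit = -(0 + (-48));      /* 48 bytes back toward fp */

	/* PTR_TO_MAP_VALUE: off = 16, unknown scalar bounded [0, 100]. */
	long left_limit  = 0 + 16;            /* smin_value + off */
	long right_limit = 100 + 16;          /* umax_value + off */

	printf("stack=%ld left=%ld right=%ld\n",
	       stack_limit, left_limit, right_limit);
	return 0;
}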
+@@ -5482,10 +5804,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ struct bpf_insn *insn,
+ const struct bpf_reg_state *ptr_reg,
+ const struct bpf_reg_state *off_reg,
+- struct bpf_reg_state *dst_reg)
++ struct bpf_reg_state *dst_reg,
++ struct bpf_insn_aux_data *tmp_aux,
++ const bool commit_window)
+ {
++ struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
+ struct bpf_verifier_state *vstate = env->cur_state;
+- struct bpf_insn_aux_data *aux = cur_aux(env);
+ bool off_is_neg = off_reg->smin_value < 0;
+ bool ptr_is_dst_reg = ptr_reg == dst_reg;
+ u8 opcode = BPF_OP(insn->code);
+@@ -5504,18 +5828,33 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ if (vstate->speculative)
+ goto do_sim;
+
+- alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+- alu_state |= ptr_is_dst_reg ?
+- BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+-
+ err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
+ if (err < 0)
+ return err;
+
++ if (commit_window) {
++ /* In commit phase we narrow the masking window based on
++ * the observed pointer move after the simulated operation.
++ */
++ alu_state = tmp_aux->alu_state;
++ alu_limit = abs(tmp_aux->alu_limit - alu_limit);
++ } else {
++ alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
++ alu_state |= ptr_is_dst_reg ?
++ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
++ }
++
+ err = update_alu_sanitation_state(aux, alu_state, alu_limit);
+ if (err < 0)
+ return err;
+ do_sim:
++ /* If we're in commit phase, we're done here given we already
++ * pushed the truncated dst_reg into the speculative verification
++ * stack.
++ */
++ if (commit_window)
++ return 0;
++
+ /* Simulate and find potential out-of-bounds access under
+ * speculative execution from truncation as a result of
+ * masking when off was not within expected range. If off
+@@ -5574,6 +5913,72 @@ static int sanitize_err(struct bpf_verifier_env *env,
+ return -EACCES;
+ }
+
++/* check that stack access falls within stack limits and that 'reg' doesn't
++ * have a variable offset.
++ *
++ * Variable offset is prohibited for unprivileged mode for simplicity since it
++ * requires corresponding support in Spectre masking for stack ALU. See also
++ * retrieve_ptr_limit().
++ *
++ * 'off' includes 'reg->off'.
++ */
++static int check_stack_access_for_ptr_arithmetic(
++ struct bpf_verifier_env *env,
++ int regno,
++ const struct bpf_reg_state *reg,
++ int off)
++{
++ if (!tnum_is_const(reg->var_off)) {
++ char tn_buf[48];
++
++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
++ verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
++ regno, tn_buf, off);
++ return -EACCES;
++ }
++
++ if (off >= 0 || off < -MAX_BPF_STACK) {
++ verbose(env, "R%d stack pointer arithmetic goes out of range, "
++ "prohibited for !root; off=%d\n", regno, off);
++ return -EACCES;
++ }
++
++ return 0;
++}
++
++static int sanitize_check_bounds(struct bpf_verifier_env *env,
++ const struct bpf_insn *insn,
++ const struct bpf_reg_state *dst_reg)
++{
++ u32 dst = insn->dst_reg;
++
++ /* For unprivileged we require that resulting offset must be in bounds
++ * in order to be able to sanitize access later on.
++ */
++ if (env->bypass_spec_v1)
++ return 0;
++
++ switch (dst_reg->type) {
++ case PTR_TO_STACK:
++ if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
++ dst_reg->off + dst_reg->var_off.value))
++ return -EACCES;
++ break;
++ case PTR_TO_MAP_VALUE:
++ if (check_map_access(env, dst, dst_reg->off, 1, false)) {
++ verbose(env, "R%d pointer arithmetic of map value goes out of range, "
++ "prohibited for !root\n", dst);
++ return -EACCES;
++ }
++ break;
++ default:
++ break;
++ }
++
++ return 0;
++}
++
+ /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
+ * Caller should also handle BPF_MOV case separately.
+ * If we return -EACCES, caller may want to try again treating pointer as a
+@@ -5592,6 +5997,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
+ u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
+ umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
++ struct bpf_insn_aux_data tmp_aux = {};
+ u8 opcode = BPF_OP(insn->code);
+ u32 dst = insn->dst_reg;
+ int ret;
+@@ -5658,12 +6064,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ /* pointer types do not carry 32-bit bounds at the moment. */
+ __mark_reg32_unbounded(dst_reg);
+
+- switch (opcode) {
+- case BPF_ADD:
+- ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
++ if (sanitize_needed(opcode)) {
++ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
++ &tmp_aux, false);
+ if (ret < 0)
+ return sanitize_err(env, insn, ret, off_reg, dst_reg);
++ }
+
++ switch (opcode) {
++ case BPF_ADD:
+ /* We can take a fixed offset as long as it doesn't overflow
+ * the s32 'off' field
+ */
+@@ -5714,10 +6123,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ }
+ break;
+ case BPF_SUB:
+- ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+- if (ret < 0)
+- return sanitize_err(env, insn, ret, off_reg, dst_reg);
+-
+ if (dst_reg == off_reg) {
+ /* scalar -= pointer. Creates an unknown scalar */
+ verbose(env, "R%d tried to subtract pointer from scalar\n",
+@@ -5798,22 +6203,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ __reg_deduce_bounds(dst_reg);
+ __reg_bound_offset(dst_reg);
+
+- /* For unprivileged we require that resulting offset must be in bounds
+- * in order to be able to sanitize access later on.
+- */
+- if (!env->bypass_spec_v1) {
+- if (dst_reg->type == PTR_TO_MAP_VALUE &&
+- check_map_access(env, dst, dst_reg->off, 1, false)) {
+- verbose(env, "R%d pointer arithmetic of map value goes out of range, "
+- "prohibited for !root\n", dst);
+- return -EACCES;
+- } else if (dst_reg->type == PTR_TO_STACK &&
+- check_stack_access(env, dst_reg, dst_reg->off +
+- dst_reg->var_off.value, 1)) {
+- verbose(env, "R%d stack pointer arithmetic goes out of range, "
+- "prohibited for !root\n", dst);
+- return -EACCES;
+- }
++ if (sanitize_check_bounds(env, insn, dst_reg) < 0)
++ return -EACCES;
++ if (sanitize_needed(opcode)) {
++ ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
++ &tmp_aux, true);
++ if (ret < 0)
++ return sanitize_err(env, insn, ret, off_reg, dst_reg);
+ }
+
+ return 0;
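The commit-window call just above is the second half of a two-phase scheme: before the arithmetic is simulated, sanitize_ptr_alu() stores a provisional limit in tmp_aux; once dst_reg's bounds are known, the commit phase narrows the runtime masking window to the distance the pointer actually moved, via abs(tmp_aux->alu_limit - alu_limit). A toy calculation of that narrowing (numbers invented):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Phase 1: limit from ptr_reg before the ALU op, -(0 + -8). */
	int limit_before = 8;    /* pointer at fp-8  */
	/* Phase 2: limit from dst_reg after the op, -(0 + -48). */
	int limit_after  = 48;   /* pointer ended up at fp-48 */

	/* The masking window shrinks from "rest of the stack" to the
	 * 40 bytes the pointer was observed to move. */
	int alu_limit = abs(limit_before - limit_after);

	printf("alu_limit = %d\n", alu_limit); /* 40 */
	return 0;
}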
+@@ -12078,6 +12474,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
+ env->strict_alignment = false;
+
+ env->allow_ptr_leaks = bpf_allow_ptr_leaks();
++ env->allow_uninit_stack = bpf_allow_uninit_stack();
+ env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
+ env->bypass_spec_v1 = bpf_bypass_spec_v1();
+ env->bypass_spec_v4 = bpf_bypass_spec_v4();
+diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
+index fe9ca92faa2a7..909b0bf22a1ec 100644
+--- a/kernel/locking/qrwlock.c
++++ b/kernel/locking/qrwlock.c
+@@ -61,6 +61,8 @@ EXPORT_SYMBOL(queued_read_lock_slowpath);
+ */
+ void queued_write_lock_slowpath(struct qrwlock *lock)
+ {
++ int cnts;
++
+ /* Put the writer into the wait queue */
+ arch_spin_lock(&lock->wait_lock);
+
+@@ -74,9 +76,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
+
+ /* When no more readers or writers, set the locked flag */
+ do {
+- atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
+- } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
+- _QW_LOCKED) != _QW_WAITING);
++ cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
++ } while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
+ unlock:
+ arch_spin_unlock(&lock->wait_lock);
+ }
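The qrwlock change replaces an atomic_cmpxchg() retry loop with atomic_try_cmpxchg_acquire(), which on failure hands back the value it just observed, so the loop no longer needs a second acquire-ordered read of ->cnts. A userspace C11 sketch of the same pattern (the constants and single-threaded setup are simplified stand-ins, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

#define QW_WAITING 0x100	/* writer is queued  */
#define QW_LOCKED  0x0ff	/* writer holds lock */

int main(void)
{
	atomic_int cnts = QW_WAITING;
	int expected = QW_WAITING;

	/* On failure, compare_exchange writes the observed value into
	 * 'expected', so the retry path already knows the fresh state. */
	while (!atomic_compare_exchange_weak_explicit(&cnts, &expected,
						      QW_LOCKED,
						      memory_order_acquire,
						      memory_order_relaxed)) {
		while (expected != QW_WAITING)	/* wait for readers */
			expected = atomic_load_explicit(&cnts,
							memory_order_relaxed);
	}
	printf("locked, cnts=0x%x\n", atomic_load(&cnts));
	return 0;
}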
+diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
+index e2a0ed5d02f01..c87c4df8703d4 100644
+--- a/security/keys/trusted-keys/trusted_tpm2.c
++++ b/security/keys/trusted-keys/trusted_tpm2.c
+@@ -79,7 +79,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
+ if (i == ARRAY_SIZE(tpm2_hash_map))
+ return -EINVAL;
+
+- rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
++ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
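This trusted_tpm2.c hunk reorders the sequence so the chip reference is taken before any other fallible setup; in the full patch every subsequent error path releases it again. A generic sketch of that acquire-first shape (all names here are hypothetical, not the TPM API):

#include <stdio.h>

struct chip { int refs; };

static int  chip_try_get(struct chip *c) { c->refs++; return 0; }
static void chip_put(struct chip *c)     { c->refs--; }
static int  buf_init(void)               { return -1; /* pretend failure */ }

static int seal(struct chip *c)
{
	int rc = chip_try_get(c);	/* take the reference first */
	if (rc)
		return rc;

	rc = buf_init();
	if (rc)
		goto out;		/* every error path drops the ref */
	/* ... build and submit the command ... */
out:
	chip_put(c);
	return rc;
}

int main(void)
{
	struct chip c = { .refs = 0 };
	printf("rc=%d refs=%d\n", seal(&c), c.refs);	/* rc=-1 refs=0 */
	return 0;
}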
+diff --git a/tools/arch/ia64/include/asm/barrier.h b/tools/arch/ia64/include/asm/barrier.h
+index 4d471d9511a54..6fffe56827134 100644
+--- a/tools/arch/ia64/include/asm/barrier.h
++++ b/tools/arch/ia64/include/asm/barrier.h
+@@ -39,9 +39,6 @@
+ * sequential memory pages only.
+ */
+
+-/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */
+-#define ia64_mf() asm volatile ("mf" ::: "memory")
+-
+ #define mb() ia64_mf()
+ #define rmb() mb()
+ #define wmb() mb()
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index 2723082f38170..e7a071a154706 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -634,7 +634,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
+ break;
+ }
+
+- if (itr)
++ if (itr && itr->parse_snapshot_options)
+ return itr->parse_snapshot_options(itr, opts, str);
+
+ pr_err("No AUX area tracing to snapshot\n");
+diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
+index e2537d5acab09..f4d44f75ba152 100644
+--- a/tools/perf/util/map.c
++++ b/tools/perf/util/map.c
+@@ -836,15 +836,18 @@ out:
+ int maps__clone(struct thread *thread, struct maps *parent)
+ {
+ struct maps *maps = thread->maps;
+- int err = -ENOMEM;
++ int err;
+ struct map *map;
+
+ down_read(&parent->lock);
+
+ maps__for_each_entry(parent, map) {
+ struct map *new = map__clone(map);
+- if (new == NULL)
++
++ if (new == NULL) {
++ err = -ENOMEM;
+ goto out_unlock;
++ }
+
+ err = unwind__prepare_access(maps, new, NULL);
+ if (err)