diff options
author | Mike Pagano <mpagano@gentoo.org> | 2020-12-30 07:54:04 -0500 |
---|---|---|
committer | Mike Pagano <mpagano@gentoo.org> | 2020-12-30 07:54:04 -0500 |
commit | b6de6417a82978446b2e3e3bec49271f305452e6 (patch) | |
tree | c81e308fa3cd958931773193a9f0607195bc3dfa /1003_linux-5.10.4.patch | |
parent | Remove redundant f2fs patch. (diff) | |
download | linux-patches-b6de6417a82978446b2e3e3bec49271f305452e6.tar.gz linux-patches-b6de6417a82978446b2e3e3bec49271f305452e6.tar.bz2 linux-patches-b6de6417a82978446b2e3e3bec49271f305452e6.zip |
Linux patch 5.10.4 (branch: 5.10-6)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
Diffstat (limited to '1003_linux-5.10.4.patch')
-rw-r--r-- | 1003_linux-5.10.4.patch | 23858 |
1 files changed, 23858 insertions, 0 deletions
diff --git a/1003_linux-5.10.4.patch b/1003_linux-5.10.4.patch new file mode 100644 index 00000000..a623431e --- /dev/null +++ b/1003_linux-5.10.4.patch @@ -0,0 +1,23858 @@ +diff --git a/Documentation/locking/seqlock.rst b/Documentation/locking/seqlock.rst +index a334b584f2b34..64405e5da63e4 100644 +--- a/Documentation/locking/seqlock.rst ++++ b/Documentation/locking/seqlock.rst +@@ -89,7 +89,7 @@ Read path:: + + .. _seqcount_locktype_t: + +-Sequence counters with associated locks (``seqcount_LOCKTYPE_t``) ++Sequence counters with associated locks (``seqcount_LOCKNAME_t``) + ----------------------------------------------------------------- + + As discussed at :ref:`seqcount_t`, sequence count write side critical +@@ -115,27 +115,26 @@ The following sequence counters with associated locks are defined: + - ``seqcount_mutex_t`` + - ``seqcount_ww_mutex_t`` + +-The plain seqcount read and write APIs branch out to the specific +-seqcount_LOCKTYPE_t implementation at compile-time. This avoids kernel +-API explosion per each new seqcount LOCKTYPE. ++The sequence counter read and write APIs can take either a plain ++seqcount_t or any of the seqcount_LOCKNAME_t variants above. + +-Initialization (replace "LOCKTYPE" with one of the supported locks):: ++Initialization (replace "LOCKNAME" with one of the supported locks):: + + /* dynamic */ +- seqcount_LOCKTYPE_t foo_seqcount; +- seqcount_LOCKTYPE_init(&foo_seqcount, &lock); ++ seqcount_LOCKNAME_t foo_seqcount; ++ seqcount_LOCKNAME_init(&foo_seqcount, &lock); + + /* static */ +- static seqcount_LOCKTYPE_t foo_seqcount = +- SEQCNT_LOCKTYPE_ZERO(foo_seqcount, &lock); ++ static seqcount_LOCKNAME_t foo_seqcount = ++ SEQCNT_LOCKNAME_ZERO(foo_seqcount, &lock); + + /* C99 struct init */ + struct { +- .seq = SEQCNT_LOCKTYPE_ZERO(foo.seq, &lock), ++ .seq = SEQCNT_LOCKNAME_ZERO(foo.seq, &lock), + } foo; + + Write path: same as in :ref:`seqcount_t`, while running from a context +-with the associated LOCKTYPE lock acquired. 
++with the associated write serialization lock acquired. + + Read path: same as in :ref:`seqcount_t`. + +diff --git a/Documentation/x86/topology.rst b/Documentation/x86/topology.rst +index e29739904e37e..7f58010ea86af 100644 +--- a/Documentation/x86/topology.rst ++++ b/Documentation/x86/topology.rst +@@ -41,6 +41,8 @@ Package + Packages contain a number of cores plus shared resources, e.g. DRAM + controller, shared caches etc. + ++Modern systems may also use the term 'Die' for package. ++ + AMD nomenclature for package is 'Node'. + + Package-related topology information in the kernel: +@@ -53,11 +55,18 @@ Package-related topology information in the kernel: + + The number of dies in a package. This information is retrieved via CPUID. + ++ - cpuinfo_x86.cpu_die_id: ++ ++ The physical ID of the die. This information is retrieved via CPUID. ++ + - cpuinfo_x86.phys_proc_id: + + The physical ID of the package. This information is retrieved via CPUID + and deduced from the APIC IDs of the cores in the package. + ++ Modern systems use this value for the socket. There may be multiple ++ packages within a socket. This value may differ from cpu_die_id. ++ + - cpuinfo_x86.logical_proc_id: + + The logical ID of the package. As we do not trust BIOSes to enumerate the +diff --git a/Makefile b/Makefile +index a72bc404123d5..1e50d6af932ab 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 10 +-SUBLEVEL = 3 ++SUBLEVEL = 4 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/Kconfig b/arch/Kconfig +index ba4e966484ab5..ddd4641446bdd 100644 +--- a/arch/Kconfig ++++ b/arch/Kconfig +@@ -143,6 +143,22 @@ config UPROBES + managed by the kernel and kept transparent to the probed + application. 
) + ++config HAVE_64BIT_ALIGNED_ACCESS ++ def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS ++ help ++ Some architectures require 64 bit accesses to be 64 bit ++ aligned, which also requires structs containing 64 bit values ++ to be 64 bit aligned too. This includes some 32 bit ++ architectures which can do 64 bit accesses, as well as 64 bit ++ architectures without unaligned access. ++ ++ This symbol should be selected by an architecture if 64 bit ++ accesses are required to be 64 bit aligned in this way even ++ though it is not a 64 bit architecture. ++ ++ See Documentation/unaligned-memory-access.txt for more ++ information on the topic of unaligned memory accesses. ++ + config HAVE_EFFICIENT_UNALIGNED_ACCESS + bool + help +diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S +index caa27322a0ab7..3a392983ac079 100644 +--- a/arch/arm/boot/compressed/head.S ++++ b/arch/arm/boot/compressed/head.S +@@ -116,7 +116,7 @@ + /* + * Debug print of the final appended DTB location + */ +- .macro dbgadtb, begin, end ++ .macro dbgadtb, begin, size + #ifdef DEBUG + kputc #'D' + kputc #'T' +@@ -129,7 +129,7 @@ + kputc #'(' + kputc #'0' + kputc #'x' +- kphex \end, 8 /* End of appended DTB */ ++ kphex \size, 8 /* Size of appended DTB */ + kputc #')' + kputc #'\n' + #endif +diff --git a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi +index 654648b05c7c2..aeccedd125740 100644 +--- a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi ++++ b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi +@@ -266,11 +266,6 @@ + reg = <0x11000 0x100>; + }; + +-&i2c1 { +- compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c"; +- reg = <0x11100 0x100>; +-}; +- + &mpic { + reg = <0x20a00 0x2d0>, <0x21070 0x58>; + }; +diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts +index 2d44d9ad4e400..e6ad821a86359 100644 +--- a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts ++++ 
b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts +@@ -82,11 +82,6 @@ + status = "okay"; + }; + +-&vuart { +- // VUART Host Console +- status = "okay"; +-}; +- + &uart1 { + // Host Console + status = "okay"; +diff --git a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts +index 1deb30ec912cf..6e9baf3bba531 100644 +--- a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts ++++ b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts +@@ -22,9 +22,9 @@ + #size-cells = <1>; + ranges; + +- vga_memory: framebuffer@7f000000 { ++ vga_memory: framebuffer@9f000000 { + no-map; +- reg = <0x7f000000 0x01000000>; ++ reg = <0x9f000000 0x01000000>; /* 16M */ + }; + }; + +diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts +index 4d070d6ba09f9..e86c22ce6d123 100644 +--- a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts ++++ b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts +@@ -26,7 +26,7 @@ + #size-cells = <1>; + ranges; + +- flash_memory: region@ba000000 { ++ flash_memory: region@b8000000 { + no-map; + reg = <0xb8000000 0x4000000>; /* 64M */ + }; +diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi +index b58220a49cbd8..bf97aaad7be9b 100644 +--- a/arch/arm/boot/dts/aspeed-g6.dtsi ++++ b/arch/arm/boot/dts/aspeed-g6.dtsi +@@ -357,7 +357,7 @@ + #gpio-cells = <2>; + gpio-controller; + compatible = "aspeed,ast2600-gpio"; +- reg = <0x1e780000 0x800>; ++ reg = <0x1e780000 0x400>; + interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>; + gpio-ranges = <&pinctrl 0 0 208>; + ngpios = <208>; +diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts +index eae28b82c7fd0..73b6b1f89de99 100644 +--- a/arch/arm/boot/dts/at91-sam9x60ek.dts ++++ b/arch/arm/boot/dts/at91-sam9x60ek.dts +@@ -569,11 +569,14 @@ + atmel,pins = <AT91_PIOB 16 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>; + }; + }; +-}; /* pinctrl */ + +-&pmc { +- atmel,osc-bypass; +-}; ++ usb1 { ++ 
pinctrl_usb_default: usb_default { ++ atmel,pins = <AT91_PIOD 15 AT91_PERIPH_GPIO AT91_PINCTRL_NONE ++ AT91_PIOD 16 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>; ++ }; ++ }; ++}; /* pinctrl */ + + &pwm0 { + pinctrl-names = "default"; +@@ -684,6 +687,8 @@ + atmel,vbus-gpio = <0 + &pioD 15 GPIO_ACTIVE_HIGH + &pioD 16 GPIO_ACTIVE_HIGH>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb_default>; + status = "okay"; + }; + +diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts +index cf13632edd444..5179258f92470 100644 +--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts ++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts +@@ -242,6 +242,11 @@ + atmel,pins = + <AT91_PIOE 9 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PE9, conflicts with A9 */ + }; ++ pinctrl_usb_default: usb_default { ++ atmel,pins = ++ <AT91_PIOE 3 AT91_PERIPH_GPIO AT91_PINCTRL_NONE ++ AT91_PIOE 4 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>; ++ }; + }; + }; + }; +@@ -259,6 +264,8 @@ + &pioE 3 GPIO_ACTIVE_LOW + &pioE 4 GPIO_ACTIVE_LOW + >; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb_default>; + status = "okay"; + }; + +diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts +index e5974a17374cf..0b3ad1b580b83 100644 +--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts ++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts +@@ -134,6 +134,11 @@ + atmel,pins = + <AT91_PIOE 31 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; + }; ++ pinctrl_usb_default: usb_default { ++ atmel,pins = ++ <AT91_PIOE 11 AT91_PERIPH_GPIO AT91_PINCTRL_NONE ++ AT91_PIOE 14 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>; ++ }; + pinctrl_key_gpio: key_gpio_0 { + atmel,pins = + <AT91_PIOE 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>; +@@ -159,6 +164,8 @@ + &pioE 11 GPIO_ACTIVE_HIGH + &pioE 14 GPIO_ACTIVE_HIGH + >; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb_default>; + status = "okay"; + }; + +diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi 
b/arch/arm/boot/dts/at91sam9rl.dtsi +index 5653e70c84b4b..36a42a9fe1957 100644 +--- a/arch/arm/boot/dts/at91sam9rl.dtsi ++++ b/arch/arm/boot/dts/at91sam9rl.dtsi +@@ -282,23 +282,26 @@ + atmel,adc-use-res = "highres"; + + trigger0 { +- trigger-name = "timer-counter-0"; ++ trigger-name = "external-rising"; + trigger-value = <0x1>; ++ trigger-external; + }; ++ + trigger1 { +- trigger-name = "timer-counter-1"; +- trigger-value = <0x3>; ++ trigger-name = "external-falling"; ++ trigger-value = <0x2>; ++ trigger-external; + }; + + trigger2 { +- trigger-name = "timer-counter-2"; +- trigger-value = <0x5>; ++ trigger-name = "external-any"; ++ trigger-value = <0x3>; ++ trigger-external; + }; + + trigger3 { +- trigger-name = "external"; +- trigger-value = <0x13>; +- trigger-external; ++ trigger-name = "continuous"; ++ trigger-value = <0x6>; + }; + }; + +diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts +index 0c26467de4d03..5963566dbcc9d 100644 +--- a/arch/arm/boot/dts/meson8b-odroidc1.dts ++++ b/arch/arm/boot/dts/meson8b-odroidc1.dts +@@ -224,7 +224,7 @@ + reg = <0>; + + reset-assert-us = <10000>; +- reset-deassert-us = <30000>; ++ reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOH_4 GPIO_ACTIVE_LOW>; + + interrupt-parent = <&gpio_intc>; +diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts +index cc498191ddd1d..8f4eb1ed45816 100644 +--- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts ++++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts +@@ -81,7 +81,7 @@ + reg = <0>; + + reset-assert-us = <10000>; +- reset-deassert-us = <30000>; ++ reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOH_4 GPIO_ACTIVE_LOW>; + }; + }; +diff --git a/arch/arm/boot/dts/omap4-panda-es.dts b/arch/arm/boot/dts/omap4-panda-es.dts +index cfa85aa3da085..6afa8fd7c412d 100644 +--- a/arch/arm/boot/dts/omap4-panda-es.dts ++++ b/arch/arm/boot/dts/omap4-panda-es.dts +@@ -46,7 +46,7 @@ + + button_pins: pinmux_button_pins 
{ + pinctrl-single,pins = < +- OMAP4_IOPAD(0x11b, PIN_INPUT_PULLUP | MUX_MODE3) /* gpio_113 */ ++ OMAP4_IOPAD(0x0fc, PIN_INPUT_PULLUP | MUX_MODE3) /* gpio_113 */ + >; + }; + }; +diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi +index 2ddc85dff8ce9..2c4952427296e 100644 +--- a/arch/arm/boot/dts/sama5d2.dtsi ++++ b/arch/arm/boot/dts/sama5d2.dtsi +@@ -656,6 +656,7 @@ + clocks = <&pmc PMC_TYPE_PERIPHERAL 51>; + #address-cells = <1>; + #size-cells = <1>; ++ no-memory-wc; + ranges = <0 0xf8044000 0x1420>; + }; + +@@ -724,7 +725,7 @@ + + can0: can@f8054000 { + compatible = "bosch,m_can"; +- reg = <0xf8054000 0x4000>, <0x210000 0x4000>; ++ reg = <0xf8054000 0x4000>, <0x210000 0x1c00>; + reg-names = "m_can", "message_ram"; + interrupts = <56 IRQ_TYPE_LEVEL_HIGH 7>, + <64 IRQ_TYPE_LEVEL_HIGH 7>; +@@ -1130,7 +1131,7 @@ + + can1: can@fc050000 { + compatible = "bosch,m_can"; +- reg = <0xfc050000 0x4000>, <0x210000 0x4000>; ++ reg = <0xfc050000 0x4000>, <0x210000 0x3800>; + reg-names = "m_can", "message_ram"; + interrupts = <57 IRQ_TYPE_LEVEL_HIGH 7>, + <65 IRQ_TYPE_LEVEL_HIGH 7>; +@@ -1140,7 +1141,7 @@ + assigned-clocks = <&pmc PMC_TYPE_GCK 57>; + assigned-clock-parents = <&pmc PMC_TYPE_CORE PMC_UTMI>; + assigned-clock-rates = <40000000>; +- bosch,mram-cfg = <0x1100 0 0 64 0 0 32 32>; ++ bosch,mram-cfg = <0x1c00 0 0 64 0 0 32 32>; + status = "disabled"; + }; + +diff --git a/arch/arm/boot/dts/tegra20-ventana.dts b/arch/arm/boot/dts/tegra20-ventana.dts +index b158771ac0b7d..055334ae3d288 100644 +--- a/arch/arm/boot/dts/tegra20-ventana.dts ++++ b/arch/arm/boot/dts/tegra20-ventana.dts +@@ -3,6 +3,7 @@ + + #include <dt-bindings/input/input.h> + #include "tegra20.dtsi" ++#include "tegra20-cpu-opp.dtsi" + + / { + model = "NVIDIA Tegra20 Ventana evaluation board"; +@@ -592,6 +593,16 @@ + #clock-cells = <0>; + }; + ++ cpus { ++ cpu0: cpu@0 { ++ operating-points-v2 = <&cpu0_opp_table>; ++ }; ++ ++ cpu@1 { ++ operating-points-v2 = <&cpu0_opp_table>; ++ }; ++ }; 
++ + gpio-keys { + compatible = "gpio-keys"; + +diff --git a/arch/arm/crypto/aes-ce-core.S b/arch/arm/crypto/aes-ce-core.S +index 4d1707388d941..312428d83eedb 100644 +--- a/arch/arm/crypto/aes-ce-core.S ++++ b/arch/arm/crypto/aes-ce-core.S +@@ -386,20 +386,32 @@ ENTRY(ce_aes_ctr_encrypt) + .Lctrloop4x: + subs r4, r4, #4 + bmi .Lctr1x +- add r6, r6, #1 ++ ++ /* ++ * NOTE: the sequence below has been carefully tweaked to avoid ++ * a silicon erratum that exists in Cortex-A57 (#1742098) and ++ * Cortex-A72 (#1655431) cores, where AESE/AESMC instruction pairs ++ * may produce an incorrect result if they take their input from a ++ * register of which a single 32-bit lane has been updated the last ++ * time it was modified. To work around this, the lanes of registers ++ * q0-q3 below are not manipulated individually, and the different ++ * counter values are prepared by successive manipulations of q7. ++ */ ++ add ip, r6, #1 + vmov q0, q7 ++ rev ip, ip ++ add lr, r6, #2 ++ vmov s31, ip @ set lane 3 of q1 via q7 ++ add ip, r6, #3 ++ rev lr, lr + vmov q1, q7 +- rev ip, r6 +- add r6, r6, #1 ++ vmov s31, lr @ set lane 3 of q2 via q7 ++ rev ip, ip + vmov q2, q7 +- vmov s7, ip +- rev ip, r6 +- add r6, r6, #1 ++ vmov s31, ip @ set lane 3 of q3 via q7 ++ add r6, r6, #4 + vmov q3, q7 +- vmov s11, ip +- rev ip, r6 +- add r6, r6, #1 +- vmov s15, ip ++ + vld1.8 {q4-q5}, [r1]! + vld1.8 {q6}, [r1]! + vld1.8 {q15}, [r1]! 
+diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c +index bda8bf17631e1..f70af1d0514b9 100644 +--- a/arch/arm/crypto/aes-neonbs-glue.c ++++ b/arch/arm/crypto/aes-neonbs-glue.c +@@ -19,7 +19,7 @@ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); + MODULE_LICENSE("GPL v2"); + + MODULE_ALIAS_CRYPTO("ecb(aes)"); +-MODULE_ALIAS_CRYPTO("cbc(aes)"); ++MODULE_ALIAS_CRYPTO("cbc(aes)-all"); + MODULE_ALIAS_CRYPTO("ctr(aes)"); + MODULE_ALIAS_CRYPTO("xts(aes)"); + +@@ -191,7 +191,8 @@ static int cbc_init(struct crypto_skcipher *tfm) + struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); + unsigned int reqsize; + +- ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC); ++ ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->enc_tfm)) + return PTR_ERR(ctx->enc_tfm); + +@@ -441,7 +442,8 @@ static struct skcipher_alg aes_algs[] = { { + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx), + .base.cra_module = THIS_MODULE, +- .base.cra_flags = CRYPTO_ALG_INTERNAL, ++ .base.cra_flags = CRYPTO_ALG_INTERNAL | ++ CRYPTO_ALG_NEED_FALLBACK, + + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, +diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S +index 55a47df047738..1c9e6d1452c5b 100644 +--- a/arch/arm/kernel/entry-armv.S ++++ b/arch/arm/kernel/entry-armv.S +@@ -252,31 +252,10 @@ __und_svc: + #else + svc_entry + #endif +- @ +- @ call emulation code, which returns using r9 if it has emulated +- @ the instruction, or the more conventional lr if we are to treat +- @ this as a real undefined instruction +- @ +- @ r0 - instruction +- @ +-#ifndef CONFIG_THUMB2_KERNEL +- ldr r0, [r4, #-4] +-#else +- mov r1, #2 +- ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2 +- cmp r0, #0xe800 @ 32-bit instruction if xx >= 0 +- blo __und_svc_fault +- ldrh r9, [r4] @ bottom 16 bits +- add r4, r4, #2 +- str r4, 
[sp, #S_PC] +- orr r0, r9, r0, lsl #16 +-#endif +- badr r9, __und_svc_finish +- mov r2, r4 +- bl call_fpe + + mov r1, #4 @ PC correction to apply +-__und_svc_fault: ++ THUMB( tst r5, #PSR_T_BIT ) @ exception taken in Thumb mode? ++ THUMB( movne r1, #2 ) @ if so, fix up PC correction + mov r0, sp @ struct pt_regs *regs + bl __und_fault + +diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S +index f8904227e7fdc..98c1e68bdfcbb 100644 +--- a/arch/arm/kernel/head.S ++++ b/arch/arm/kernel/head.S +@@ -671,12 +671,8 @@ ARM_BE8(rev16 ip, ip) + ldrcc r7, [r4], #4 @ use branch for delay slot + bcc 1b + bx lr +-#else +-#ifdef CONFIG_CPU_ENDIAN_BE8 +- moveq r0, #0x00004000 @ set bit 22, mov to mvn instruction + #else + moveq r0, #0x400000 @ set bit 22, mov to mvn instruction +-#endif + b 2f + 1: ldr ip, [r7, r3] + #ifdef CONFIG_CPU_ENDIAN_BE8 +@@ -685,7 +681,7 @@ ARM_BE8(rev16 ip, ip) + tst ip, #0x000f0000 @ check the rotation field + orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24 + biceq ip, ip, #0x00004000 @ clear bit 22 +- orreq ip, ip, r0 @ mask in offset bits 7-0 ++ orreq ip, ip, r0, ror #8 @ mask in offset bits 7-0 + #else + bic ip, ip, #0x000000ff + tst ip, #0xf00 @ check the rotation field +diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S +index 0186cf9da890b..27b0a1f27fbdf 100644 +--- a/arch/arm/vfp/entry.S ++++ b/arch/arm/vfp/entry.S +@@ -37,20 +37,3 @@ ENDPROC(vfp_null_entry) + .align 2 + .LCvfp: + .word vfp_vector +- +-@ This code is called if the VFP does not exist. It needs to flag the +-@ failure to the VFP initialisation code. 
+- +- __INIT +-ENTRY(vfp_testing_entry) +- dec_preempt_count_ti r10, r4 +- ldr r0, VFP_arch_address +- str r0, [r0] @ set to non-zero value +- ret r9 @ we have handled the fault +-ENDPROC(vfp_testing_entry) +- +- .align 2 +-VFP_arch_address: +- .word VFP_arch +- +- __FINIT +diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S +index 4fcff9f59947d..d5837bf05a9a5 100644 +--- a/arch/arm/vfp/vfphw.S ++++ b/arch/arm/vfp/vfphw.S +@@ -79,11 +79,6 @@ ENTRY(vfp_support_entry) + DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10 + + .fpu vfpv2 +- ldr r3, [sp, #S_PSR] @ Neither lazy restore nor FP exceptions +- and r3, r3, #MODE_MASK @ are supported in kernel mode +- teq r3, #USR_MODE +- bne vfp_kmode_exception @ Returns through lr +- + VFPFMRX r1, FPEXC @ Is the VFP enabled? + DBGSTR1 "fpexc %08x", r1 + tst r1, #FPEXC_EN +diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c +index 8c9e7f9f0277d..2cb355c1b5b71 100644 +--- a/arch/arm/vfp/vfpmodule.c ++++ b/arch/arm/vfp/vfpmodule.c +@@ -23,6 +23,7 @@ + #include <asm/cputype.h> + #include <asm/system_info.h> + #include <asm/thread_notify.h> ++#include <asm/traps.h> + #include <asm/vfp.h> + + #include "vfpinstr.h" +@@ -31,7 +32,6 @@ + /* + * Our undef handlers (in entry.S) + */ +-asmlinkage void vfp_testing_entry(void); + asmlinkage void vfp_support_entry(void); + asmlinkage void vfp_null_entry(void); + +@@ -42,7 +42,7 @@ asmlinkage void (*vfp_vector)(void) = vfp_null_entry; + * Used in startup: set to non-zero if VFP checks fail + * After startup, holds VFP architecture + */ +-unsigned int VFP_arch; ++static unsigned int __initdata VFP_arch; + + /* + * The pointer to the vfpstate structure of the thread which currently +@@ -436,7 +436,7 @@ static void vfp_enable(void *unused) + * present on all CPUs within a SMP complex. Needs to be called prior to + * vfp_init(). 
+ */ +-void vfp_disable(void) ++void __init vfp_disable(void) + { + if (VFP_arch) { + pr_debug("%s: should be called prior to vfp_init\n", __func__); +@@ -642,7 +642,9 @@ static int vfp_starting_cpu(unsigned int unused) + return 0; + } + +-void vfp_kmode_exception(void) ++#ifdef CONFIG_KERNEL_MODE_NEON ++ ++static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr) + { + /* + * If we reach this point, a floating point exception has been raised +@@ -660,9 +662,51 @@ void vfp_kmode_exception(void) + pr_crit("BUG: unsupported FP instruction in kernel mode\n"); + else + pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n"); ++ pr_crit("FPEXC == 0x%08x\n", fmrx(FPEXC)); ++ return 1; + } + +-#ifdef CONFIG_KERNEL_MODE_NEON ++static struct undef_hook vfp_kmode_exception_hook[] = {{ ++ .instr_mask = 0xfe000000, ++ .instr_val = 0xf2000000, ++ .cpsr_mask = MODE_MASK | PSR_T_BIT, ++ .cpsr_val = SVC_MODE, ++ .fn = vfp_kmode_exception, ++}, { ++ .instr_mask = 0xff100000, ++ .instr_val = 0xf4000000, ++ .cpsr_mask = MODE_MASK | PSR_T_BIT, ++ .cpsr_val = SVC_MODE, ++ .fn = vfp_kmode_exception, ++}, { ++ .instr_mask = 0xef000000, ++ .instr_val = 0xef000000, ++ .cpsr_mask = MODE_MASK | PSR_T_BIT, ++ .cpsr_val = SVC_MODE | PSR_T_BIT, ++ .fn = vfp_kmode_exception, ++}, { ++ .instr_mask = 0xff100000, ++ .instr_val = 0xf9000000, ++ .cpsr_mask = MODE_MASK | PSR_T_BIT, ++ .cpsr_val = SVC_MODE | PSR_T_BIT, ++ .fn = vfp_kmode_exception, ++}, { ++ .instr_mask = 0x0c000e00, ++ .instr_val = 0x0c000a00, ++ .cpsr_mask = MODE_MASK, ++ .cpsr_val = SVC_MODE, ++ .fn = vfp_kmode_exception, ++}}; ++ ++static int __init vfp_kmode_exception_hook_init(void) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(vfp_kmode_exception_hook); i++) ++ register_undef_hook(&vfp_kmode_exception_hook[i]); ++ return 0; ++} ++subsys_initcall(vfp_kmode_exception_hook_init); + + /* + * Kernel-side NEON support functions +@@ -708,6 +752,21 @@ EXPORT_SYMBOL(kernel_neon_end); + + #endif /* 
CONFIG_KERNEL_MODE_NEON */ + ++static int __init vfp_detect(struct pt_regs *regs, unsigned int instr) ++{ ++ VFP_arch = UINT_MAX; /* mark as not present */ ++ regs->ARM_pc += 4; ++ return 0; ++} ++ ++static struct undef_hook vfp_detect_hook __initdata = { ++ .instr_mask = 0x0c000e00, ++ .instr_val = 0x0c000a00, ++ .cpsr_mask = MODE_MASK, ++ .cpsr_val = SVC_MODE, ++ .fn = vfp_detect, ++}; ++ + /* + * VFP support code initialisation. + */ +@@ -728,10 +787,11 @@ static int __init vfp_init(void) + * The handler is already setup to just log calls, so + * we just need to read the VFPSID register. + */ +- vfp_vector = vfp_testing_entry; ++ register_undef_hook(&vfp_detect_hook); + barrier(); + vfpsid = fmrx(FPSID); + barrier(); ++ unregister_undef_hook(&vfp_detect_hook); + vfp_vector = vfp_null_entry; + + pr_info("VFP support v0.3: "); +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts +index 1b07c8c06eac5..463a72d6bb7c7 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts ++++ b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts +@@ -340,7 +340,7 @@ + eee-broken-1000t; + + reset-assert-us = <10000>; +- reset-deassert-us = <30000>; ++ reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOZ_15 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>; + + interrupt-parent = <&gpio_intc>; +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi +index 6982632ae6461..39a09661c5f62 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi ++++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi +@@ -413,7 +413,7 @@ + max-speed = <1000>; + + reset-assert-us = <10000>; +- reset-deassert-us = <30000>; ++ reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOZ_15 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>; + + interrupt-parent = <&gpio_intc>; +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi 
b/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi +index 2802ddbb83ac7..feb0885047400 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi ++++ b/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi +@@ -264,7 +264,7 @@ + max-speed = <1000>; + + reset-assert-us = <10000>; +- reset-deassert-us = <30000>; ++ reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOZ_15 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>; + + interrupt-parent = <&gpio_intc>; +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts +index 7be3e354093bf..de27beafe9db9 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts ++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts +@@ -165,7 +165,7 @@ + reg = <0>; + + reset-assert-us = <10000>; +- reset-deassert-us = <30000>; ++ reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; + + interrupt-parent = <&gpio_intc>; +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +index 70fcfb7b0683d..50de1d01e5655 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts ++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +@@ -200,7 +200,7 @@ + reg = <0>; + + reset-assert-us = <10000>; +- reset-deassert-us = <30000>; ++ reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; + + interrupt-parent = <&gpio_intc>; +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi +index 222ee8069cfaa..9b0b81f191f1f 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi ++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi +@@ -126,7 +126,7 @@ + reg = <0>; + + reset-assert-us = <10000>; +- reset-deassert-us = <30000>; ++ reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; + + interrupt-parent = <&gpio_intc>; +diff --git 
a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi +index ad812854a107f..a350fee1264d7 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi ++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi +@@ -147,7 +147,7 @@ + reg = <0>; + + reset-assert-us = <10000>; +- reset-deassert-us = <30000>; ++ reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; + + interrupt-parent = <&gpio_intc>; +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts +index b08c4537f260d..b2ab05c220903 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts ++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts +@@ -82,7 +82,7 @@ + + /* External PHY reset is shared with internal PHY Led signal */ + reset-assert-us = <10000>; +- reset-deassert-us = <30000>; ++ reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; + + interrupt-parent = <&gpio_intc>; +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +index bff8ec2c1c70c..62d3e04299b67 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts ++++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +@@ -194,7 +194,7 @@ + reg = <0>; + + reset-assert-us = <10000>; +- reset-deassert-us = <30000>; ++ reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; + + interrupt-parent = <&gpio_intc>; +@@ -341,7 +341,7 @@ + #size-cells = <1>; + compatible = "winbond,w25q16", "jedec,spi-nor"; + reg = <0>; +- spi-max-frequency = <3000000>; ++ spi-max-frequency = <104000000>; + }; + }; + +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts +index 83eca3af44ce7..dfa7a37a1281f 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts ++++ 
b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts +@@ -112,7 +112,7 @@ + max-speed = <1000>; + + reset-assert-us = <10000>; +- reset-deassert-us = <30000>; ++ reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; + }; + }; +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts +index ea45ae0c71b7f..8edbfe040805c 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts ++++ b/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts +@@ -64,7 +64,7 @@ + + /* External PHY reset is shared with internal PHY Led signal */ + reset-assert-us = <10000>; +- reset-deassert-us = <30000>; ++ reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; + + interrupt-parent = <&gpio_intc>; +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts +index c89c9f846fb10..dde7cfe12cffa 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts ++++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts +@@ -114,7 +114,7 @@ + max-speed = <1000>; + + reset-assert-us = <10000>; +- reset-deassert-us = <30000>; ++ reset-deassert-us = <80000>; + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; + }; + }; +diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi +index 71317f5aada1d..c309517abae32 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi ++++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi +@@ -130,7 +130,7 @@ + opp-microvolt = <790000>; + }; + +- opp-1512000000 { ++ opp-1500000000 { + opp-hz = /bits/ 64 <1500000000>; + opp-microvolt = <800000>; + }; +diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi +index b9ed6a33e2901..7599e1a00ff51 100644 +--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi ++++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi +@@ -79,8 +79,10 @@ + }; + + psci { +- compatible = "arm,psci-0.2"; ++ compatible = "arm,psci"; + 
method = "smc"; ++ cpu_off = <0x84000002>; ++ cpu_on = <0xC4000003>; + }; + + soc: soc@0 { +@@ -481,13 +483,6 @@ + pmu_system_controller: system-controller@105c0000 { + compatible = "samsung,exynos7-pmu", "syscon"; + reg = <0x105c0000 0x5000>; +- +- reboot: syscon-reboot { +- compatible = "syscon-reboot"; +- regmap = <&pmu_system_controller>; +- offset = <0x0400>; +- mask = <0x1>; +- }; + }; + + rtc: rtc@10590000 { +@@ -687,3 +682,4 @@ + }; + + #include "exynos7-pinctrl.dtsi" ++#include "arm/exynos-syscon-restart.dtsi" +diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts +index 8161dd2379712..b3fa4dbeebd52 100644 +--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts +@@ -155,20 +155,10 @@ + }; + + partition@210000 { +- reg = <0x210000 0x0f0000>; ++ reg = <0x210000 0x1d0000>; + label = "bootloader"; + }; + +- partition@300000 { +- reg = <0x300000 0x040000>; +- label = "DP firmware"; +- }; +- +- partition@340000 { +- reg = <0x340000 0x0a0000>; +- label = "trusted firmware"; +- }; +- + partition@3e0000 { + reg = <0x3e0000 0x020000>; + label = "bootloader environment"; +diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +index 7a6fb7e1fb82f..33aa0efa2293a 100644 +--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +@@ -309,7 +309,7 @@ + <0x0 0x20000000 0x0 0x10000000>; + reg-names = "fspi_base", "fspi_mmap"; + interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>; +- clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clocks = <&clockgen 2 0>, <&clockgen 2 0>; + clock-names = "fspi_en", "fspi"; + status = "disabled"; + }; +@@ -934,7 +934,7 @@ + ethernet@0,4 { + compatible = "fsl,enetc-ptp"; + reg = <0x000400 0 0 0 0>; +- clocks = <&clockgen 4 0>; ++ clocks = <&clockgen 2 3>; + little-endian; + fsl,extts-fifo; + }; 
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts +index f3a678e0fd99b..bf76ebe463794 100644 +--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts ++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts +@@ -146,7 +146,7 @@ + pinctrl-names = "default"; + pinctrl-0 = <&rgmii_pins>; + phy-mode = "rgmii-id"; +- phy = <&phy1>; ++ phy-handle = <&phy1>; + status = "okay"; + }; + +diff --git a/arch/arm64/boot/dts/marvell/armada-7040.dtsi b/arch/arm64/boot/dts/marvell/armada-7040.dtsi +index 7a3198cd7a071..2f440711d21d2 100644 +--- a/arch/arm64/boot/dts/marvell/armada-7040.dtsi ++++ b/arch/arm64/boot/dts/marvell/armada-7040.dtsi +@@ -15,10 +15,6 @@ + "marvell,armada-ap806"; + }; + +-&smmu { +- status = "okay"; +-}; +- + &cp0_pcie0 { + iommu-map = + <0x0 &smmu 0x480 0x20>, +diff --git a/arch/arm64/boot/dts/marvell/armada-8040.dtsi b/arch/arm64/boot/dts/marvell/armada-8040.dtsi +index 79e8ce59baa88..22c2d6ebf3818 100644 +--- a/arch/arm64/boot/dts/marvell/armada-8040.dtsi ++++ b/arch/arm64/boot/dts/marvell/armada-8040.dtsi +@@ -15,10 +15,6 @@ + "marvell,armada-ap806"; + }; + +-&smmu { +- status = "okay"; +-}; +- + &cp0_pcie0 { + iommu-map = + <0x0 &smmu 0x480 0x20>, +diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi +index 9cfd961c45eb3..08a914d3a6435 100644 +--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi ++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi +@@ -363,7 +363,7 @@ + compatible = "mediatek,mt8183-gce"; + reg = <0 0x10238000 0 0x4000>; + interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_LOW>; +- #mbox-cells = <3>; ++ #mbox-cells = <2>; + clocks = <&infracfg CLK_INFRA_GCE>; + clock-names = "gce"; + }; +diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi +index 93438d2b94696..6946fb210e484 100644 +--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi ++++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi +@@ -378,7 
+378,7 @@ + nvidia,schmitt = <TEGRA_PIN_DISABLE>; + nvidia,lpdr = <TEGRA_PIN_ENABLE>; + nvidia,enable-input = <TEGRA_PIN_DISABLE>; +- nvidia,io-high-voltage = <TEGRA_PIN_ENABLE>; ++ nvidia,io-hv = <TEGRA_PIN_ENABLE>; + nvidia,tristate = <TEGRA_PIN_DISABLE>; + nvidia,pull = <TEGRA_PIN_PULL_NONE>; + }; +@@ -390,7 +390,7 @@ + nvidia,schmitt = <TEGRA_PIN_DISABLE>; + nvidia,lpdr = <TEGRA_PIN_ENABLE>; + nvidia,enable-input = <TEGRA_PIN_ENABLE>; +- nvidia,io-high-voltage = <TEGRA_PIN_ENABLE>; ++ nvidia,io-hv = <TEGRA_PIN_ENABLE>; + nvidia,tristate = <TEGRA_PIN_DISABLE>; + nvidia,pull = <TEGRA_PIN_PULL_NONE>; + }; +diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +index 59e0cbfa22143..cdc1e3d60c58e 100644 +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -156,8 +156,8 @@ + no-map; + }; + +- tz: tz@48500000 { +- reg = <0x0 0x48500000 0x0 0x00200000>; ++ tz: memory@4a600000 { ++ reg = <0x0 0x4a600000 0x0 0x00400000>; + no-map; + }; + +@@ -167,7 +167,7 @@ + }; + + q6_region: memory@4ab00000 { +- reg = <0x0 0x4ab00000 0x0 0x02800000>; ++ reg = <0x0 0x4ab00000 0x0 0x05500000>; + no-map; + }; + }; +diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi +index b18d21e42f596..f7ac4c4033db6 100644 +--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi +@@ -78,6 +78,9 @@ + sda-gpios = <&msmgpio 105 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>; + scl-gpios = <&msmgpio 106 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>; + ++ pinctrl-names = "default"; ++ pinctrl-0 = <&muic_i2c_default>; ++ + #address-cells = <1>; + #size-cells = <0>; + +@@ -314,6 +317,14 @@ + }; + }; + ++ muic_i2c_default: muic-i2c-default { ++ pins = "gpio105", "gpio106"; ++ function = "gpio"; ++ ++ drive-strength = <2>; ++ bias-disable; ++ }; ++ + muic_int_default: muic-int-default { + pins = 
"gpio12"; + function = "gpio"; +diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi +index 6678f1e8e3958..c71f3afc1cc9f 100644 +--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi ++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi +@@ -1394,7 +1394,8 @@ + ipa: ipa@1e40000 { + compatible = "qcom,sc7180-ipa"; + +- iommus = <&apps_smmu 0x440 0x3>; ++ iommus = <&apps_smmu 0x440 0x0>, ++ <&apps_smmu 0x442 0x0>; + reg = <0 0x1e40000 0 0x7000>, + <0 0x1e47000 0 0x2000>, + <0 0x1e04000 0 0x2c000>; +@@ -2811,7 +2812,7 @@ + interrupt-controller; + #interrupt-cells = <1>; + +- interconnects = <&mmss_noc MASTER_MDP0 &mc_virt SLAVE_EBI1>; ++ interconnects = <&mmss_noc MASTER_MDP0 0 &mc_virt SLAVE_EBI1 0>; + interconnect-names = "mdp0-mem"; + + iommus = <&apps_smmu 0x800 0x2>; +diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi +index 40e8c11f23ab0..f97f354af86f4 100644 +--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi ++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi +@@ -2141,7 +2141,8 @@ + ipa: ipa@1e40000 { + compatible = "qcom,sdm845-ipa"; + +- iommus = <&apps_smmu 0x720 0x3>; ++ iommus = <&apps_smmu 0x720 0x0>, ++ <&apps_smmu 0x722 0x0>; + reg = <0 0x1e40000 0 0x7000>, + <0 0x1e47000 0 0x2000>, + <0 0x1e04000 0 0x2c000>; +diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +index d03ca31907466..76a8c996d497f 100644 +--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts ++++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +@@ -264,23 +264,28 @@ + status = "okay"; + clock-frequency = <400000>; + +- hid@15 { ++ tsel: hid@15 { + compatible = "hid-over-i2c"; + reg = <0x15>; + hid-descr-addr = <0x1>; + +- interrupts-extended = <&tlmm 37 IRQ_TYPE_EDGE_RISING>; ++ interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2c3_hid_active>; + }; + +- hid@2c { ++ tsc2: hid@2c { + compatible = "hid-over-i2c"; + 
reg = <0x2c>; + hid-descr-addr = <0x20>; + +- interrupts-extended = <&tlmm 37 IRQ_TYPE_EDGE_RISING>; ++ interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>; + + pinctrl-names = "default"; +- pinctrl-0 = <&i2c2_hid_active>; ++ pinctrl-0 = <&i2c3_hid_active>; ++ ++ status = "disabled"; + }; + }; + +@@ -288,15 +293,15 @@ + status = "okay"; + clock-frequency = <400000>; + +- hid@10 { ++ tsc1: hid@10 { + compatible = "hid-over-i2c"; + reg = <0x10>; + hid-descr-addr = <0x1>; + +- interrupts-extended = <&tlmm 125 IRQ_TYPE_EDGE_FALLING>; ++ interrupts-extended = <&tlmm 125 IRQ_TYPE_LEVEL_LOW>; + + pinctrl-names = "default"; +- pinctrl-0 = <&i2c6_hid_active>; ++ pinctrl-0 = <&i2c5_hid_active>; + }; + }; + +@@ -304,7 +309,7 @@ + status = "okay"; + clock-frequency = <400000>; + +- hid@5c { ++ ecsh: hid@5c { + compatible = "hid-over-i2c"; + reg = <0x5c>; + hid-descr-addr = <0x1>; +@@ -312,7 +317,7 @@ + interrupts-extended = <&tlmm 92 IRQ_TYPE_LEVEL_LOW>; + + pinctrl-names = "default"; +- pinctrl-0 = <&i2c12_hid_active>; ++ pinctrl-0 = <&i2c11_hid_active>; + }; + }; + +@@ -426,8 +431,8 @@ + &tlmm { + gpio-reserved-ranges = <0 4>, <81 4>; + +- i2c2_hid_active: i2c2-hid-active { +- pins = <37>; ++ i2c3_hid_active: i2c2-hid-active { ++ pins = "gpio37"; + function = "gpio"; + + input-enable; +@@ -435,8 +440,8 @@ + drive-strength = <2>; + }; + +- i2c6_hid_active: i2c6-hid-active { +- pins = <125>; ++ i2c5_hid_active: i2c5-hid-active { ++ pins = "gpio125"; + function = "gpio"; + + input-enable; +@@ -444,8 +449,8 @@ + drive-strength = <2>; + }; + +- i2c12_hid_active: i2c12-hid-active { +- pins = <92>; ++ i2c11_hid_active: i2c11-hid-active { ++ pins = "gpio92"; + function = "gpio"; + + input-enable; +@@ -454,7 +459,7 @@ + }; + + wcd_intr_default: wcd_intr_default { +- pins = <54>; ++ pins = "gpio54"; + function = "gpio"; + + input-enable; +diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts +index fd194ed7fbc86..98675e1f8204f 100644 +--- 
a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts ++++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts +@@ -14,7 +14,7 @@ + + / { + model = "Qualcomm Technologies, Inc. SM8250 MTP"; +- compatible = "qcom,sm8250-mtp"; ++ compatible = "qcom,sm8250-mtp", "qcom,sm8250"; + + aliases { + serial0 = &uart12; +diff --git a/arch/arm64/boot/dts/renesas/cat875.dtsi b/arch/arm64/boot/dts/renesas/cat875.dtsi +index 33daa95706840..801ea54b027c4 100644 +--- a/arch/arm64/boot/dts/renesas/cat875.dtsi ++++ b/arch/arm64/boot/dts/renesas/cat875.dtsi +@@ -21,7 +21,6 @@ + status = "okay"; + + phy0: ethernet-phy@0 { +- rxc-skew-ps = <1500>; + reg = <0>; + interrupt-parent = <&gpio2>; + interrupts = <21 IRQ_TYPE_LEVEL_LOW>; +diff --git a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi +index 178401a34cbf8..b9e46aed53362 100644 +--- a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi ++++ b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi +@@ -23,7 +23,6 @@ + status = "okay"; + + phy0: ethernet-phy@0 { +- rxc-skew-ps = <1500>; + reg = <0>; + interrupt-parent = <&gpio2>; + interrupts = <11 IRQ_TYPE_LEVEL_LOW>; +diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts +index b70ffb1c6a630..b76282e704de1 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts ++++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts +@@ -334,6 +334,7 @@ + }; + + &usb20_otg { ++ dr_mode = "host"; + status = "okay"; + }; + +diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi +index bbdb19a3e85d1..db0d5c8e5f96a 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi +@@ -1237,8 +1237,8 @@ + + uart0 { + uart0_xfer: uart0-xfer { +- rockchip,pins = <1 RK_PB1 1 &pcfg_pull_up>, +- <1 RK_PB0 1 &pcfg_pull_none>; ++ rockchip,pins = <1 RK_PB1 1 &pcfg_pull_none>, ++ <1 RK_PB0 1 &pcfg_pull_up>; + }; + + uart0_cts: uart0-cts { +@@ -1256,8 +1256,8 @@ 
+ + uart1 { + uart1_xfer: uart1-xfer { +- rockchip,pins = <3 RK_PA4 4 &pcfg_pull_up>, +- <3 RK_PA6 4 &pcfg_pull_none>; ++ rockchip,pins = <3 RK_PA4 4 &pcfg_pull_none>, ++ <3 RK_PA6 4 &pcfg_pull_up>; + }; + + uart1_cts: uart1-cts { +@@ -1275,15 +1275,15 @@ + + uart2-0 { + uart2m0_xfer: uart2m0-xfer { +- rockchip,pins = <1 RK_PA0 2 &pcfg_pull_up>, +- <1 RK_PA1 2 &pcfg_pull_none>; ++ rockchip,pins = <1 RK_PA0 2 &pcfg_pull_none>, ++ <1 RK_PA1 2 &pcfg_pull_up>; + }; + }; + + uart2-1 { + uart2m1_xfer: uart2m1-xfer { +- rockchip,pins = <2 RK_PA0 1 &pcfg_pull_up>, +- <2 RK_PA1 1 &pcfg_pull_none>; ++ rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>, ++ <2 RK_PA1 1 &pcfg_pull_up>; + }; + }; + +diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +index 533525229a8db..b9662205be9bf 100644 +--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi ++++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +@@ -834,7 +834,7 @@ + }; + }; + +- dss: dss@04a00000 { ++ dss: dss@4a00000 { + compatible = "ti,am65x-dss"; + reg = <0x0 0x04a00000 0x0 0x1000>, /* common */ + <0x0 0x04a02000 0x0 0x1000>, /* vidl1 */ +@@ -867,6 +867,8 @@ + + status = "disabled"; + ++ dma-coherent; ++ + dss_ports: ports { + #address-cells = <1>; + #size-cells = <0>; +diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi +index e2a96b2c423c4..c66ded9079be4 100644 +--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi ++++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi +@@ -1278,7 +1278,7 @@ + }; + }; + +- dss: dss@04a00000 { ++ dss: dss@4a00000 { + compatible = "ti,j721e-dss"; + reg = + <0x00 0x04a00000 0x00 0x10000>, /* common_m */ +diff --git a/arch/arm64/crypto/poly1305-armv8.pl b/arch/arm64/crypto/poly1305-armv8.pl +index 6e5576d19af8f..cbc980fb02e33 100644 +--- a/arch/arm64/crypto/poly1305-armv8.pl ++++ b/arch/arm64/crypto/poly1305-armv8.pl +@@ -840,7 +840,6 @@ poly1305_blocks_neon: + ldp d14,d15,[sp,#64] + addp $ACC2,$ACC2,$ACC2 + ldr x30,[sp,#8] +- 
.inst 0xd50323bf // autiasp + + //////////////////////////////////////////////////////////////// + // lazy reduction, but without narrowing +@@ -882,6 +881,7 @@ poly1305_blocks_neon: + str x4,[$ctx,#8] // set is_base2_26 + + ldr x29,[sp],#80 ++ .inst 0xd50323bf // autiasp + ret + .size poly1305_blocks_neon,.-poly1305_blocks_neon + +diff --git a/arch/arm64/crypto/poly1305-core.S_shipped b/arch/arm64/crypto/poly1305-core.S_shipped +index 8d1c4e420ccdc..fb2822abf63aa 100644 +--- a/arch/arm64/crypto/poly1305-core.S_shipped ++++ b/arch/arm64/crypto/poly1305-core.S_shipped +@@ -779,7 +779,6 @@ poly1305_blocks_neon: + ldp d14,d15,[sp,#64] + addp v21.2d,v21.2d,v21.2d + ldr x30,[sp,#8] +- .inst 0xd50323bf // autiasp + + //////////////////////////////////////////////////////////////// + // lazy reduction, but without narrowing +@@ -821,6 +820,7 @@ poly1305_blocks_neon: + str x4,[x0,#8] // set is_base2_26 + + ldr x29,[sp],#80 ++ .inst 0xd50323bf // autiasp + ret + .size poly1305_blocks_neon,.-poly1305_blocks_neon + +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h +index 0cd9f0f75c135..cc060c41adaab 100644 +--- a/arch/arm64/include/asm/kvm_host.h ++++ b/arch/arm64/include/asm/kvm_host.h +@@ -214,6 +214,7 @@ enum vcpu_sysreg { + #define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */ + #define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */ + #define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */ ++#define c2_TTBCR2 (c2_TTBCR + 1) /* Translation Table Base Control R. 
2 */ + #define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */ + #define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */ + #define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */ +diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c +index 52a0638ed967b..ef15c8a2a49dc 100644 +--- a/arch/arm64/kernel/mte.c ++++ b/arch/arm64/kernel/mte.c +@@ -189,7 +189,8 @@ long get_mte_ctrl(struct task_struct *task) + + switch (task->thread.sctlr_tcf0) { + case SCTLR_EL1_TCF0_NONE: +- return PR_MTE_TCF_NONE; ++ ret |= PR_MTE_TCF_NONE; ++ break; + case SCTLR_EL1_TCF0_SYNC: + ret |= PR_MTE_TCF_SYNC; + break; +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c +index c1fac9836af1a..2b28bf1a53266 100644 +--- a/arch/arm64/kvm/sys_regs.c ++++ b/arch/arm64/kvm/sys_regs.c +@@ -1987,6 +1987,7 @@ static const struct sys_reg_desc cp15_regs[] = { + { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, + { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, + { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, ++ { Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, c2_TTBCR2 }, + { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR }, + { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR }, + { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR }, +diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c +index 0ac53d87493c8..2bea1799b8de7 100644 +--- a/arch/m68k/mac/config.c ++++ b/arch/m68k/mac/config.c +@@ -777,16 +777,12 @@ static struct resource scc_b_rsrcs[] = { + struct platform_device scc_a_pdev = { + .name = "scc", + .id = 0, +- .num_resources = ARRAY_SIZE(scc_a_rsrcs), +- .resource = scc_a_rsrcs, + }; + EXPORT_SYMBOL(scc_a_pdev); + + struct platform_device scc_b_pdev = { + .name = "scc", + .id = 1, +- .num_resources = ARRAY_SIZE(scc_b_rsrcs), +- .resource = scc_b_rsrcs, + }; + EXPORT_SYMBOL(scc_b_pdev); + +@@ -813,10 
+809,15 @@ static void __init mac_identify(void) + + /* Set up serial port resources for the console initcall. */ + +- scc_a_rsrcs[0].start = (resource_size_t) mac_bi_data.sccbase + 2; +- scc_a_rsrcs[0].end = scc_a_rsrcs[0].start; +- scc_b_rsrcs[0].start = (resource_size_t) mac_bi_data.sccbase; +- scc_b_rsrcs[0].end = scc_b_rsrcs[0].start; ++ scc_a_rsrcs[0].start = (resource_size_t)mac_bi_data.sccbase + 2; ++ scc_a_rsrcs[0].end = scc_a_rsrcs[0].start; ++ scc_a_pdev.num_resources = ARRAY_SIZE(scc_a_rsrcs); ++ scc_a_pdev.resource = scc_a_rsrcs; ++ ++ scc_b_rsrcs[0].start = (resource_size_t)mac_bi_data.sccbase; ++ scc_b_rsrcs[0].end = scc_b_rsrcs[0].start; ++ scc_b_pdev.num_resources = ARRAY_SIZE(scc_b_rsrcs); ++ scc_b_pdev.resource = scc_b_rsrcs; + + switch (macintosh_config->scc_type) { + case MAC_SCC_PSC: +diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig +index 6889f74e06f54..490bb6da74b7e 100644 +--- a/arch/mips/bcm47xx/Kconfig ++++ b/arch/mips/bcm47xx/Kconfig +@@ -27,6 +27,7 @@ config BCM47XX_BCMA + select BCMA + select BCMA_HOST_SOC + select BCMA_DRIVER_MIPS ++ select BCMA_DRIVER_PCI if PCI + select BCMA_DRIVER_PCI_HOSTMODE if PCI + select BCMA_DRIVER_GPIO + default y +diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c +index ca579deef9391..9d11f68a9e8bb 100644 +--- a/arch/mips/kernel/setup.c ++++ b/arch/mips/kernel/setup.c +@@ -498,8 +498,8 @@ static void __init request_crashkernel(struct resource *res) + + static void __init check_kernel_sections_mem(void) + { +- phys_addr_t start = PFN_PHYS(PFN_DOWN(__pa_symbol(&_text))); +- phys_addr_t size = PFN_PHYS(PFN_UP(__pa_symbol(&_end))) - start; ++ phys_addr_t start = __pa_symbol(&_text); ++ phys_addr_t size = __pa_symbol(&_end) - start; + + if (!memblock_is_region_memory(start, size)) { + pr_info("Kernel sections are not in the memory maps\n"); +diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile +index f8ce6d2dde7b1..e4b364b5da9e7 100644 +--- 
a/arch/powerpc/boot/Makefile ++++ b/arch/powerpc/boot/Makefile +@@ -368,6 +368,8 @@ initrd-y := $(filter-out $(image-y), $(initrd-y)) + targets += $(image-y) $(initrd-y) + targets += $(foreach x, dtbImage uImage cuImage simpleImage treeImage, \ + $(patsubst $(x).%, dts/%.dtb, $(filter $(x).%, $(image-y)))) ++targets += $(foreach x, dtbImage uImage cuImage simpleImage treeImage, \ ++ $(patsubst $(x).%, dts/fsl/%.dtb, $(filter $(x).%, $(image-y)))) + + $(addprefix $(obj)/, $(initrd-y)): $(obj)/ramdisk.image.gz + +diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h +index 4a4d3afd53406..299ab33505a6c 100644 +--- a/arch/powerpc/include/asm/bitops.h ++++ b/arch/powerpc/include/asm/bitops.h +@@ -216,15 +216,34 @@ static inline void arch___clear_bit_unlock(int nr, volatile unsigned long *addr) + */ + static inline int fls(unsigned int x) + { +- return 32 - __builtin_clz(x); ++ int lz; ++ ++ if (__builtin_constant_p(x)) ++ return x ? 32 - __builtin_clz(x) : 0; ++ asm("cntlzw %0,%1" : "=r" (lz) : "r" (x)); ++ return 32 - lz; + } + + #include <asm-generic/bitops/builtin-__fls.h> + ++/* ++ * 64-bit can do this using one cntlzd (count leading zeroes doubleword) ++ * instruction; for 32-bit we use the generic version, which does two ++ * 32-bit fls calls. ++ */ ++#ifdef CONFIG_PPC64 + static inline int fls64(__u64 x) + { +- return 64 - __builtin_clzll(x); ++ int lz; ++ ++ if (__builtin_constant_p(x)) ++ return x ? 
64 - __builtin_clzll(x) : 0; ++ asm("cntlzd %0,%1" : "=r" (lz) : "r" (x)); ++ return 64 - lz; + } ++#else ++#include <asm-generic/bitops/fls64.h> ++#endif + + #ifdef CONFIG_PPC64 + unsigned int __arch_hweight8(unsigned int w); +diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h +index 2e277ca0170fb..a8982d52f6b1d 100644 +--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h ++++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h +@@ -94,6 +94,7 @@ typedef struct { + } mm_context_t; + + void update_bats(void); ++static inline void cleanup_cpu_mmu_context(void) { }; + + /* patch sites */ + extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2; +diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h +index 1376be95e975f..523d3e6e24009 100644 +--- a/arch/powerpc/include/asm/book3s/32/pgtable.h ++++ b/arch/powerpc/include/asm/book3s/32/pgtable.h +@@ -524,9 +524,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, + if (pte_val(*ptep) & _PAGE_HASHPTE) + flush_hash_entry(mm, ptep, addr); + __asm__ __volatile__("\ +- stw%U0%X0 %2,%0\n\ ++ stw%X0 %2,%0\n\ + eieio\n\ +- stw%U0%X0 %L2,%1" ++ stw%X1 %L2,%1" + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) + : "r" (pte) : "memory"); + +diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h +index a116fe9317892..3bdd74739cb88 100644 +--- a/arch/powerpc/include/asm/cpm1.h ++++ b/arch/powerpc/include/asm/cpm1.h +@@ -68,6 +68,7 @@ extern void cpm_reset(void); + #define PROFF_SPI ((uint)0x0180) + #define PROFF_SCC3 ((uint)0x0200) + #define PROFF_SMC1 ((uint)0x0280) ++#define PROFF_DSP1 ((uint)0x02c0) + #define PROFF_SCC4 ((uint)0x0300) + #define PROFF_SMC2 ((uint)0x0380) + +diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h +index 3d2f94afc13ae..398eba3998790 100644 +--- a/arch/powerpc/include/asm/cputable.h ++++ 
b/arch/powerpc/include/asm/cputable.h +@@ -369,7 +369,7 @@ static inline void cpu_feature_keys_init(void) { } + CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) + #define CPU_FTRS_82XX (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_NOEXECUTE) + #define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \ +- CPU_FTR_MAYBE_CAN_NAP) ++ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NOEXECUTE) + #define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \ + CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_COMMON | CPU_FTR_NOEXECUTE) +@@ -409,7 +409,6 @@ static inline void cpu_feature_keys_init(void) { } + CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ + CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT) +-#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) + + /* 64-bit CPUs */ + #define CPU_FTRS_PPC970 (CPU_FTR_LWSYNC | \ +@@ -520,8 +519,6 @@ enum { + CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX | + CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 | + CPU_FTRS_CLASSIC32 | +-#else +- CPU_FTRS_GENERIC_32 | + #endif + #ifdef CONFIG_PPC_8xx + CPU_FTRS_8XX | +@@ -596,8 +593,6 @@ enum { + CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX & + CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 & + CPU_FTRS_CLASSIC32 & +-#else +- CPU_FTRS_GENERIC_32 & + #endif + #ifdef CONFIG_PPC_8xx + CPU_FTRS_8XX & +diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h +index 6277e7596ae58..ac75f4ab0dba1 100644 +--- a/arch/powerpc/include/asm/nohash/pgtable.h ++++ b/arch/powerpc/include/asm/nohash/pgtable.h +@@ -192,9 +192,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, + */ + if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) { + __asm__ __volatile__("\ +- stw%U0%X0 %2,%0\n\ ++ stw%X0 %2,%0\n\ + eieio\n\ +- stw%U0%X0 %L2,%1" ++ stw%X1 %L2,%1" + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) + : "r" (pte) : "memory"); + return; +diff --git 
a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile +index bf0bf1b900d21..fe2ef598e2ead 100644 +--- a/arch/powerpc/kernel/Makefile ++++ b/arch/powerpc/kernel/Makefile +@@ -173,6 +173,9 @@ KCOV_INSTRUMENT_cputable.o := n + KCOV_INSTRUMENT_setup_64.o := n + KCOV_INSTRUMENT_paca.o := n + ++CFLAGS_setup_64.o += -fno-stack-protector ++CFLAGS_paca.o += -fno-stack-protector ++ + extra-$(CONFIG_PPC_FPU) += fpu.o + extra-$(CONFIG_ALTIVEC) += vector.o + extra-$(CONFIG_PPC64) += entry_64.o +diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h +index 7c767765071da..c88e66adecb52 100644 +--- a/arch/powerpc/kernel/head_32.h ++++ b/arch/powerpc/kernel/head_32.h +@@ -131,18 +131,28 @@ + #ifdef CONFIG_VMAP_STACK + mfspr r11, SPRN_SRR0 + mtctr r11 +-#endif + andi. r11, r9, MSR_PR +- lwz r11,TASK_STACK-THREAD(r12) ++ mr r11, r1 ++ lwz r1,TASK_STACK-THREAD(r12) + beq- 99f +- addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE +-#ifdef CONFIG_VMAP_STACK ++ addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE + li r10, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */ + mtmsr r10 + isync ++ tovirt(r12, r12) ++ stw r11,GPR1(r1) ++ stw r11,0(r1) ++ mr r11, r1 ++#else ++ andi. 
r11, r9, MSR_PR ++ lwz r11,TASK_STACK-THREAD(r12) ++ beq- 99f ++ addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE ++ tophys(r11, r11) ++ stw r1,GPR1(r11) ++ stw r1,0(r11) ++ tovirt(r1, r11) /* set new kernel sp */ + #endif +- tovirt_vmstack r12, r12 +- tophys_novmstack r11, r11 + mflr r10 + stw r10, _LINK(r11) + #ifdef CONFIG_VMAP_STACK +@@ -150,9 +160,6 @@ + #else + mfspr r10,SPRN_SRR0 + #endif +- stw r1,GPR1(r11) +- stw r1,0(r11) +- tovirt_novmstack r1, r11 /* set new kernel sp */ + stw r10,_NIP(r11) + mfcr r10 + rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */ +diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S +index 1510b2a56669f..2d6581db0c7b6 100644 +--- a/arch/powerpc/kernel/head_64.S ++++ b/arch/powerpc/kernel/head_64.S +@@ -417,6 +417,10 @@ generic_secondary_common_init: + /* From now on, r24 is expected to be logical cpuid */ + mr r24,r5 + ++ /* Create a temp kernel stack for use before relocation is on. */ ++ ld r1,PACAEMERGSP(r13) ++ subi r1,r1,STACK_FRAME_OVERHEAD ++ + /* See if we need to call a cpu state restore handler */ + LOAD_REG_ADDR(r23, cur_cpu_spec) + ld r23,0(r23) +@@ -445,10 +449,6 @@ generic_secondary_common_init: + sync /* order paca.run and cur_cpu_spec */ + isync /* In case code patching happened */ + +- /* Create a temp kernel stack for use before relocation is on. 
*/ +- ld r1,PACAEMERGSP(r13) +- subi r1,r1,STACK_FRAME_OVERHEAD +- + b __secondary_start + #endif /* SMP */ + +@@ -990,7 +990,7 @@ start_here_common: + bl start_kernel + + /* Not reached */ +- trap ++0: trap + EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 + .previous + +diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c +index 0ad15768d762c..7f5aae3c387d2 100644 +--- a/arch/powerpc/kernel/paca.c ++++ b/arch/powerpc/kernel/paca.c +@@ -208,7 +208,7 @@ static struct rtas_args * __init new_rtas_args(int cpu, unsigned long limit) + struct paca_struct **paca_ptrs __read_mostly; + EXPORT_SYMBOL(paca_ptrs); + +-void __init __nostackprotector initialise_paca(struct paca_struct *new_paca, int cpu) ++void __init initialise_paca(struct paca_struct *new_paca, int cpu) + { + #ifdef CONFIG_PPC_PSERIES + new_paca->lppaca_ptr = NULL; +@@ -241,7 +241,7 @@ void __init __nostackprotector initialise_paca(struct paca_struct *new_paca, int + } + + /* Put the paca pointer into r13 and SPRG_PACA */ +-void __nostackprotector setup_paca(struct paca_struct *new_paca) ++void setup_paca(struct paca_struct *new_paca) + { + /* Setup r13 */ + local_paca = new_paca; +diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c +index 954f41676f692..cccb32cf0e08c 100644 +--- a/arch/powerpc/kernel/rtas.c ++++ b/arch/powerpc/kernel/rtas.c +@@ -1030,7 +1030,7 @@ static struct rtas_filter rtas_filters[] __ro_after_init = { + { "ibm,display-message", -1, 0, -1, -1, -1 }, + { "ibm,errinjct", -1, 2, -1, -1, -1, 1024 }, + { "ibm,close-errinjct", -1, -1, -1, -1, -1 }, +- { "ibm,open-errinct", -1, -1, -1, -1, -1 }, ++ { "ibm,open-errinjct", -1, -1, -1, -1, -1 }, + { "ibm,get-config-addr-info2", -1, -1, -1, -1, -1 }, + { "ibm,get-dynamic-sensor-state", -1, 1, -1, -1, -1 }, + { "ibm,get-indices", -1, 2, 3, -1, -1 }, +diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c +index 808ec9fab6052..da8c71f321ad3 100644 +--- a/arch/powerpc/kernel/setup-common.c ++++ 
b/arch/powerpc/kernel/setup-common.c +@@ -919,8 +919,6 @@ void __init setup_arch(char **cmdline_p) + + /* On BookE, setup per-core TLB data structures. */ + setup_tlb_core_data(); +- +- smp_release_cpus(); + #endif + + /* Print various info about the machine that has been gathered so far. */ +@@ -944,6 +942,8 @@ void __init setup_arch(char **cmdline_p) + exc_lvl_early_init(); + emergency_stack_init(); + ++ smp_release_cpus(); ++ + initmem_init(); + + early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT); +diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h +index 2ec835574cc94..2dd0d9cb5a208 100644 +--- a/arch/powerpc/kernel/setup.h ++++ b/arch/powerpc/kernel/setup.h +@@ -8,12 +8,6 @@ + #ifndef __ARCH_POWERPC_KERNEL_SETUP_H + #define __ARCH_POWERPC_KERNEL_SETUP_H + +-#ifdef CONFIG_CC_IS_CLANG +-#define __nostackprotector +-#else +-#define __nostackprotector __attribute__((__optimize__("no-stack-protector"))) +-#endif +- + void initialize_cache_info(void); + void irqstack_early_init(void); + +diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c +index 74fd47f46fa58..c28e949cc2229 100644 +--- a/arch/powerpc/kernel/setup_64.c ++++ b/arch/powerpc/kernel/setup_64.c +@@ -283,7 +283,7 @@ void __init record_spr_defaults(void) + * device-tree is not accessible via normal means at this point. 
+ */ + +-void __init __nostackprotector early_setup(unsigned long dt_ptr) ++void __init early_setup(unsigned long dt_ptr) + { + static __initdata struct paca_struct boot_paca; + +diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c +index 8c2857cbd9609..7d6cf75a7fd80 100644 +--- a/arch/powerpc/kernel/smp.c ++++ b/arch/powerpc/kernel/smp.c +@@ -919,7 +919,7 @@ static struct sched_domain_topology_level powerpc_topology[] = { + { NULL, }, + }; + +-static int init_big_cores(void) ++static int __init init_big_cores(void) + { + int cpu; + +diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c +index 855457ed09b54..b18bce1a209fa 100644 +--- a/arch/powerpc/lib/sstep.c ++++ b/arch/powerpc/lib/sstep.c +@@ -1346,6 +1346,9 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, + switch (opcode) { + #ifdef __powerpc64__ + case 1: ++ if (!cpu_has_feature(CPU_FTR_ARCH_31)) ++ return -1; ++ + prefix_r = GET_PREFIX_R(word); + ra = GET_PREFIX_RA(suffix); + rd = (suffix >> 21) & 0x1f; +@@ -2733,6 +2736,9 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, + } + break; + case 1: /* Prefixed instructions */ ++ if (!cpu_has_feature(CPU_FTR_ARCH_31)) ++ return -1; ++ + prefix_r = GET_PREFIX_R(word); + ra = GET_PREFIX_RA(suffix); + op->update_reg = ra; +@@ -2751,6 +2757,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, + case 41: /* plwa */ + op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4); + break; ++#ifdef CONFIG_VSX + case 42: /* plxsd */ + op->reg = rd + 32; + op->type = MKOP(LOAD_VSX, PREFIXED, 8); +@@ -2791,13 +2798,14 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, + op->element_size = 16; + op->vsx_flags = VSX_CHECK_VEC; + break; ++#endif /* CONFIG_VSX */ + case 56: /* plq */ + op->type = MKOP(LOAD, PREFIXED, 16); + break; + case 57: /* pld */ + op->type = MKOP(LOAD, PREFIXED, 8); + break; +- case 60: /* stq */ ++ case 60: /* pstq */ + op->type = MKOP(STORE, 
PREFIXED, 16); + break; + case 61: /* pstd */ +diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c +index 0add963a849b3..72e1b51beb10c 100644 +--- a/arch/powerpc/mm/fault.c ++++ b/arch/powerpc/mm/fault.c +@@ -303,7 +303,6 @@ static inline void cmo_account_page_fault(void) + static inline void cmo_account_page_fault(void) { } + #endif /* CONFIG_PPC_SMLPAR */ + +-#ifdef CONFIG_PPC_BOOK3S + static void sanity_check_fault(bool is_write, bool is_user, + unsigned long error_code, unsigned long address) + { +@@ -320,6 +319,9 @@ static void sanity_check_fault(bool is_write, bool is_user, + return; + } + ++ if (!IS_ENABLED(CONFIG_PPC_BOOK3S)) ++ return; ++ + /* + * For hash translation mode, we should never get a + * PROTFAULT. Any update to pte to reduce access will result in us +@@ -354,10 +356,6 @@ static void sanity_check_fault(bool is_write, bool is_user, + + WARN_ON_ONCE(error_code & DSISR_PROTFAULT); + } +-#else +-static void sanity_check_fault(bool is_write, bool is_user, +- unsigned long error_code, unsigned long address) { } +-#endif /* CONFIG_PPC_BOOK3S */ + + /* + * Define the correct "is_write" bit in error_code based +diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c +index 3fc325bebe4df..22eb1c718e622 100644 +--- a/arch/powerpc/mm/mem.c ++++ b/arch/powerpc/mm/mem.c +@@ -532,7 +532,7 @@ void __flush_dcache_icache(void *p) + * space occurs, before returning to user space. 
+ */ + +- if (cpu_has_feature(MMU_FTR_TYPE_44x)) ++ if (mmu_has_feature(MMU_FTR_TYPE_44x)) + return; + + invalidate_icache_range(addr, addr + PAGE_SIZE); +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c +index 08643cba14948..43599e671d383 100644 +--- a/arch/powerpc/perf/core-book3s.c ++++ b/arch/powerpc/perf/core-book3s.c +@@ -137,6 +137,9 @@ static void pmao_restore_workaround(bool ebb) { } + + bool is_sier_available(void) + { ++ if (!ppmu) ++ return false; ++ + if (ppmu->flags & PPMU_HAS_SIER) + return true; + +@@ -2121,6 +2124,16 @@ static void record_and_restart(struct perf_event *event, unsigned long val, + local64_set(&event->hw.period_left, left); + perf_event_update_userpage(event); + ++ /* ++ * Due to hardware limitation, sometimes SIAR could sample a kernel ++ * address even when freeze on supervisor state (kernel) is set in ++ * MMCR2. Check attr.exclude_kernel and address to drop the sample in ++ * these cases. ++ */ ++ if (event->attr.exclude_kernel && record) ++ if (is_kernel_addr(mfspr(SPRN_SIAR))) ++ record = 0; ++ + /* + * Finally record data if requested. 
+ */ +diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c +index 2848904df6383..e1a21d34c6e49 100644 +--- a/arch/powerpc/perf/isa207-common.c ++++ b/arch/powerpc/perf/isa207-common.c +@@ -247,6 +247,9 @@ void isa207_get_mem_weight(u64 *weight) + u64 sier = mfspr(SPRN_SIER); + u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT; + ++ if (cpu_has_feature(CPU_FTR_ARCH_31)) ++ mantissa = P10_MMCRA_THR_CTR_MANT(mmcra); ++ + if (val == 0 || val == 7) + *weight = 0; + else +@@ -311,9 +314,11 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) + } + + if (unit >= 6 && unit <= 9) { +- if (cpu_has_feature(CPU_FTR_ARCH_31) && (unit == 6)) { +- mask |= CNST_L2L3_GROUP_MASK; +- value |= CNST_L2L3_GROUP_VAL(event >> p10_L2L3_EVENT_SHIFT); ++ if (cpu_has_feature(CPU_FTR_ARCH_31)) { ++ if (unit == 6) { ++ mask |= CNST_L2L3_GROUP_MASK; ++ value |= CNST_L2L3_GROUP_VAL(event >> p10_L2L3_EVENT_SHIFT); ++ } + } else if (cpu_has_feature(CPU_FTR_ARCH_300)) { + mask |= CNST_CACHE_GROUP_MASK; + value |= CNST_CACHE_GROUP_VAL(event & 0xff); +@@ -339,12 +344,22 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) + value |= CNST_L1_QUAL_VAL(cache); + } + ++ if (cpu_has_feature(CPU_FTR_ARCH_31)) { ++ mask |= CNST_RADIX_SCOPE_GROUP_MASK; ++ value |= CNST_RADIX_SCOPE_GROUP_VAL(event >> p10_EVENT_RADIX_SCOPE_QUAL_SHIFT); ++ } ++ + if (is_event_marked(event)) { + mask |= CNST_SAMPLE_MASK; + value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT); + } + +- if (cpu_has_feature(CPU_FTR_ARCH_300)) { ++ if (cpu_has_feature(CPU_FTR_ARCH_31)) { ++ if (event_is_threshold(event)) { ++ mask |= CNST_THRESH_CTL_SEL_MASK; ++ value |= CNST_THRESH_CTL_SEL_VAL(event >> EVENT_THRESH_SHIFT); ++ } ++ } else if (cpu_has_feature(CPU_FTR_ARCH_300)) { + if (event_is_threshold(event) && is_thresh_cmp_valid(event)) { + mask |= CNST_THRESH_MASK; + value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); +@@ -456,6 +471,13 
@@ int isa207_compute_mmcr(u64 event[], int n_ev, + } + } + ++ /* Set RADIX_SCOPE_QUAL bit */ ++ if (cpu_has_feature(CPU_FTR_ARCH_31)) { ++ val = (event[i] >> p10_EVENT_RADIX_SCOPE_QUAL_SHIFT) & ++ p10_EVENT_RADIX_SCOPE_QUAL_MASK; ++ mmcr1 |= val << p10_MMCR1_RADIX_SCOPE_QUAL_SHIFT; ++ } ++ + if (is_event_marked(event[i])) { + mmcra |= MMCRA_SAMPLE_ENABLE; + +diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h +index 7025de5e60e7d..454b32c314406 100644 +--- a/arch/powerpc/perf/isa207-common.h ++++ b/arch/powerpc/perf/isa207-common.h +@@ -101,6 +101,9 @@ + #define p10_EVENT_CACHE_SEL_MASK 0x3ull + #define p10_EVENT_MMCR3_MASK 0x7fffull + #define p10_EVENT_MMCR3_SHIFT 45 ++#define p10_EVENT_RADIX_SCOPE_QUAL_SHIFT 9 ++#define p10_EVENT_RADIX_SCOPE_QUAL_MASK 0x1 ++#define p10_MMCR1_RADIX_SCOPE_QUAL_SHIFT 45 + + #define p10_EVENT_VALID_MASK \ + ((p10_SDAR_MODE_MASK << p10_SDAR_MODE_SHIFT | \ +@@ -112,6 +115,7 @@ + (p9_EVENT_COMBINE_MASK << p9_EVENT_COMBINE_SHIFT) | \ + (p10_EVENT_MMCR3_MASK << p10_EVENT_MMCR3_SHIFT) | \ + (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \ ++ (p10_EVENT_RADIX_SCOPE_QUAL_MASK << p10_EVENT_RADIX_SCOPE_QUAL_SHIFT) | \ + EVENT_LINUX_MASK | \ + EVENT_PSEL_MASK)) + /* +@@ -125,9 +129,9 @@ + * + * 28 24 20 16 12 8 4 0 + * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | +- * [ ] | [ ] [ sample ] [ ] [6] [5] [4] [3] [2] [1] +- * | | | | +- * BHRB IFM -* | | | Count of events for each PMC. ++ * [ ] | [ ] | [ sample ] [ ] [6] [5] [4] [3] [2] [1] ++ * | | | | | ++ * BHRB IFM -* | | |*radix_scope | Count of events for each PMC. + * EBB -* | | p1, p2, p3, p4, p5, p6. 
+ * L1 I/D qualifier -* | + * nc - number of counters -* +@@ -145,6 +149,9 @@ + #define CNST_THRESH_VAL(v) (((v) & EVENT_THRESH_MASK) << 32) + #define CNST_THRESH_MASK CNST_THRESH_VAL(EVENT_THRESH_MASK) + ++#define CNST_THRESH_CTL_SEL_VAL(v) (((v) & 0x7ffull) << 32) ++#define CNST_THRESH_CTL_SEL_MASK CNST_THRESH_CTL_SEL_VAL(0x7ff) ++ + #define CNST_EBB_VAL(v) (((v) & EVENT_EBB_MASK) << 24) + #define CNST_EBB_MASK CNST_EBB_VAL(EVENT_EBB_MASK) + +@@ -165,6 +172,9 @@ + #define CNST_L2L3_GROUP_VAL(v) (((v) & 0x1full) << 55) + #define CNST_L2L3_GROUP_MASK CNST_L2L3_GROUP_VAL(0x1f) + ++#define CNST_RADIX_SCOPE_GROUP_VAL(v) (((v) & 0x1ull) << 21) ++#define CNST_RADIX_SCOPE_GROUP_MASK CNST_RADIX_SCOPE_GROUP_VAL(1) ++ + /* + * For NC we are counting up to 4 events. This requires three bits, and we need + * the fifth event to overflow and set the 4th bit. To achieve that we bias the +@@ -221,6 +231,10 @@ + #define MMCRA_THR_CTR_EXP(v) (((v) >> MMCRA_THR_CTR_EXP_SHIFT) &\ + MMCRA_THR_CTR_EXP_MASK) + ++#define P10_MMCRA_THR_CTR_MANT_MASK 0xFFul ++#define P10_MMCRA_THR_CTR_MANT(v) (((v) >> MMCRA_THR_CTR_MANT_SHIFT) &\ ++ P10_MMCRA_THR_CTR_MANT_MASK) ++ + /* MMCRA Threshold Compare bit constant for power9 */ + #define p9_MMCRA_THR_CMP_SHIFT 45 + +diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c +index 9dbe8f9b89b4f..cf44fb7446130 100644 +--- a/arch/powerpc/perf/power10-pmu.c ++++ b/arch/powerpc/perf/power10-pmu.c +@@ -23,10 +23,10 @@ + * + * 28 24 20 16 12 8 4 0 + * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | +- * [ ] [ sample ] [ ] [ ] [ pmc ] [unit ] [ ] m [ pmcxsel ] +- * | | | | | | +- * | | | | | *- mark +- * | | | *- L1/L2/L3 cache_sel | ++ * [ ] [ sample ] [ ] [ ] [ pmc ] [unit ] [ ] | m [ pmcxsel ] ++ * | | | | | | | ++ * | | | | | | *- mark ++ * | | | *- L1/L2/L3 cache_sel | |*-radix_scope_qual + * | | sdar_mode | + * | *- sampling mode for marked events *- combine + * | +@@ -59,6 +59,7 @@ + * + * 
MMCR1[16] = cache_sel[0] + * MMCR1[17] = cache_sel[1] ++ * MMCR1[18] = radix_scope_qual + * + * if mark: + * MMCRA[63] = 1 (SAMPLE_ENABLE) +@@ -175,6 +176,7 @@ PMU_FORMAT_ATTR(src_sel, "config:45-46"); + PMU_FORMAT_ATTR(invert_bit, "config:47"); + PMU_FORMAT_ATTR(src_mask, "config:48-53"); + PMU_FORMAT_ATTR(src_match, "config:54-59"); ++PMU_FORMAT_ATTR(radix_scope, "config:9"); + + static struct attribute *power10_pmu_format_attr[] = { + &format_attr_event.attr, +@@ -194,6 +196,7 @@ static struct attribute *power10_pmu_format_attr[] = { + &format_attr_invert_bit.attr, + &format_attr_src_mask.attr, + &format_attr_src_match.attr, ++ &format_attr_radix_scope.attr, + NULL, + }; + +diff --git a/arch/powerpc/platforms/8xx/micropatch.c b/arch/powerpc/platforms/8xx/micropatch.c +index aed4bc75f3520..aef179fcbd4f8 100644 +--- a/arch/powerpc/platforms/8xx/micropatch.c ++++ b/arch/powerpc/platforms/8xx/micropatch.c +@@ -360,6 +360,17 @@ void __init cpm_load_patch(cpm8xx_t *cp) + if (IS_ENABLED(CONFIG_SMC_UCODE_PATCH)) { + smc_uart_t *smp; + ++ if (IS_ENABLED(CONFIG_PPC_EARLY_DEBUG_CPM)) { ++ int i; ++ ++ for (i = 0; i < sizeof(*smp); i += 4) { ++ u32 __iomem *src = (u32 __iomem *)&cp->cp_dparam[PROFF_SMC1 + i]; ++ u32 __iomem *dst = (u32 __iomem *)&cp->cp_dparam[PROFF_DSP1 + i]; ++ ++ out_be32(dst, in_be32(src)); ++ } ++ } ++ + smp = (smc_uart_t *)&cp->cp_dparam[PROFF_SMC1]; + out_be16(&smp->smc_rpbase, 0x1ec0); + smp = (smc_uart_t *)&cp->cp_dparam[PROFF_SMC2]; +diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype +index c194c4ae8bc7d..32a9c4c09b989 100644 +--- a/arch/powerpc/platforms/Kconfig.cputype ++++ b/arch/powerpc/platforms/Kconfig.cputype +@@ -36,7 +36,7 @@ config PPC_BOOK3S_6xx + select PPC_HAVE_PMU_SUPPORT + select PPC_HAVE_KUEP + select PPC_HAVE_KUAP +- select HAVE_ARCH_VMAP_STACK if !ADB_PMU ++ select HAVE_ARCH_VMAP_STACK + + config PPC_85xx + bool "Freescale 85xx" +diff --git a/arch/powerpc/platforms/powermac/sleep.S 
b/arch/powerpc/platforms/powermac/sleep.S +index 7e0f8ba6e54a5..d497a60003d2d 100644 +--- a/arch/powerpc/platforms/powermac/sleep.S ++++ b/arch/powerpc/platforms/powermac/sleep.S +@@ -44,7 +44,8 @@ + #define SL_TB 0xa0 + #define SL_R2 0xa8 + #define SL_CR 0xac +-#define SL_R12 0xb0 /* r12 to r31 */ ++#define SL_LR 0xb0 ++#define SL_R12 0xb4 /* r12 to r31 */ + #define SL_SIZE (SL_R12 + 80) + + .section .text +@@ -63,105 +64,107 @@ _GLOBAL(low_sleep_handler) + blr + #else + mflr r0 +- stw r0,4(r1) +- stwu r1,-SL_SIZE(r1) ++ lis r11,sleep_storage@ha ++ addi r11,r11,sleep_storage@l ++ stw r0,SL_LR(r11) + mfcr r0 +- stw r0,SL_CR(r1) +- stw r2,SL_R2(r1) +- stmw r12,SL_R12(r1) ++ stw r0,SL_CR(r11) ++ stw r1,SL_SP(r11) ++ stw r2,SL_R2(r11) ++ stmw r12,SL_R12(r11) + + /* Save MSR & SDR1 */ + mfmsr r4 +- stw r4,SL_MSR(r1) ++ stw r4,SL_MSR(r11) + mfsdr1 r4 +- stw r4,SL_SDR1(r1) ++ stw r4,SL_SDR1(r11) + + /* Get a stable timebase and save it */ + 1: mftbu r4 +- stw r4,SL_TB(r1) ++ stw r4,SL_TB(r11) + mftb r5 +- stw r5,SL_TB+4(r1) ++ stw r5,SL_TB+4(r11) + mftbu r3 + cmpw r3,r4 + bne 1b + + /* Save SPRGs */ + mfsprg r4,0 +- stw r4,SL_SPRG0(r1) ++ stw r4,SL_SPRG0(r11) + mfsprg r4,1 +- stw r4,SL_SPRG0+4(r1) ++ stw r4,SL_SPRG0+4(r11) + mfsprg r4,2 +- stw r4,SL_SPRG0+8(r1) ++ stw r4,SL_SPRG0+8(r11) + mfsprg r4,3 +- stw r4,SL_SPRG0+12(r1) ++ stw r4,SL_SPRG0+12(r11) + + /* Save BATs */ + mfdbatu r4,0 +- stw r4,SL_DBAT0(r1) ++ stw r4,SL_DBAT0(r11) + mfdbatl r4,0 +- stw r4,SL_DBAT0+4(r1) ++ stw r4,SL_DBAT0+4(r11) + mfdbatu r4,1 +- stw r4,SL_DBAT1(r1) ++ stw r4,SL_DBAT1(r11) + mfdbatl r4,1 +- stw r4,SL_DBAT1+4(r1) ++ stw r4,SL_DBAT1+4(r11) + mfdbatu r4,2 +- stw r4,SL_DBAT2(r1) ++ stw r4,SL_DBAT2(r11) + mfdbatl r4,2 +- stw r4,SL_DBAT2+4(r1) ++ stw r4,SL_DBAT2+4(r11) + mfdbatu r4,3 +- stw r4,SL_DBAT3(r1) ++ stw r4,SL_DBAT3(r11) + mfdbatl r4,3 +- stw r4,SL_DBAT3+4(r1) ++ stw r4,SL_DBAT3+4(r11) + mfibatu r4,0 +- stw r4,SL_IBAT0(r1) ++ stw r4,SL_IBAT0(r11) + mfibatl r4,0 +- stw 
r4,SL_IBAT0+4(r1) ++ stw r4,SL_IBAT0+4(r11) + mfibatu r4,1 +- stw r4,SL_IBAT1(r1) ++ stw r4,SL_IBAT1(r11) + mfibatl r4,1 +- stw r4,SL_IBAT1+4(r1) ++ stw r4,SL_IBAT1+4(r11) + mfibatu r4,2 +- stw r4,SL_IBAT2(r1) ++ stw r4,SL_IBAT2(r11) + mfibatl r4,2 +- stw r4,SL_IBAT2+4(r1) ++ stw r4,SL_IBAT2+4(r11) + mfibatu r4,3 +- stw r4,SL_IBAT3(r1) ++ stw r4,SL_IBAT3(r11) + mfibatl r4,3 +- stw r4,SL_IBAT3+4(r1) ++ stw r4,SL_IBAT3+4(r11) + + BEGIN_MMU_FTR_SECTION + mfspr r4,SPRN_DBAT4U +- stw r4,SL_DBAT4(r1) ++ stw r4,SL_DBAT4(r11) + mfspr r4,SPRN_DBAT4L +- stw r4,SL_DBAT4+4(r1) ++ stw r4,SL_DBAT4+4(r11) + mfspr r4,SPRN_DBAT5U +- stw r4,SL_DBAT5(r1) ++ stw r4,SL_DBAT5(r11) + mfspr r4,SPRN_DBAT5L +- stw r4,SL_DBAT5+4(r1) ++ stw r4,SL_DBAT5+4(r11) + mfspr r4,SPRN_DBAT6U +- stw r4,SL_DBAT6(r1) ++ stw r4,SL_DBAT6(r11) + mfspr r4,SPRN_DBAT6L +- stw r4,SL_DBAT6+4(r1) ++ stw r4,SL_DBAT6+4(r11) + mfspr r4,SPRN_DBAT7U +- stw r4,SL_DBAT7(r1) ++ stw r4,SL_DBAT7(r11) + mfspr r4,SPRN_DBAT7L +- stw r4,SL_DBAT7+4(r1) ++ stw r4,SL_DBAT7+4(r11) + mfspr r4,SPRN_IBAT4U +- stw r4,SL_IBAT4(r1) ++ stw r4,SL_IBAT4(r11) + mfspr r4,SPRN_IBAT4L +- stw r4,SL_IBAT4+4(r1) ++ stw r4,SL_IBAT4+4(r11) + mfspr r4,SPRN_IBAT5U +- stw r4,SL_IBAT5(r1) ++ stw r4,SL_IBAT5(r11) + mfspr r4,SPRN_IBAT5L +- stw r4,SL_IBAT5+4(r1) ++ stw r4,SL_IBAT5+4(r11) + mfspr r4,SPRN_IBAT6U +- stw r4,SL_IBAT6(r1) ++ stw r4,SL_IBAT6(r11) + mfspr r4,SPRN_IBAT6L +- stw r4,SL_IBAT6+4(r1) ++ stw r4,SL_IBAT6+4(r11) + mfspr r4,SPRN_IBAT7U +- stw r4,SL_IBAT7(r1) ++ stw r4,SL_IBAT7(r11) + mfspr r4,SPRN_IBAT7L +- stw r4,SL_IBAT7+4(r1) ++ stw r4,SL_IBAT7+4(r11) + END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) + + /* Backup various CPU config stuffs */ +@@ -180,9 +183,9 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) + lis r5,grackle_wake_up@ha + addi r5,r5,grackle_wake_up@l + tophys(r5,r5) +- stw r5,SL_PC(r1) ++ stw r5,SL_PC(r11) + lis r4,KERNELBASE@h +- tophys(r5,r1) ++ tophys(r5,r11) + addi r5,r5,SL_PC + lis r6,MAGIC@ha + addi 
r6,r6,MAGIC@l +@@ -194,12 +197,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) + tophys(r3,r3) + stw r3,0x80(r4) + stw r5,0x84(r4) +- /* Store a pointer to our backup storage into +- * a kernel global +- */ +- lis r3,sleep_storage@ha +- addi r3,r3,sleep_storage@l +- stw r5,0(r3) + + .globl low_cpu_offline_self + low_cpu_offline_self: +@@ -279,7 +276,7 @@ _GLOBAL(core99_wake_up) + lis r3,sleep_storage@ha + addi r3,r3,sleep_storage@l + tophys(r3,r3) +- lwz r1,0(r3) ++ addi r1,r3,SL_PC + + /* Pass thru to older resume code ... */ + _ASM_NOKPROBE_SYMBOL(core99_wake_up) +@@ -399,13 +396,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) + blt 1b + sync + +- /* restore the MSR and turn on the MMU */ +- lwz r3,SL_MSR(r1) +- bl turn_on_mmu +- +- /* get back the stack pointer */ +- tovirt(r1,r1) +- + /* Restore TB */ + li r3,0 + mttbl r3 +@@ -419,28 +409,24 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) + mtcr r0 + lwz r2,SL_R2(r1) + lmw r12,SL_R12(r1) +- addi r1,r1,SL_SIZE +- lwz r0,4(r1) +- mtlr r0 +- blr +-_ASM_NOKPROBE_SYMBOL(grackle_wake_up) + +-turn_on_mmu: +- mflr r4 +- tovirt(r4,r4) ++ /* restore the MSR and SP and turn on the MMU and return */ ++ lwz r3,SL_MSR(r1) ++ lwz r4,SL_LR(r1) ++ lwz r1,SL_SP(r1) + mtsrr0 r4 + mtsrr1 r3 + sync + isync + rfi +-_ASM_NOKPROBE_SYMBOL(turn_on_mmu) ++_ASM_NOKPROBE_SYMBOL(grackle_wake_up) + + #endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */ + +- .section .data ++ .section .bss + .balign L1_CACHE_BYTES + sleep_storage: +- .long 0 ++ .space SL_SIZE + .balign L1_CACHE_BYTES, 0 + + #endif /* CONFIG_PPC_BOOK3S_32 */ +diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c +index 6828108486f83..0e42fe2d7b6ac 100644 +--- a/arch/powerpc/platforms/powernv/memtrace.c ++++ b/arch/powerpc/platforms/powernv/memtrace.c +@@ -30,6 +30,7 @@ struct memtrace_entry { + char name[16]; + }; + ++static DEFINE_MUTEX(memtrace_mutex); + static u64 memtrace_size; + + static struct 
memtrace_entry *memtrace_array; +@@ -67,6 +68,23 @@ static int change_memblock_state(struct memory_block *mem, void *arg) + return 0; + } + ++static void memtrace_clear_range(unsigned long start_pfn, ++ unsigned long nr_pages) ++{ ++ unsigned long pfn; ++ ++ /* ++ * As pages are offline, we cannot trust the memmap anymore. As HIGHMEM ++ * does not apply, avoid passing around "struct page" and use ++ * clear_page() instead directly. ++ */ ++ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) { ++ if (IS_ALIGNED(pfn, PAGES_PER_SECTION)) ++ cond_resched(); ++ clear_page(__va(PFN_PHYS(pfn))); ++ } ++} ++ + /* called with device_hotplug_lock held */ + static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages) + { +@@ -111,6 +129,11 @@ static u64 memtrace_alloc_node(u32 nid, u64 size) + lock_device_hotplug(); + for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) { + if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) { ++ /* ++ * Clear the range while we still have a linear ++ * mapping. 
++ */ ++ memtrace_clear_range(base_pfn, nr_pages); + /* + * Remove memory in memory block size chunks so that + * iomem resources are always split to the same size and +@@ -257,6 +280,7 @@ static int memtrace_online(void) + + static int memtrace_enable_set(void *data, u64 val) + { ++ int rc = -EAGAIN; + u64 bytes; + + /* +@@ -269,25 +293,31 @@ static int memtrace_enable_set(void *data, u64 val) + return -EINVAL; + } + ++ mutex_lock(&memtrace_mutex); ++ + /* Re-add/online previously removed/offlined memory */ + if (memtrace_size) { + if (memtrace_online()) +- return -EAGAIN; ++ goto out_unlock; + } + +- if (!val) +- return 0; ++ if (!val) { ++ rc = 0; ++ goto out_unlock; ++ } + + /* Offline and remove memory */ + if (memtrace_init_regions_runtime(val)) +- return -EINVAL; ++ goto out_unlock; + + if (memtrace_init_debugfs()) +- return -EINVAL; ++ goto out_unlock; + + memtrace_size = val; +- +- return 0; ++ rc = 0; ++out_unlock: ++ mutex_unlock(&memtrace_mutex); ++ return rc; + } + + static int memtrace_enable_get(void *data, u64 *val) +diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c +index abeaa533b976b..b711dc3262a30 100644 +--- a/arch/powerpc/platforms/powernv/npu-dma.c ++++ b/arch/powerpc/platforms/powernv/npu-dma.c +@@ -385,7 +385,8 @@ static void pnv_npu_peers_take_ownership(struct iommu_table_group *table_group) + for (i = 0; i < npucomp->pe_num; ++i) { + struct pnv_ioda_pe *pe = npucomp->pe[i]; + +- if (!pe->table_group.ops->take_ownership) ++ if (!pe->table_group.ops || ++ !pe->table_group.ops->take_ownership) + continue; + pe->table_group.ops->take_ownership(&pe->table_group); + } +@@ -401,7 +402,8 @@ static void pnv_npu_peers_release_ownership( + for (i = 0; i < npucomp->pe_num; ++i) { + struct pnv_ioda_pe *pe = npucomp->pe[i]; + +- if (!pe->table_group.ops->release_ownership) ++ if (!pe->table_group.ops || ++ !pe->table_group.ops->release_ownership) + continue; + 
pe->table_group.ops->release_ownership(&pe->table_group); + } +@@ -623,6 +625,11 @@ int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid, + return -ENODEV; + + hose = pci_bus_to_host(npdev->bus); ++ if (hose->npu == NULL) { ++ dev_info_once(&npdev->dev, "Nvlink1 does not support contexts"); ++ return 0; ++ } ++ + nphb = hose->private_data; + + dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=%u\n", +@@ -670,6 +677,11 @@ int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev) + return -ENODEV; + + hose = pci_bus_to_host(npdev->bus); ++ if (hose->npu == NULL) { ++ dev_info_once(&npdev->dev, "Nvlink1 does not support contexts"); ++ return 0; ++ } ++ + nphb = hose->private_data; + + dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n", +diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c +index c4434f20f42fa..28aac933a4391 100644 +--- a/arch/powerpc/platforms/powernv/pci-sriov.c ++++ b/arch/powerpc/platforms/powernv/pci-sriov.c +@@ -422,7 +422,7 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) + { + struct pnv_iov_data *iov; + struct pnv_phb *phb; +- unsigned int win; ++ int win; + struct resource *res; + int i, j; + int64_t rc; +diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c +index a02012f1b04af..12cbffd3c2e32 100644 +--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c ++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c +@@ -746,6 +746,7 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add) + parent = of_find_node_by_path("/cpus"); + if (!parent) { + pr_warn("Could not find CPU root node in device tree\n"); ++ kfree(cpu_drcs); + return -1; + } + +diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c +index 81e0ac58d6204..64b36a93c33a6 100644 +--- a/arch/powerpc/platforms/pseries/suspend.c ++++ b/arch/powerpc/platforms/pseries/suspend.c +@@ -13,7 +13,6 @@ + #include <asm/mmu.h> + #include 
<asm/rtas.h> + #include <asm/topology.h> +-#include "../../kernel/cacheinfo.h" + + static u64 stream_id; + static struct device suspend_dev; +@@ -78,9 +77,7 @@ static void pseries_suspend_enable_irqs(void) + * Update configuration which can be modified based on device tree + * changes during resume. + */ +- cacheinfo_cpu_offline(smp_processor_id()); + post_mobility_fixup(); +- cacheinfo_cpu_online(smp_processor_id()); + } + + /** +@@ -187,7 +184,6 @@ static struct bus_type suspend_subsys = { + + static const struct platform_suspend_ops pseries_suspend_ops = { + .valid = suspend_valid_only_mem, +- .begin = pseries_suspend_begin, + .prepare_late = pseries_prepare_late, + .enter = pseries_suspend_enter, + }; +diff --git a/arch/powerpc/xmon/nonstdio.c b/arch/powerpc/xmon/nonstdio.c +index 5c1a50912229a..9b0d85bff021e 100644 +--- a/arch/powerpc/xmon/nonstdio.c ++++ b/arch/powerpc/xmon/nonstdio.c +@@ -178,7 +178,7 @@ void xmon_printf(const char *format, ...) + + if (n && rc == 0) { + /* No udbg hooks, fallback to printk() - dangerous */ +- printk("%s", xmon_outbuf); ++ pr_cont("%s", xmon_outbuf); + } + } + +diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c +index 55c43a6c91112..5559edf36756c 100644 +--- a/arch/powerpc/xmon/xmon.c ++++ b/arch/powerpc/xmon/xmon.c +@@ -1383,6 +1383,7 @@ static long check_bp_loc(unsigned long addr) + return 1; + } + ++#ifndef CONFIG_PPC_8xx + static int find_free_data_bpt(void) + { + int i; +@@ -1394,6 +1395,7 @@ static int find_free_data_bpt(void) + printf("Couldn't find free breakpoint register\n"); + return -1; + } ++#endif + + static void print_data_bpts(void) + { +diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c +index 8e577f14f1205..e4133c20744ce 100644 +--- a/arch/riscv/mm/init.c ++++ b/arch/riscv/mm/init.c +@@ -174,7 +174,7 @@ void __init setup_bootmem(void) + * Make sure that any memory beyond mem_start + (-PAGE_OFFSET) is removed + * as it is unusable by kernel. 
+ */ +- memblock_enforce_memory_limit(mem_start - PAGE_OFFSET); ++ memblock_enforce_memory_limit(-PAGE_OFFSET); + + /* Reserve from the start of the kernel to the end of the kernel */ + memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S +index 92beb14446449..6343dca0dbeb6 100644 +--- a/arch/s390/kernel/entry.S ++++ b/arch/s390/kernel/entry.S +@@ -110,9 +110,9 @@ _LPP_OFFSET = __LC_LPP + #endif + .endm + +- .macro SWITCH_ASYNC savearea,timer ++ .macro SWITCH_ASYNC savearea,timer,clock + tmhh %r8,0x0001 # interrupting from user ? +- jnz 2f ++ jnz 4f + #if IS_ENABLED(CONFIG_KVM) + lgr %r14,%r9 + larl %r13,.Lsie_gmap +@@ -125,10 +125,26 @@ _LPP_OFFSET = __LC_LPP + #endif + 0: larl %r13,.Lpsw_idle_exit + cgr %r13,%r9 +- jne 1f ++ jne 3f + +- mvc __CLOCK_IDLE_EXIT(8,%r2), __LC_INT_CLOCK +- mvc __TIMER_IDLE_EXIT(8,%r2), __LC_ASYNC_ENTER_TIMER ++ larl %r1,smp_cpu_mtid ++ llgf %r1,0(%r1) ++ ltgr %r1,%r1 ++ jz 2f # no SMT, skip mt_cycles calculation ++ .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15) ++ larl %r3,mt_cycles ++ ag %r3,__LC_PERCPU_OFFSET ++ la %r4,__SF_EMPTY+16(%r15) ++1: lg %r0,0(%r3) ++ slg %r0,0(%r4) ++ alg %r0,64(%r4) ++ stg %r0,0(%r3) ++ la %r3,8(%r3) ++ la %r4,8(%r4) ++ brct %r1,1b ++ ++2: mvc __CLOCK_IDLE_EXIT(8,%r2), \clock ++ mvc __TIMER_IDLE_EXIT(8,%r2), \timer + # account system time going idle + ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT + +@@ -146,17 +162,17 @@ _LPP_OFFSET = __LC_LPP + mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) + + nihh %r8,0xfcfd # clear wait state and irq bits +-1: lg %r14,__LC_ASYNC_STACK # are we already on the target stack? ++3: lg %r14,__LC_ASYNC_STACK # are we already on the target stack? 
+ slgr %r14,%r15 + srag %r14,%r14,STACK_SHIFT +- jnz 3f ++ jnz 5f + CHECK_STACK \savearea + aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) +- j 4f +-2: UPDATE_VTIME %r14,%r15,\timer ++ j 6f ++4: UPDATE_VTIME %r14,%r15,\timer + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP +-3: lg %r15,__LC_ASYNC_STACK # load async stack +-4: la %r11,STACK_FRAME_OVERHEAD(%r15) ++5: lg %r15,__LC_ASYNC_STACK # load async stack ++6: la %r11,STACK_FRAME_OVERHEAD(%r15) + .endm + + .macro UPDATE_VTIME w1,w2,enter_timer +@@ -745,7 +761,7 @@ ENTRY(io_int_handler) + stmg %r8,%r15,__LC_SAVE_AREA_ASYNC + lg %r12,__LC_CURRENT + lmg %r8,%r9,__LC_IO_OLD_PSW +- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER ++ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK + stmg %r0,%r7,__PT_R0(%r11) + # clear user controlled registers to prevent speculative use + xgr %r0,%r0 +@@ -945,7 +961,7 @@ ENTRY(ext_int_handler) + stmg %r8,%r15,__LC_SAVE_AREA_ASYNC + lg %r12,__LC_CURRENT + lmg %r8,%r9,__LC_EXT_OLD_PSW +- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER ++ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK + stmg %r0,%r7,__PT_R0(%r11) + # clear user controlled registers to prevent speculative use + xgr %r0,%r0 +@@ -1167,7 +1183,7 @@ ENTRY(mcck_int_handler) + TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID + jno .Lmcck_panic + 4: ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off +- SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER ++ SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER,__LC_MCCK_CLOCK + .Lmcck_skip: + lghi %r14,__LC_GPREGS_SAVE_AREA+64 + stmg %r0,%r7,__PT_R0(%r11) +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c +index 390d97daa2b3f..3a0d545f0ce84 100644 +--- a/arch/s390/kernel/smp.c ++++ b/arch/s390/kernel/smp.c +@@ -896,24 +896,12 @@ static void __no_sanitize_address smp_start_secondary(void *cpuvoid) + /* Upping and downing of CPUs */ + int __cpu_up(unsigned int cpu, struct task_struct *tidle) + { +- 
struct pcpu *pcpu; +- int base, i, rc; ++ struct pcpu *pcpu = pcpu_devices + cpu; ++ int rc; + +- pcpu = pcpu_devices + cpu; + if (pcpu->state != CPU_STATE_CONFIGURED) + return -EIO; +- base = smp_get_base_cpu(cpu); +- for (i = 0; i <= smp_cpu_mtid; i++) { +- if (base + i < nr_cpu_ids) +- if (cpu_online(base + i)) +- break; +- } +- /* +- * If this is the first CPU of the core to get online +- * do an initial CPU reset. +- */ +- if (i > smp_cpu_mtid && +- pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) != ++ if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) != + SIGP_CC_ORDER_CODE_ACCEPTED) + return -EIO; + +diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c +index 7c988994931f0..6bad84c372dcb 100644 +--- a/arch/s390/lib/test_unwind.c ++++ b/arch/s390/lib/test_unwind.c +@@ -205,12 +205,15 @@ static noinline int unwindme_func3(struct unwindme *u) + /* This function must appear in the backtrace. */ + static noinline int unwindme_func2(struct unwindme *u) + { ++ unsigned long flags; + int rc; + + if (u->flags & UWM_SWITCH_STACK) { +- preempt_disable(); ++ local_irq_save(flags); ++ local_mcck_disable(); + rc = CALL_ON_STACK(unwindme_func3, S390_lowcore.nodat_stack, 1, u); +- preempt_enable(); ++ local_mcck_enable(); ++ local_irq_restore(flags); + return rc; + } else { + return unwindme_func3(u); +diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S +index 5a10ce34b95d1..3d1c31e0cf3dd 100644 +--- a/arch/s390/purgatory/head.S ++++ b/arch/s390/purgatory/head.S +@@ -62,14 +62,15 @@ + jh 10b + .endm + +-.macro START_NEXT_KERNEL base ++.macro START_NEXT_KERNEL base subcode + lg %r4,kernel_entry-\base(%r13) + lg %r5,load_psw_mask-\base(%r13) + ogr %r4,%r5 + stg %r4,0(%r0) + + xgr %r0,%r0 +- diag %r0,%r0,0x308 ++ lghi %r1,\subcode ++ diag %r0,%r1,0x308 + .endm + + .text +@@ -123,7 +124,7 @@ ENTRY(purgatory_start) + je .start_crash_kernel + + /* start normal kernel */ +- START_NEXT_KERNEL .base_crash ++ START_NEXT_KERNEL 
.base_crash 0 + + .return_old_kernel: + lmg %r6,%r15,gprregs-.base_crash(%r13) +@@ -227,7 +228,7 @@ ENTRY(purgatory_start) + MEMCPY %r9,%r10,%r11 + + /* start crash kernel */ +- START_NEXT_KERNEL .base_dst ++ START_NEXT_KERNEL .base_dst 1 + + + load_psw_mask: +diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c +index 96edf64d4fb30..182bb7bdaa0a1 100644 +--- a/arch/sparc/mm/init_64.c ++++ b/arch/sparc/mm/init_64.c +@@ -2894,7 +2894,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm) + if (!page) + return NULL; + if (!pgtable_pte_page_ctor(page)) { +- free_unref_page(page); ++ __free_page(page); + return NULL; + } + return (pte_t *) page_address(page); +diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c +index 4d80526a4236e..d8845d4aac6a7 100644 +--- a/arch/um/drivers/chan_user.c ++++ b/arch/um/drivers/chan_user.c +@@ -26,10 +26,10 @@ int generic_read(int fd, char *c_out, void *unused) + n = read(fd, c_out, sizeof(*c_out)); + if (n > 0) + return n; +- else if (errno == EAGAIN) +- return 0; + else if (n == 0) + return -EIO; ++ else if (errno == EAGAIN) ++ return 0; + return -errno; + } + +diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c +index fc7f1e7467032..87ca4a47cd66e 100644 +--- a/arch/um/drivers/xterm.c ++++ b/arch/um/drivers/xterm.c +@@ -18,6 +18,7 @@ + struct xterm_chan { + int pid; + int helper_pid; ++ int chan_fd; + char *title; + int device; + int raw; +@@ -33,6 +34,7 @@ static void *xterm_init(char *str, int device, const struct chan_opts *opts) + return NULL; + *data = ((struct xterm_chan) { .pid = -1, + .helper_pid = -1, ++ .chan_fd = -1, + .device = device, + .title = opts->xterm_title, + .raw = opts->raw } ); +@@ -149,6 +151,7 @@ static int xterm_open(int input, int output, int primary, void *d, + goto out_kill; + } + ++ data->chan_fd = fd; + new = xterm_fd(fd, &data->helper_pid); + if (new < 0) { + err = new; +@@ -206,6 +209,8 @@ static void xterm_close(int fd, void *d) + os_kill_process(data->helper_pid, 
0); + data->helper_pid = -1; + ++ if (data->chan_fd != -1) ++ os_close_file(data->chan_fd); + os_close_file(fd); + } + +diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c +index 3d109ff3309b2..8dafc3f2add42 100644 +--- a/arch/um/kernel/time.c ++++ b/arch/um/kernel/time.c +@@ -260,11 +260,6 @@ static void __time_travel_add_event(struct time_travel_event *e, + struct time_travel_event *tmp; + bool inserted = false; + +- if (WARN(time_travel_mode == TT_MODE_BASIC && +- e != &time_travel_timer_event, +- "only timer events can be handled in basic mode")) +- return; +- + if (e->pending) + return; + +diff --git a/arch/um/os-Linux/irq.c b/arch/um/os-Linux/irq.c +index d508310ee5e1e..f1732c308c615 100644 +--- a/arch/um/os-Linux/irq.c ++++ b/arch/um/os-Linux/irq.c +@@ -48,7 +48,7 @@ int os_epoll_triggered(int index, int events) + int os_event_mask(int irq_type) + { + if (irq_type == IRQ_READ) +- return EPOLLIN | EPOLLPRI; ++ return EPOLLIN | EPOLLPRI | EPOLLERR | EPOLLHUP | EPOLLRDHUP; + if (irq_type == IRQ_WRITE) + return EPOLLOUT; + return 0; +diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c +index 1d7558dac75f3..a3dd61521d240 100644 +--- a/arch/um/os-Linux/umid.c ++++ b/arch/um/os-Linux/umid.c +@@ -137,20 +137,13 @@ static inline int is_umdir_used(char *dir) + { + char pid[sizeof("nnnnnnnnn")], *end, *file; + int dead, fd, p, n, err; +- size_t filelen; ++ size_t filelen = strlen(dir) + sizeof("/pid") + 1; + +- err = asprintf(&file, "%s/pid", dir); +- if (err < 0) +- return 0; +- +- filelen = strlen(file); ++ file = malloc(filelen); ++ if (!file) ++ return -ENOMEM; + +- n = snprintf(file, filelen, "%s/pid", dir); +- if (n >= filelen) { +- printk(UM_KERN_ERR "is_umdir_used - pid filename too long\n"); +- err = -E2BIG; +- goto out; +- } ++ snprintf(file, filelen, "%s/pid", dir); + + dead = 0; + fd = open(file, O_RDONLY); +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c +index af457f8cb29dd..7d4d89fa8647a 100644 +--- 
a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -257,7 +257,8 @@ static struct event_constraint intel_icl_event_constraints[] = { + INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf), + INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf), + INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */ +- INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */ ++ INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */ ++ INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */ + INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */ + INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf), + INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf), +@@ -5464,7 +5465,7 @@ __init int intel_pmu_init(void) + mem_attr = icl_events_attrs; + td_attr = icl_td_events_attrs; + tsx_attr = icl_tsx_events_attrs; +- x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02); ++ x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); + x86_pmu.lbr_pt_coexist = true; + intel_pmu_pebs_data_source_skl(pmem); + x86_pmu.update_topdown_event = icl_update_topdown_event; +diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c +index 8961653c5dd2b..e2b0efcba1017 100644 +--- a/arch/x86/events/intel/lbr.c ++++ b/arch/x86/events/intel/lbr.c +@@ -919,7 +919,7 @@ static __always_inline bool get_lbr_predicted(u64 info) + return !(info & LBR_INFO_MISPRED); + } + +-static __always_inline bool get_lbr_cycles(u64 info) ++static __always_inline u16 get_lbr_cycles(u64 info) + { + if (static_cpu_has(X86_FEATURE_ARCH_LBR) && + !(x86_pmu.lbr_timed_lbr && info & LBR_INFO_CYC_CNT_VALID)) +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h +index 4e3099d9ae625..57af25cb44f63 100644 +--- a/arch/x86/include/asm/apic.h ++++ b/arch/x86/include/asm/apic.h +@@ -259,6 +259,7 @@ static inline u64 native_x2apic_icr_read(void) + + extern int x2apic_mode; + extern int x2apic_phys; ++extern void __init 
x2apic_set_max_apicid(u32 apicid); + extern void __init check_x2apic(void); + extern void x2apic_setup(void); + static inline int x2apic_enabled(void) +diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h +index 86b63c7feab75..86b2e0dcc4bfe 100644 +--- a/arch/x86/include/asm/cacheinfo.h ++++ b/arch/x86/include/asm/cacheinfo.h +@@ -2,7 +2,7 @@ + #ifndef _ASM_X86_CACHEINFO_H + #define _ASM_X86_CACHEINFO_H + +-void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id); +-void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id); ++void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu); ++void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu); + + #endif /* _ASM_X86_CACHEINFO_H */ +diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h +index a0f147893a041..fc25c88c7ff29 100644 +--- a/arch/x86/include/asm/mce.h ++++ b/arch/x86/include/asm/mce.h +@@ -177,7 +177,8 @@ enum mce_notifier_prios { + MCE_PRIO_EXTLOG, + MCE_PRIO_UC, + MCE_PRIO_EARLY, +- MCE_PRIO_CEC ++ MCE_PRIO_CEC, ++ MCE_PRIO_HIGHEST = MCE_PRIO_CEC + }; + + struct notifier_block; +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index b3eef1d5c9037..113f6ca7b8284 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -1841,20 +1841,22 @@ static __init void try_to_enable_x2apic(int remap_mode) + return; + + if (remap_mode != IRQ_REMAP_X2APIC_MODE) { +- /* IR is required if there is APIC ID > 255 even when running +- * under KVM ++ /* ++ * Using X2APIC without IR is not architecturally supported ++ * on bare metal but may be supported in guests. 
+ */ +- if (max_physical_apicid > 255 || +- !x86_init.hyper.x2apic_available()) { ++ if (!x86_init.hyper.x2apic_available()) { + pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n"); + x2apic_disable(); + return; + } + + /* +- * without IR all CPUs can be addressed by IOAPIC/MSI +- * only in physical mode ++ * Without IR, all CPUs can be addressed by IOAPIC/MSI only ++ * in physical mode, and CPUs with an APIC ID that cannnot ++ * be addressed must not be brought online. + */ ++ x2apic_set_max_apicid(255); + x2apic_phys = 1; + } + x2apic_enable(); +diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c +index bc9693841353c..e14eae6d6ea71 100644 +--- a/arch/x86/kernel/apic/x2apic_phys.c ++++ b/arch/x86/kernel/apic/x2apic_phys.c +@@ -8,6 +8,12 @@ + int x2apic_phys; + + static struct apic apic_x2apic_phys; ++static u32 x2apic_max_apicid __ro_after_init; ++ ++void __init x2apic_set_max_apicid(u32 apicid) ++{ ++ x2apic_max_apicid = apicid; ++} + + static int __init set_x2apic_phys_mode(char *arg) + { +@@ -98,6 +104,9 @@ static int x2apic_phys_probe(void) + /* Common x2apic functions, also used by x2apic_cluster */ + int x2apic_apic_id_valid(u32 apicid) + { ++ if (x2apic_max_apicid && apicid > x2apic_max_apicid) ++ return 0; ++ + return 1; + } + +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 6062ce586b959..2f1fbd8150af7 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -330,7 +330,6 @@ static void legacy_fixup_core_id(struct cpuinfo_x86 *c) + */ + static void amd_get_topology(struct cpuinfo_x86 *c) + { +- u8 node_id; + int cpu = smp_processor_id(); + + /* get information required for multi-node processors */ +@@ -340,7 +339,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c) + + cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); + +- node_id = ecx & 0xff; ++ c->cpu_die_id = ecx & 0xff; + + if (c->x86 == 0x15) + c->cu_id = ebx & 0xff; +@@ -360,15 +359,15 @@ static void 
amd_get_topology(struct cpuinfo_x86 *c) + if (!err) + c->x86_coreid_bits = get_count_order(c->x86_max_cores); + +- cacheinfo_amd_init_llc_id(c, cpu, node_id); ++ cacheinfo_amd_init_llc_id(c, cpu); + + } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { + u64 value; + + rdmsrl(MSR_FAM10H_NODE_ID, value); +- node_id = value & 7; ++ c->cpu_die_id = value & 7; + +- per_cpu(cpu_llc_id, cpu) = node_id; ++ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id; + } else + return; + +@@ -393,7 +392,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) + /* Convert the initial APIC ID into the socket ID */ + c->phys_proc_id = c->initial_apicid >> bits; + /* use socket ID also for last level cache */ +- per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; ++ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id; + } + + static void amd_detect_ppin(struct cpuinfo_x86 *c) +diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c +index 57074cf3ad7c1..f9ac682e75e78 100644 +--- a/arch/x86/kernel/cpu/cacheinfo.c ++++ b/arch/x86/kernel/cpu/cacheinfo.c +@@ -646,7 +646,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c) + return i; + } + +-void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) ++void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu) + { + /* + * We may have multiple LLCs if L3 caches exist, so check if we +@@ -657,7 +657,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) + + if (c->x86 < 0x17) { + /* LLC is at the node level. */ +- per_cpu(cpu_llc_id, cpu) = node_id; ++ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id; + } else if (c->x86 == 0x17 && c->x86_model <= 0x1F) { + /* + * LLC is at the core complex level. 
+@@ -684,7 +684,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) + } + } + +-void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) ++void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu) + { + /* + * We may have multiple LLCs if L3 caches exist, so check if we +diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c +index ac6c30e5801da..dc0840aae26c1 100644 +--- a/arch/x86/kernel/cpu/hygon.c ++++ b/arch/x86/kernel/cpu/hygon.c +@@ -65,7 +65,6 @@ static void hygon_get_topology_early(struct cpuinfo_x86 *c) + */ + static void hygon_get_topology(struct cpuinfo_x86 *c) + { +- u8 node_id; + int cpu = smp_processor_id(); + + /* get information required for multi-node processors */ +@@ -75,7 +74,7 @@ static void hygon_get_topology(struct cpuinfo_x86 *c) + + cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); + +- node_id = ecx & 0xff; ++ c->cpu_die_id = ecx & 0xff; + + c->cpu_core_id = ebx & 0xff; + +@@ -93,14 +92,14 @@ static void hygon_get_topology(struct cpuinfo_x86 *c) + /* Socket ID is ApicId[6] for these processors. 
*/ + c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT; + +- cacheinfo_hygon_init_llc_id(c, cpu, node_id); ++ cacheinfo_hygon_init_llc_id(c, cpu); + } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { + u64 value; + + rdmsrl(MSR_FAM10H_NODE_ID, value); +- node_id = value & 7; ++ c->cpu_die_id = value & 7; + +- per_cpu(cpu_llc_id, cpu) = node_id; ++ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id; + } else + return; + +@@ -123,7 +122,7 @@ static void hygon_detect_cmp(struct cpuinfo_x86 *c) + /* Convert the initial APIC ID into the socket ID */ + c->phys_proc_id = c->initial_apicid >> bits; + /* use socket ID also for last level cache */ +- per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; ++ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id; + } + + static void srat_detect_node(struct cpuinfo_x86 *c) +diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c +index 32b7099e35111..311688202ea51 100644 +--- a/arch/x86/kernel/cpu/mce/core.c ++++ b/arch/x86/kernel/cpu/mce/core.c +@@ -162,7 +162,8 @@ EXPORT_SYMBOL_GPL(mce_log); + + void mce_register_decode_chain(struct notifier_block *nb) + { +- if (WARN_ON(nb->priority > MCE_PRIO_MCELOG && nb->priority < MCE_PRIO_EDAC)) ++ if (WARN_ON(nb->priority < MCE_PRIO_LOWEST || ++ nb->priority > MCE_PRIO_HIGHEST)) + return; + + blocking_notifier_chain_register(&x86_mce_decoder_chain, nb); +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c +index 547c7abb39f51..39f7d8c3c064b 100644 +--- a/arch/x86/kernel/kprobes/core.c ++++ b/arch/x86/kernel/kprobes/core.c +@@ -937,6 +937,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr) + * So clear it by resetting the current kprobe: + */ + regs->flags &= ~X86_EFLAGS_TF; ++ /* ++ * Since the single step (trap) has been cancelled, ++ * we need to restore BTF here. 
++ */ ++ restore_btf(); + + /* + * If the TF flag was set before the kprobe hit, +diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c +index ae64f98ec2ab6..4c09ba1102047 100644 +--- a/arch/x86/kernel/tboot.c ++++ b/arch/x86/kernel/tboot.c +@@ -93,6 +93,7 @@ static struct mm_struct tboot_mm = { + .pgd = swapper_pg_dir, + .mm_users = ATOMIC_INIT(2), + .mm_count = ATOMIC_INIT(1), ++ .write_protect_seq = SEQCNT_ZERO(tboot_mm.write_protect_seq), + MMAP_LOCK_INITIALIZER(init_mm) + .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), + .mmlist = LIST_HEAD_INIT(init_mm.mmlist), +diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h +index f7a6e8f83783c..dc921d76e42e8 100644 +--- a/arch/x86/kvm/cpuid.h ++++ b/arch/x86/kvm/cpuid.h +@@ -264,6 +264,20 @@ static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu) + return x86_stepping(best->eax); + } + ++static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu) ++{ ++ return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) || ++ guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) || ++ guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) || ++ guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD)); ++} ++ ++static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu) ++{ ++ return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) || ++ guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB)); ++} ++ + static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu) + { + return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT; +diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c +index 566f4d18185b1..5c9630c3f6ba1 100644 +--- a/arch/x86/kvm/svm/sev.c ++++ b/arch/x86/kvm/svm/sev.c +@@ -1127,9 +1127,6 @@ void sev_vm_destroy(struct kvm *kvm) + + int __init sev_hardware_setup(void) + { +- struct sev_user_data_status *status; +- int rc; +- + /* Maximum number of encrypted guests supported simultaneously */ + max_sev_asid = cpuid_ecx(0x8000001F); + +@@ -1148,26 +1145,9 @@ int __init sev_hardware_setup(void) + if 
(!sev_reclaim_asid_bitmap) + return 1; + +- status = kmalloc(sizeof(*status), GFP_KERNEL); +- if (!status) +- return 1; +- +- /* +- * Check SEV platform status. +- * +- * PLATFORM_STATUS can be called in any state, if we failed to query +- * the PLATFORM status then either PSP firmware does not support SEV +- * feature or SEV firmware is dead. +- */ +- rc = sev_platform_status(status, NULL); +- if (rc) +- goto err; +- + pr_info("SEV supported\n"); + +-err: +- kfree(status); +- return rc; ++ return 0; + } + + void sev_hardware_teardown(void) +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c +index da7eb4aaf44f8..94b0cb8330451 100644 +--- a/arch/x86/kvm/svm/svm.c ++++ b/arch/x86/kvm/svm/svm.c +@@ -2543,10 +2543,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + break; + case MSR_IA32_SPEC_CTRL: + if (!msr_info->host_initiated && +- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) && +- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) && +- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) && +- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD)) ++ !guest_has_spec_ctrl_msr(vcpu)) + return 1; + + msr_info->data = svm->spec_ctrl; +@@ -2630,10 +2627,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) + break; + case MSR_IA32_SPEC_CTRL: + if (!msr->host_initiated && +- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) && +- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) && +- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) && +- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD)) ++ !guest_has_spec_ctrl_msr(vcpu)) + return 1; + + if (kvm_spec_ctrl_test_value(data)) +@@ -2658,12 +2652,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) + break; + case MSR_IA32_PRED_CMD: + if (!msr->host_initiated && +- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB)) ++ !guest_has_pred_cmd_msr(vcpu)) + return 1; + + if (data & ~PRED_CMD_IBPB) + return 1; +- if (!boot_cpu_has(X86_FEATURE_AMD_IBPB)) ++ if (!boot_cpu_has(X86_FEATURE_IBPB)) + 
return 1; + if (!data) + break; +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index 47b8357b97517..c01aac2bac37c 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -1826,7 +1826,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + break; + case MSR_IA32_SPEC_CTRL: + if (!msr_info->host_initiated && +- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) ++ !guest_has_spec_ctrl_msr(vcpu)) + return 1; + + msr_info->data = to_vmx(vcpu)->spec_ctrl; +@@ -2028,7 +2028,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + break; + case MSR_IA32_SPEC_CTRL: + if (!msr_info->host_initiated && +- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) ++ !guest_has_spec_ctrl_msr(vcpu)) + return 1; + + if (kvm_spec_ctrl_test_value(data)) +@@ -2063,12 +2063,12 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + goto find_uret_msr; + case MSR_IA32_PRED_CMD: + if (!msr_info->host_initiated && +- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) ++ !guest_has_pred_cmd_msr(vcpu)) + return 1; + + if (data & ~PRED_CMD_IBPB) + return 1; +- if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL)) ++ if (!boot_cpu_has(X86_FEATURE_IBPB)) + return 1; + if (!data) + break; +diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c +index fe7a12599d8eb..968d7005f4a72 100644 +--- a/arch/x86/mm/ident_map.c ++++ b/arch/x86/mm/ident_map.c +@@ -62,6 +62,7 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page, + unsigned long addr, unsigned long end) + { + unsigned long next; ++ int result; + + for (; addr < end; addr = next) { + p4d_t *p4d = p4d_page + p4d_index(addr); +@@ -73,13 +74,20 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page, + + if (p4d_present(*p4d)) { + pud = pud_offset(p4d, 0); +- ident_pud_init(info, pud, addr, next); ++ result = ident_pud_init(info, pud, addr, next); ++ if (result) ++ return result; ++ + continue; + } + pud = (pud_t 
*)info->alloc_pgt_page(info->context); + if (!pud) + return -ENOMEM; +- ident_pud_init(info, pud, addr, next); ++ ++ result = ident_pud_init(info, pud, addr, next); ++ if (result) ++ return result; ++ + set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag)); + } + +diff --git a/crypto/Kconfig b/crypto/Kconfig +index 094ef56ab7b42..37de7d006858d 100644 +--- a/crypto/Kconfig ++++ b/crypto/Kconfig +@@ -145,7 +145,7 @@ config CRYPTO_MANAGER_DISABLE_TESTS + + config CRYPTO_MANAGER_EXTRA_TESTS + bool "Enable extra run-time crypto self tests" +- depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS ++ depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS && CRYPTO_MANAGER + help + Enable extra run-time self tests of registered crypto algorithms, + including randomized fuzz tests. +diff --git a/crypto/ecdh.c b/crypto/ecdh.c +index b0232d6ab4ce7..d56b8603dec95 100644 +--- a/crypto/ecdh.c ++++ b/crypto/ecdh.c +@@ -53,12 +53,13 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, + return ecc_gen_privkey(ctx->curve_id, ctx->ndigits, + ctx->private_key); + +- if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits, +- (const u64 *)params.key, params.key_size) < 0) +- return -EINVAL; +- + memcpy(ctx->private_key, params.key, params.key_size); + ++ if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits, ++ ctx->private_key, params.key_size) < 0) { ++ memzero_explicit(ctx->private_key, params.key_size); ++ return -EINVAL; ++ } + return 0; + } + +diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c +index 780214b5ca16e..ab6d61e80b1cb 100644 +--- a/drivers/accessibility/speakup/speakup_dectlk.c ++++ b/drivers/accessibility/speakup/speakup_dectlk.c +@@ -37,7 +37,7 @@ static unsigned char get_index(struct spk_synth *synth); + static int in_escape; + static int is_flushing; + +-static spinlock_t flush_lock; ++static DEFINE_SPINLOCK(flush_lock); + static DECLARE_WAIT_QUEUE_HEAD(flush); + + static struct var_t vars[] = { 
+diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c +index 4ed755a963aa5..8f2dc176bb412 100644 +--- a/drivers/acpi/acpi_pnp.c ++++ b/drivers/acpi/acpi_pnp.c +@@ -319,6 +319,9 @@ static bool matching_id(const char *idstr, const char *list_id) + { + int i; + ++ if (strlen(idstr) != strlen(list_id)) ++ return false; ++ + if (memcmp(idstr, list_id, 3)) + return false; + +diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c +index 94d91c67aeaeb..ef77dbcaf58f6 100644 +--- a/drivers/acpi/device_pm.c ++++ b/drivers/acpi/device_pm.c +@@ -749,7 +749,7 @@ static void acpi_pm_notify_work_func(struct acpi_device_wakeup_context *context) + static DEFINE_MUTEX(acpi_wakeup_lock); + + static int __acpi_device_wakeup_enable(struct acpi_device *adev, +- u32 target_state, int max_count) ++ u32 target_state) + { + struct acpi_device_wakeup *wakeup = &adev->wakeup; + acpi_status status; +@@ -757,9 +757,10 @@ static int __acpi_device_wakeup_enable(struct acpi_device *adev, + + mutex_lock(&acpi_wakeup_lock); + +- if (wakeup->enable_count >= max_count) ++ if (wakeup->enable_count >= INT_MAX) { ++ acpi_handle_info(adev->handle, "Wakeup enable count out of bounds!\n"); + goto out; +- ++ } + if (wakeup->enable_count > 0) + goto inc; + +@@ -799,7 +800,7 @@ out: + */ + static int acpi_device_wakeup_enable(struct acpi_device *adev, u32 target_state) + { +- return __acpi_device_wakeup_enable(adev, target_state, 1); ++ return __acpi_device_wakeup_enable(adev, target_state); + } + + /** +@@ -829,8 +830,12 @@ out: + mutex_unlock(&acpi_wakeup_lock); + } + +-static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable, +- int max_count) ++/** ++ * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device. ++ * @dev: Device to enable/disable to generate wakeup events. ++ * @enable: Whether to enable or disable the wakeup functionality. 
++ */ ++int acpi_pm_set_device_wakeup(struct device *dev, bool enable) + { + struct acpi_device *adev; + int error; +@@ -850,36 +855,14 @@ static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable, + return 0; + } + +- error = __acpi_device_wakeup_enable(adev, acpi_target_system_state(), +- max_count); ++ error = __acpi_device_wakeup_enable(adev, acpi_target_system_state()); + if (!error) + dev_dbg(dev, "Wakeup enabled by ACPI\n"); + + return error; + } +- +-/** +- * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device. +- * @dev: Device to enable/disable to generate wakeup events. +- * @enable: Whether to enable or disable the wakeup functionality. +- */ +-int acpi_pm_set_device_wakeup(struct device *dev, bool enable) +-{ +- return __acpi_pm_set_device_wakeup(dev, enable, 1); +-} + EXPORT_SYMBOL_GPL(acpi_pm_set_device_wakeup); + +-/** +- * acpi_pm_set_bridge_wakeup - Enable/disable remote wakeup for given bridge. +- * @dev: Bridge device to enable/disable to generate wakeup events. +- * @enable: Whether to enable or disable the wakeup functionality. +- */ +-int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable) +-{ +- return __acpi_pm_set_device_wakeup(dev, enable, INT_MAX); +-} +-EXPORT_SYMBOL_GPL(acpi_pm_set_bridge_wakeup); +- + /** + * acpi_dev_pm_low_power - Put ACPI device into a low-power state. + * @dev: Device to put into a low-power state. 
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c +index 442608220b5c8..4c97b0f44fce2 100644 +--- a/drivers/acpi/nfit/core.c ++++ b/drivers/acpi/nfit/core.c +@@ -5,6 +5,7 @@ + #include <linux/list_sort.h> + #include <linux/libnvdimm.h> + #include <linux/module.h> ++#include <linux/nospec.h> + #include <linux/mutex.h> + #include <linux/ndctl.h> + #include <linux/sysfs.h> +@@ -478,8 +479,11 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, + cmd_mask = nd_desc->cmd_mask; + if (cmd == ND_CMD_CALL && call_pkg->nd_family) { + family = call_pkg->nd_family; +- if (!test_bit(family, &nd_desc->bus_family_mask)) ++ if (family > NVDIMM_BUS_FAMILY_MAX || ++ !test_bit(family, &nd_desc->bus_family_mask)) + return -EINVAL; ++ family = array_index_nospec(family, ++ NVDIMM_BUS_FAMILY_MAX + 1); + dsm_mask = acpi_desc->family_dsm_mask[family]; + guid = to_nfit_bus_uuid(family); + } else { +diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c +index ad04824ca3baa..f2f5f1dc7c61d 100644 +--- a/drivers/acpi/resource.c ++++ b/drivers/acpi/resource.c +@@ -541,7 +541,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares, + ret = c->preproc(ares, c->preproc_data); + if (ret < 0) { + c->error = ret; +- return AE_CTRL_TERMINATE; ++ return AE_ABORT_METHOD; + } else if (ret > 0) { + return AE_OK; + } +diff --git a/drivers/android/binder.c b/drivers/android/binder.c +index b5117576792bc..2a3952925855d 100644 +--- a/drivers/android/binder.c ++++ b/drivers/android/binder.c +@@ -3146,6 +3146,7 @@ static void binder_transaction(struct binder_proc *proc, + t->buffer->debug_id = t->debug_id; + t->buffer->transaction = t; + t->buffer->target_node = target_node; ++ t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF); + trace_binder_transaction_alloc_buf(t->buffer); + + if (binder_alloc_copy_user_to_buffer( +diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c +index 2f846b7ae8b82..7caf74ad24053 
100644 +--- a/drivers/android/binder_alloc.c ++++ b/drivers/android/binder_alloc.c +@@ -696,6 +696,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, + binder_insert_free_buffer(alloc, buffer); + } + ++static void binder_alloc_clear_buf(struct binder_alloc *alloc, ++ struct binder_buffer *buffer); + /** + * binder_alloc_free_buf() - free a binder buffer + * @alloc: binder_alloc for this proc +@@ -706,6 +708,18 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, + void binder_alloc_free_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer) + { ++ /* ++ * We could eliminate the call to binder_alloc_clear_buf() ++ * from binder_alloc_deferred_release() by moving this to ++ * binder_alloc_free_buf_locked(). However, that could ++ * increase contention for the alloc mutex if clear_on_free ++ * is used frequently for large buffers. The mutex is not ++ * needed for correctness here. ++ */ ++ if (buffer->clear_on_free) { ++ binder_alloc_clear_buf(alloc, buffer); ++ buffer->clear_on_free = false; ++ } + mutex_lock(&alloc->mutex); + binder_free_buf_locked(alloc, buffer); + mutex_unlock(&alloc->mutex); +@@ -802,6 +816,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) + /* Transaction should already have been freed */ + BUG_ON(buffer->transaction); + ++ if (buffer->clear_on_free) { ++ binder_alloc_clear_buf(alloc, buffer); ++ buffer->clear_on_free = false; ++ } + binder_free_buf_locked(alloc, buffer); + buffers++; + } +@@ -1135,6 +1153,36 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc, + return lru_page->page_ptr; + } + ++/** ++ * binder_alloc_clear_buf() - zero out buffer ++ * @alloc: binder_alloc for this proc ++ * @buffer: binder buffer to be cleared ++ * ++ * memset the given buffer to 0 ++ */ ++static void binder_alloc_clear_buf(struct binder_alloc *alloc, ++ struct binder_buffer *buffer) ++{ ++ size_t bytes = binder_alloc_buffer_size(alloc, buffer); ++ binder_size_t buffer_offset = 0; ++ 
++ while (bytes) { ++ unsigned long size; ++ struct page *page; ++ pgoff_t pgoff; ++ void *kptr; ++ ++ page = binder_alloc_get_page(alloc, buffer, ++ buffer_offset, &pgoff); ++ size = min_t(size_t, bytes, PAGE_SIZE - pgoff); ++ kptr = kmap(page) + pgoff; ++ memset(kptr, 0, size); ++ kunmap(page); ++ bytes -= size; ++ buffer_offset += size; ++ } ++} ++ + /** + * binder_alloc_copy_user_to_buffer() - copy src user to tgt user + * @alloc: binder_alloc for this proc +diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h +index 55d8b4106766a..6e8e001381af4 100644 +--- a/drivers/android/binder_alloc.h ++++ b/drivers/android/binder_alloc.h +@@ -23,6 +23,7 @@ struct binder_transaction; + * @entry: entry alloc->buffers + * @rb_node: node for allocated_buffers/free_buffers rb trees + * @free: %true if buffer is free ++ * @clear_on_free: %true if buffer must be zeroed after use + * @allow_user_free: %true if user is allowed to free buffer + * @async_transaction: %true if buffer is in use for an async txn + * @debug_id: unique ID for debugging +@@ -41,9 +42,10 @@ struct binder_buffer { + struct rb_node rb_node; /* free entry by size or allocated entry */ + /* by address */ + unsigned free:1; ++ unsigned clear_on_free:1; + unsigned allow_user_free:1; + unsigned async_transaction:1; +- unsigned debug_id:29; ++ unsigned debug_id:28; + + struct binder_transaction *transaction; + +diff --git a/drivers/base/core.c b/drivers/base/core.c +index d661ada1518fb..e8cb66093f211 100644 +--- a/drivers/base/core.c ++++ b/drivers/base/core.c +@@ -1386,7 +1386,7 @@ static void device_links_purge(struct device *dev) + return; + + mutex_lock(&wfs_lock); +- list_del(&dev->links.needs_suppliers); ++ list_del_init(&dev->links.needs_suppliers); + mutex_unlock(&wfs_lock); + + /* +diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c +index beb34b4f76b0e..172f720b8d637 100644 +--- a/drivers/block/null_blk_zoned.c ++++ b/drivers/block/null_blk_zoned.c +@@ 
-6,8 +6,7 @@ + #define CREATE_TRACE_POINTS + #include "null_blk_trace.h" + +-/* zone_size in MBs to sectors. */ +-#define ZONE_SIZE_SHIFT 11 ++#define MB_TO_SECTS(mb) (((sector_t)mb * SZ_1M) >> SECTOR_SHIFT) + + static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect) + { +@@ -16,7 +15,7 @@ static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect) + + int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q) + { +- sector_t dev_size = (sector_t)dev->size * 1024 * 1024; ++ sector_t dev_capacity_sects, zone_capacity_sects; + sector_t sector = 0; + unsigned int i; + +@@ -38,9 +37,13 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q) + return -EINVAL; + } + +- dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT; +- dev->nr_zones = dev_size >> +- (SECTOR_SHIFT + ilog2(dev->zone_size_sects)); ++ zone_capacity_sects = MB_TO_SECTS(dev->zone_capacity); ++ dev_capacity_sects = MB_TO_SECTS(dev->size); ++ dev->zone_size_sects = MB_TO_SECTS(dev->zone_size); ++ dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects); ++ if (dev_capacity_sects & (dev->zone_size_sects - 1)) ++ dev->nr_zones++; ++ + dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone), + GFP_KERNEL | __GFP_ZERO); + if (!dev->zones) +@@ -101,8 +104,12 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q) + struct blk_zone *zone = &dev->zones[i]; + + zone->start = zone->wp = sector; +- zone->len = dev->zone_size_sects; +- zone->capacity = dev->zone_capacity << ZONE_SIZE_SHIFT; ++ if (zone->start + dev->zone_size_sects > dev_capacity_sects) ++ zone->len = dev_capacity_sects - zone->start; ++ else ++ zone->len = dev->zone_size_sects; ++ zone->capacity = ++ min_t(sector_t, zone->len, zone_capacity_sects); + zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ; + zone->cond = BLK_ZONE_COND_EMPTY; + +@@ -332,8 +339,11 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t 
sector, + + trace_nullb_zone_op(cmd, zno, zone->cond); + +- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) ++ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) { ++ if (append) ++ return BLK_STS_IOERR; + return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors); ++ } + + null_lock_zone(dev, zno); + +diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c +index 4f4474eecadb7..d9dd138ca9c64 100644 +--- a/drivers/block/rnbd/rnbd-clt-sysfs.c ++++ b/drivers/block/rnbd/rnbd-clt-sysfs.c +@@ -433,8 +433,9 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev) + * i.e. rnbd_clt_unmap_dev_store() leading to a sysfs warning because + * of sysfs link already was removed already. + */ +- if (strlen(dev->blk_symlink_name) && try_module_get(THIS_MODULE)) { ++ if (dev->blk_symlink_name && try_module_get(THIS_MODULE)) { + sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name); ++ kfree(dev->blk_symlink_name); + module_put(THIS_MODULE); + } + } +@@ -487,10 +488,17 @@ static int rnbd_clt_get_path_name(struct rnbd_clt_dev *dev, char *buf, + static int rnbd_clt_add_dev_symlink(struct rnbd_clt_dev *dev) + { + struct kobject *gd_kobj = &disk_to_dev(dev->gd)->kobj; +- int ret; ++ int ret, len; ++ ++ len = strlen(dev->pathname) + strlen(dev->sess->sessname) + 2; ++ dev->blk_symlink_name = kzalloc(len, GFP_KERNEL); ++ if (!dev->blk_symlink_name) { ++ rnbd_clt_err(dev, "Failed to allocate memory for blk_symlink_name\n"); ++ return -ENOMEM; ++ } + + ret = rnbd_clt_get_path_name(dev, dev->blk_symlink_name, +- sizeof(dev->blk_symlink_name)); ++ len); + if (ret) { + rnbd_clt_err(dev, "Failed to get /sys/block symlink path, err: %d\n", + ret); +@@ -508,7 +516,8 @@ static int rnbd_clt_add_dev_symlink(struct rnbd_clt_dev *dev) + return 0; + + out_err: +- dev->blk_symlink_name[0] = '\0'; ++ kfree(dev->blk_symlink_name); ++ dev->blk_symlink_name = NULL ; + return ret; + } + +diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c +index 
8b2411ccbda97..7af1b60582fe5 100644 +--- a/drivers/block/rnbd/rnbd-clt.c ++++ b/drivers/block/rnbd/rnbd-clt.c +@@ -59,6 +59,7 @@ static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev) + ida_simple_remove(&index_ida, dev->clt_device_id); + mutex_unlock(&ida_lock); + kfree(dev->hw_queues); ++ kfree(dev->pathname); + rnbd_clt_put_sess(dev->sess); + mutex_destroy(&dev->lock); + kfree(dev); +@@ -1381,10 +1382,16 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess, + pathname, sess->sessname, ret); + goto out_queues; + } ++ ++ dev->pathname = kstrdup(pathname, GFP_KERNEL); ++ if (!dev->pathname) { ++ ret = -ENOMEM; ++ goto out_queues; ++ } ++ + dev->clt_device_id = ret; + dev->sess = sess; + dev->access_mode = access_mode; +- strlcpy(dev->pathname, pathname, sizeof(dev->pathname)); + mutex_init(&dev->lock); + refcount_set(&dev->refcount, 1); + dev->dev_state = DEV_STATE_INIT; +@@ -1413,8 +1420,8 @@ static bool __exists_dev(const char *pathname) + list_for_each_entry(sess, &sess_list, list) { + mutex_lock(&sess->lock); + list_for_each_entry(dev, &sess->devs_list, list) { +- if (!strncmp(dev->pathname, pathname, +- sizeof(dev->pathname))) { ++ if (strlen(dev->pathname) == strlen(pathname) && ++ !strcmp(dev->pathname, pathname)) { + found = true; + break; + } +diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h +index ed33654aa4868..b193d59040503 100644 +--- a/drivers/block/rnbd/rnbd-clt.h ++++ b/drivers/block/rnbd/rnbd-clt.h +@@ -108,7 +108,7 @@ struct rnbd_clt_dev { + u32 clt_device_id; + struct mutex lock; + enum rnbd_clt_dev_state dev_state; +- char pathname[NAME_MAX]; ++ char *pathname; + enum rnbd_access_mode access_mode; + bool read_only; + bool rotational; +@@ -126,7 +126,7 @@ struct rnbd_clt_dev { + struct list_head list; + struct gendisk *gd; + struct kobject kobj; +- char blk_symlink_name[NAME_MAX]; ++ char *blk_symlink_name; + refcount_t refcount; + struct work_struct unmap_on_rmmod_work; + }; +diff --git 
a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c +index 76912c584a76d..9860d4842f36c 100644 +--- a/drivers/block/xen-blkback/xenbus.c ++++ b/drivers/block/xen-blkback/xenbus.c +@@ -274,6 +274,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) + + if (ring->xenblkd) { + kthread_stop(ring->xenblkd); ++ ring->xenblkd = NULL; + wake_up(&ring->shutdown_wq); + } + +@@ -675,7 +676,8 @@ static int xen_blkbk_probe(struct xenbus_device *dev, + /* setup back pointer */ + be->blkif->be = be; + +- err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed, ++ err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL, ++ backend_changed, + "%s/%s", dev->nodename, "physical-device"); + if (err) + goto fail; +diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c +index ba45c59bd9f36..5f9f027956317 100644 +--- a/drivers/bluetooth/btmtksdio.c ++++ b/drivers/bluetooth/btmtksdio.c +@@ -704,7 +704,7 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to power on data RAM (%d)", err); +- return err; ++ goto free_fw; + } + + fw_ptr = fw->data; +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 1005b6e8ff743..80468745d5c5e 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -1763,6 +1763,8 @@ static int btusb_setup_bcm92035(struct hci_dev *hdev) + + static int btusb_setup_csr(struct hci_dev *hdev) + { ++ struct btusb_data *data = hci_get_drvdata(hdev); ++ u16 bcdDevice = le16_to_cpu(data->udev->descriptor.bcdDevice); + struct hci_rp_read_local_version *rp; + struct sk_buff *skb; + bool is_fake = false; +@@ -1832,6 +1834,12 @@ static int btusb_setup_csr(struct hci_dev *hdev) + le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_4_0) + is_fake = true; + ++ /* Other clones which beat all the above checks */ ++ else if (bcdDevice == 0x0134 && ++ le16_to_cpu(rp->lmp_subver) == 
0x0c5c && ++ le16_to_cpu(rp->hci_ver) == BLUETOOTH_VER_2_0) ++ is_fake = true; ++ + if (is_fake) { + bt_dev_warn(hdev, "CSR: Unbranded CSR clone detected; adding workarounds..."); + +@@ -3067,7 +3075,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) + err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to power on data RAM (%d)", err); +- return err; ++ goto err_release_fw; + } + + fw_ptr = fw->data; +diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c +index 981d96cc76959..78d635f1d1567 100644 +--- a/drivers/bluetooth/hci_h5.c ++++ b/drivers/bluetooth/hci_h5.c +@@ -245,6 +245,9 @@ static int h5_close(struct hci_uart *hu) + skb_queue_purge(&h5->rel); + skb_queue_purge(&h5->unrel); + ++ kfree_skb(h5->rx_skb); ++ h5->rx_skb = NULL; ++ + if (h5->vnd && h5->vnd->close) + h5->vnd->close(h5); + +diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c +index e71a6f52ea0cf..2d7c764bb7dcf 100644 +--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c ++++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c +@@ -292,8 +292,10 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, + goto error; + + mc_adev = resource->data; +- if (!mc_adev) ++ if (!mc_adev) { ++ error = -EINVAL; + goto error; ++ } + + mc_adev->consumer_link = device_link_add(&mc_dev->dev, + &mc_adev->dev, +diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c +index 76a6ee505d33d..806766b1b45f6 100644 +--- a/drivers/bus/fsl-mc/fsl-mc-bus.c ++++ b/drivers/bus/fsl-mc/fsl-mc-bus.c +@@ -967,8 +967,11 @@ static int fsl_mc_bus_probe(struct platform_device *pdev) + platform_set_drvdata(pdev, mc); + + plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); +- if (plat_res) ++ if (plat_res) { + mc->fsl_mc_regs = devm_ioremap_resource(&pdev->dev, plat_res); ++ if (IS_ERR(mc->fsl_mc_regs)) ++ return PTR_ERR(mc->fsl_mc_regs); ++ } + + if (mc->fsl_mc_regs && 
IS_ENABLED(CONFIG_ACPI) && + !dev_of_node(&pdev->dev)) { +diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c +index 0ffdebde82657..8cefa359fccd8 100644 +--- a/drivers/bus/mhi/core/init.c ++++ b/drivers/bus/mhi/core/init.c +@@ -610,7 +610,7 @@ static int parse_ev_cfg(struct mhi_controller *mhi_cntrl, + { + struct mhi_event *mhi_event; + const struct mhi_event_config *event_cfg; +- struct device *dev = &mhi_cntrl->mhi_dev->dev; ++ struct device *dev = mhi_cntrl->cntrl_dev; + int i, num; + + num = config->num_events; +@@ -692,7 +692,7 @@ static int parse_ch_cfg(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *config) + { + const struct mhi_channel_config *ch_cfg; +- struct device *dev = &mhi_cntrl->mhi_dev->dev; ++ struct device *dev = mhi_cntrl->cntrl_dev; + int i; + u32 chan; + +@@ -1276,10 +1276,8 @@ static int mhi_driver_remove(struct device *dev) + mutex_unlock(&mhi_chan->mutex); + } + +- read_lock_bh(&mhi_cntrl->pm_lock); + while (mhi_dev->dev_wake) + mhi_device_put(mhi_dev); +- read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; + } +diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c +index 9f7ed1fcd4285..626dedd110cbc 100644 +--- a/drivers/bus/mips_cdmm.c ++++ b/drivers/bus/mips_cdmm.c +@@ -559,10 +559,8 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus) + dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id); + ++id; + ret = device_register(&dev->dev); +- if (ret) { ++ if (ret) + put_device(&dev->dev); +- kfree(dev); +- } + } + } + +diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c +index 3c4c956035954..c8cbec5308f02 100644 +--- a/drivers/clk/at91/sam9x60.c ++++ b/drivers/clk/at91/sam9x60.c +@@ -174,7 +174,6 @@ static void __init sam9x60_pmc_setup(struct device_node *np) + struct regmap *regmap; + struct clk_hw *hw; + int i; +- bool bypass; + + i = of_property_match_string(np, "clock-names", "td_slck"); + if (i < 0) +@@ -209,10 +208,7 @@ static void __init sam9x60_pmc_setup(struct 
device_node *np) + if (IS_ERR(hw)) + goto err_free; + +- bypass = of_property_read_bool(np, "atmel,osc-bypass"); +- +- hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, +- bypass); ++ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, 0); + if (IS_ERR(hw)) + goto err_free; + main_osc_hw = hw; +diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c +index 0db2ab3eca147..a092a940baa40 100644 +--- a/drivers/clk/at91/sama7g5.c ++++ b/drivers/clk/at91/sama7g5.c +@@ -838,7 +838,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np) + sama7g5_pmc = pmc_data_allocate(PMC_I2S1_MUX + 1, + nck(sama7g5_systemck), + nck(sama7g5_periphck), +- nck(sama7g5_gck)); ++ nck(sama7g5_gck), 8); + if (!sama7g5_pmc) + return; + +@@ -980,6 +980,8 @@ static void __init sama7g5_pmc_setup(struct device_node *np) + sama7g5_prog_mux_table); + if (IS_ERR(hw)) + goto err_free; ++ ++ sama7g5_pmc->pchws[i] = hw; + } + + for (i = 0; i < ARRAY_SIZE(sama7g5_systemck); i++) { +@@ -1052,7 +1054,7 @@ err_free: + kfree(alloc_mem); + } + +- pmc_data_free(sama7g5_pmc); ++ kfree(sama7g5_pmc); + } + + /* Some clks are used for a clocksource */ +diff --git a/drivers/clk/bcm/clk-bcm2711-dvp.c b/drivers/clk/bcm/clk-bcm2711-dvp.c +index 8333e20dc9d22..69e2f85f7029d 100644 +--- a/drivers/clk/bcm/clk-bcm2711-dvp.c ++++ b/drivers/clk/bcm/clk-bcm2711-dvp.c +@@ -108,6 +108,7 @@ static const struct of_device_id clk_dvp_dt_ids[] = { + { .compatible = "brcm,brcm2711-dvp", }, + { /* sentinel */ } + }; ++MODULE_DEVICE_TABLE(of, clk_dvp_dt_ids); + + static struct platform_driver clk_dvp_driver = { + .probe = clk_dvp_probe, +diff --git a/drivers/clk/clk-fsl-sai.c b/drivers/clk/clk-fsl-sai.c +index 0221180a4dd73..1e81c8d8a6fd3 100644 +--- a/drivers/clk/clk-fsl-sai.c ++++ b/drivers/clk/clk-fsl-sai.c +@@ -68,9 +68,20 @@ static int fsl_sai_clk_probe(struct platform_device *pdev) + if (IS_ERR(hw)) + return PTR_ERR(hw); + ++ platform_set_drvdata(pdev, hw); ++ + return 
devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw); + } + ++static int fsl_sai_clk_remove(struct platform_device *pdev) ++{ ++ struct clk_hw *hw = platform_get_drvdata(pdev); ++ ++ clk_hw_unregister_composite(hw); ++ ++ return 0; ++} ++ + static const struct of_device_id of_fsl_sai_clk_ids[] = { + { .compatible = "fsl,vf610-sai-clock" }, + { } +@@ -79,6 +90,7 @@ MODULE_DEVICE_TABLE(of, of_fsl_sai_clk_ids); + + static struct platform_driver fsl_sai_clk_driver = { + .probe = fsl_sai_clk_probe, ++ .remove = fsl_sai_clk_remove, + .driver = { + .name = "fsl-sai-clk", + .of_match_table = of_fsl_sai_clk_ids, +diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c +index aa21371f9104c..a3e883a9f4067 100644 +--- a/drivers/clk/clk-s2mps11.c ++++ b/drivers/clk/clk-s2mps11.c +@@ -195,6 +195,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev) + return ret; + + err_reg: ++ of_node_put(s2mps11_clks[0].clk_np); + while (--i >= 0) + clkdev_drop(s2mps11_clks[i].lookup); + +diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c +index c90460e7ef215..43db67337bc06 100644 +--- a/drivers/clk/clk-versaclock5.c ++++ b/drivers/clk/clk-versaclock5.c +@@ -739,8 +739,8 @@ static int vc5_update_power(struct device_node *np_output, + { + u32 value; + +- if (!of_property_read_u32(np_output, +- "idt,voltage-microvolts", &value)) { ++ if (!of_property_read_u32(np_output, "idt,voltage-microvolt", ++ &value)) { + clk_out->clk_output_cfg0_mask |= VC5_CLK_OUTPUT_CFG0_PWR_MASK; + switch (value) { + case 1800000: +diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c +index dac6edc670cce..c8e9cb6c8e39c 100644 +--- a/drivers/clk/ingenic/cgu.c ++++ b/drivers/clk/ingenic/cgu.c +@@ -392,15 +392,21 @@ static unsigned int + ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info, + unsigned int div) + { +- unsigned int i; ++ unsigned int i, best_i = 0, best = (unsigned int)-1; + + for (i = 0; i < (1 << clk_info->div.bits) + && 
clk_info->div.div_table[i]; i++) { +- if (clk_info->div.div_table[i] >= div) +- return i; ++ if (clk_info->div.div_table[i] >= div && ++ clk_info->div.div_table[i] < best) { ++ best = clk_info->div.div_table[i]; ++ best_i = i; ++ ++ if (div == best) ++ break; ++ } + } + +- return i - 1; ++ return best_i; + } + + static unsigned +diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig +index 034da203e8e0e..9a8a548d839d8 100644 +--- a/drivers/clk/meson/Kconfig ++++ b/drivers/clk/meson/Kconfig +@@ -110,6 +110,7 @@ config COMMON_CLK_G12A + select COMMON_CLK_MESON_AO_CLKC + select COMMON_CLK_MESON_EE_CLKC + select COMMON_CLK_MESON_CPU_DYNDIV ++ select COMMON_CLK_MESON_VID_PLL_DIV + select MFD_SYSCON + help + Support for the clock controller on Amlogic S905D2, S905X2 and S905Y2 +diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c +index e9e306d4e9af9..41271351cf1f4 100644 +--- a/drivers/clk/mvebu/armada-37xx-xtal.c ++++ b/drivers/clk/mvebu/armada-37xx-xtal.c +@@ -13,8 +13,8 @@ + #include <linux/platform_device.h> + #include <linux/regmap.h> + +-#define NB_GPIO1_LATCH 0xC +-#define XTAL_MODE BIT(31) ++#define NB_GPIO1_LATCH 0x8 ++#define XTAL_MODE BIT(9) + + static int armada_3700_xtal_clock_probe(struct platform_device *pdev) + { +diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c +index 68d8f7aaf64e1..b080739ab0c33 100644 +--- a/drivers/clk/qcom/gcc-sc7180.c ++++ b/drivers/clk/qcom/gcc-sc7180.c +@@ -642,7 +642,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = { + .name = "gcc_sdcc1_ice_core_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 4, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_floor_ops, + }, + }; + +@@ -666,7 +666,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = { + .name = "gcc_sdcc2_apps_clk_src", + .parent_data = gcc_parent_data_5, + .num_parents = 5, +- .ops = &clk_rcg2_ops, ++ .ops = &clk_rcg2_floor_ops, + }, + }; + +diff --git 
a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c +index 17ebbac7ddfb4..046d79416b7d0 100644 +--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c ++++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c +@@ -26,7 +26,6 @@ + #include <dt-bindings/clock/r8a779a0-cpg-mssr.h> + + #include "renesas-cpg-mssr.h" +-#include "rcar-gen3-cpg.h" + + enum rcar_r8a779a0_clk_types { + CLK_TYPE_R8A779A0_MAIN = CLK_TYPE_CUSTOM, +@@ -84,6 +83,14 @@ enum clk_ids { + DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_PLL2X_3X, CLK_MAIN, \ + .offset = _offset) + ++#define DEF_MDSEL(_name, _id, _md, _parent0, _div0, _parent1, _div1) \ ++ DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_MDSEL, \ ++ (_parent0) << 16 | (_parent1), \ ++ .div = (_div0) << 16 | (_div1), .offset = _md) ++ ++#define DEF_OSC(_name, _id, _parent, _div) \ ++ DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_OSC, _parent, .div = _div) ++ + static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = { + /* External Clock Inputs */ + DEF_INPUT("extal", CLK_EXTAL), +@@ -136,8 +143,8 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = { + DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, 0x878), + DEF_DIV6P1("csi0", R8A779A0_CLK_CSI0, CLK_PLL5_DIV4, 0x880), + +- DEF_GEN3_OSC("osc", R8A779A0_CLK_OSC, CLK_EXTAL, 8), +- DEF_GEN3_MDSEL("r", R8A779A0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1), ++ DEF_OSC("osc", R8A779A0_CLK_OSC, CLK_EXTAL, 8), ++ DEF_MDSEL("r", R8A779A0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1), + }; + + static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = { +diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +index 5f66bf8797723..149cfde817cba 100644 +--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c ++++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +@@ -389,6 +389,7 @@ static struct clk_div_table ths_div_table[] = { + { .val = 1, .div = 2 }, + { .val = 2, .div = 4 }, + { .val = 3, .div = 6 }, ++ { /* Sentinel */ }, + }; + static const char * const 
ths_parents[] = { "osc24M" }; + static struct ccu_div ths_clk = { +diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c +index 6b636362379ee..7e629a4493afd 100644 +--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c ++++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c +@@ -322,6 +322,7 @@ static struct clk_div_table ths_div_table[] = { + { .val = 1, .div = 2 }, + { .val = 2, .div = 4 }, + { .val = 3, .div = 6 }, ++ { /* Sentinel */ }, + }; + static SUNXI_CCU_DIV_TABLE_WITH_GATE(ths_clk, "ths", "osc24M", + 0x074, 0, 2, ths_div_table, BIT(31), 0); +diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c +index cfbaa90c7adbf..a5f526bb0483e 100644 +--- a/drivers/clk/tegra/clk-dfll.c ++++ b/drivers/clk/tegra/clk-dfll.c +@@ -1856,13 +1856,13 @@ static int dfll_fetch_pwm_params(struct tegra_dfll *td) + &td->reg_init_uV); + if (!ret) { + dev_err(td->dev, "couldn't get initialized voltage\n"); +- return ret; ++ return -EINVAL; + } + + ret = read_dt_param(td, "nvidia,pwm-period-nanoseconds", &pwm_period); + if (!ret) { + dev_err(td->dev, "couldn't get PWM period\n"); +- return ret; ++ return -EINVAL; + } + td->pwm_rate = (NSEC_PER_SEC / pwm_period) * (MAX_DFLL_VOLTAGES - 1); + +diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h +index ff7da2d3e94d8..24413812ec5b6 100644 +--- a/drivers/clk/tegra/clk-id.h ++++ b/drivers/clk/tegra/clk-id.h +@@ -227,6 +227,7 @@ enum clk_id { + tegra_clk_sdmmc4, + tegra_clk_sdmmc4_8, + tegra_clk_se, ++ tegra_clk_se_10, + tegra_clk_soc_therm, + tegra_clk_soc_therm_8, + tegra_clk_sor0, +diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c +index 2b2a3b81c16ba..60cc34f90cb9b 100644 +--- a/drivers/clk/tegra/clk-tegra-periph.c ++++ b/drivers/clk/tegra/clk-tegra-periph.c +@@ -630,7 +630,7 @@ static struct tegra_periph_init_data periph_clks[] = { + INT8("host1x", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_8), + INT8("host1x", 
mux_pllc4_out1_pllc_pllc4_out2_pllp_clkm_plla_pllc4_out0, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_9), + INT8("se", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se), +- INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se), ++ INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se_10), + INT8("2d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_2D, 21, 0, tegra_clk_gr2d_8), + INT8("3d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_3D, 24, 0, tegra_clk_gr3d_8), + INT8("vic03", mux_pllm_pllc_pllp_plla_pllc2_c3_clkm, CLK_SOURCE_VIC03, 178, 0, tegra_clk_vic03), +diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c +index 95e36ba64accf..8024c6d2b9e95 100644 +--- a/drivers/clk/ti/fapll.c ++++ b/drivers/clk/ti/fapll.c +@@ -498,6 +498,7 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd, + { + struct clk_init_data *init; + struct fapll_synth *synth; ++ struct clk *clk = ERR_PTR(-ENOMEM); + + init = kzalloc(sizeof(*init), GFP_KERNEL); + if (!init) +@@ -520,13 +521,19 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd, + synth->hw.init = init; + synth->clk_pll = pll_clk; + +- return clk_register(NULL, &synth->hw); ++ clk = clk_register(NULL, &synth->hw); ++ if (IS_ERR(clk)) { ++ pr_err("failed to register clock\n"); ++ goto free; ++ } ++ ++ return clk; + + free: + kfree(synth); + kfree(init); + +- return ERR_PTR(-ENOMEM); ++ return clk; + } + + static void __init ti_fapll_setup(struct device_node *node) +diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig +index 68b087bff59cc..2be849bb794ac 100644 +--- a/drivers/clocksource/Kconfig ++++ b/drivers/clocksource/Kconfig +@@ -654,7 +654,7 @@ config ATCPIT100_TIMER + + config RISCV_TIMER + bool "Timer for the RISC-V platform" if COMPILE_TEST +- depends on GENERIC_SCHED_CLOCK && RISCV ++ depends on GENERIC_SCHED_CLOCK && RISCV && RISCV_SBI + select 
TIMER_PROBE + select TIMER_OF + help +diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c +index 6c3e841801461..d0177824c518b 100644 +--- a/drivers/clocksource/arm_arch_timer.c ++++ b/drivers/clocksource/arm_arch_timer.c +@@ -396,10 +396,10 @@ static void erratum_set_next_event_tval_generic(const int access, unsigned long + ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; + + if (access == ARCH_TIMER_PHYS_ACCESS) { +- cval = evt + arch_counter_get_cntpct(); ++ cval = evt + arch_counter_get_cntpct_stable(); + write_sysreg(cval, cntp_cval_el0); + } else { +- cval = evt + arch_counter_get_cntvct(); ++ cval = evt + arch_counter_get_cntvct_stable(); + write_sysreg(cval, cntv_cval_el0); + } + +@@ -822,15 +822,24 @@ static void arch_timer_evtstrm_enable(int divider) + + static void arch_timer_configure_evtstream(void) + { +- int evt_stream_div, pos; ++ int evt_stream_div, lsb; ++ ++ /* ++ * As the event stream can at most be generated at half the frequency ++ * of the counter, use half the frequency when computing the divider. ++ */ ++ evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2; ++ ++ /* ++ * Find the closest power of two to the divisor. If the adjacent bit ++ * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1). 
++ */ ++ lsb = fls(evt_stream_div) - 1; ++ if (lsb > 0 && (evt_stream_div & BIT(lsb - 1))) ++ lsb++; + +- /* Find the closest power of two to the divisor */ +- evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ; +- pos = fls(evt_stream_div); +- if (pos > 1 && !(evt_stream_div & (1 << (pos - 2)))) +- pos--; + /* enable event stream */ +- arch_timer_evtstrm_enable(min(pos, 15)); ++ arch_timer_evtstrm_enable(max(0, min(lsb, 15))); + } + + static void arch_counter_set_user_access(void) +diff --git a/drivers/clocksource/ingenic-timer.c b/drivers/clocksource/ingenic-timer.c +index 58fd9189fab7f..905fd6b163a81 100644 +--- a/drivers/clocksource/ingenic-timer.c ++++ b/drivers/clocksource/ingenic-timer.c +@@ -127,7 +127,7 @@ static irqreturn_t ingenic_tcu_cevt_cb(int irq, void *dev_id) + return IRQ_HANDLED; + } + +-static struct clk * __init ingenic_tcu_get_clock(struct device_node *np, int id) ++static struct clk *ingenic_tcu_get_clock(struct device_node *np, int id) + { + struct of_phandle_args args; + +diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c +index 80e9606020307..4efd0cf3b602d 100644 +--- a/drivers/clocksource/timer-cadence-ttc.c ++++ b/drivers/clocksource/timer-cadence-ttc.c +@@ -413,10 +413,8 @@ static int __init ttc_setup_clockevent(struct clk *clk, + ttcce->ttc.clk = clk; + + err = clk_prepare_enable(ttcce->ttc.clk); +- if (err) { +- kfree(ttcce); +- return err; +- } ++ if (err) ++ goto out_kfree; + + ttcce->ttc.clk_rate_change_nb.notifier_call = + ttc_rate_change_clockevent_cb; +@@ -426,7 +424,7 @@ static int __init ttc_setup_clockevent(struct clk *clk, + &ttcce->ttc.clk_rate_change_nb); + if (err) { + pr_warn("Unable to register clock notifier.\n"); +- return err; ++ goto out_kfree; + } + + ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk); +@@ -455,15 +453,17 @@ static int __init ttc_setup_clockevent(struct clk *clk, + + err = request_irq(irq, ttc_clock_event_interrupt, + IRQF_TIMER, ttcce->ce.name, 
ttcce); +- if (err) { +- kfree(ttcce); +- return err; +- } ++ if (err) ++ goto out_kfree; + + clockevents_config_and_register(&ttcce->ce, + ttcce->ttc.freq / PRESCALE, 1, 0xfffe); + + return 0; ++ ++out_kfree: ++ kfree(ttcce); ++ return err; + } + + static int __init ttc_timer_probe(struct platform_device *pdev) +diff --git a/drivers/clocksource/timer-orion.c b/drivers/clocksource/timer-orion.c +index d01ff41818676..5101e834d78ff 100644 +--- a/drivers/clocksource/timer-orion.c ++++ b/drivers/clocksource/timer-orion.c +@@ -143,7 +143,8 @@ static int __init orion_timer_init(struct device_node *np) + irq = irq_of_parse_and_map(np, 1); + if (irq <= 0) { + pr_err("%pOFn: unable to parse timer1 irq\n", np); +- return -EINVAL; ++ ret = -EINVAL; ++ goto out_unprep_clk; + } + + rate = clk_get_rate(clk); +@@ -160,7 +161,7 @@ static int __init orion_timer_init(struct device_node *np) + clocksource_mmio_readl_down); + if (ret) { + pr_err("Failed to initialize mmio timer\n"); +- return ret; ++ goto out_unprep_clk; + } + + sched_clock_register(orion_read_sched_clock, 32, rate); +@@ -170,7 +171,7 @@ static int __init orion_timer_init(struct device_node *np) + "orion_event", NULL); + if (ret) { + pr_err("%pOFn: unable to setup irq\n", np); +- return ret; ++ goto out_unprep_clk; + } + + ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ; +@@ -183,5 +184,9 @@ static int __init orion_timer_init(struct device_node *np) + orion_delay_timer_init(rate); + + return 0; ++ ++out_unprep_clk: ++ clk_disable_unprepare(clk); ++ return ret; + } + TIMER_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init); +diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c +index 039c54a78aa57..710acc0a37044 100644 +--- a/drivers/counter/microchip-tcb-capture.c ++++ b/drivers/counter/microchip-tcb-capture.c +@@ -183,16 +183,20 @@ static int mchp_tc_count_action_get(struct counter_device *counter, + + regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], 
CMR), &cmr); + +- *action = MCHP_TC_SYNAPSE_ACTION_NONE; +- +- if (cmr & ATMEL_TC_ETRGEDG_NONE) ++ switch (cmr & ATMEL_TC_ETRGEDG) { ++ default: + *action = MCHP_TC_SYNAPSE_ACTION_NONE; +- else if (cmr & ATMEL_TC_ETRGEDG_RISING) ++ break; ++ case ATMEL_TC_ETRGEDG_RISING: + *action = MCHP_TC_SYNAPSE_ACTION_RISING_EDGE; +- else if (cmr & ATMEL_TC_ETRGEDG_FALLING) ++ break; ++ case ATMEL_TC_ETRGEDG_FALLING: + *action = MCHP_TC_SYNAPSE_ACTION_FALLING_EDGE; +- else if (cmr & ATMEL_TC_ETRGEDG_BOTH) ++ break; ++ case ATMEL_TC_ETRGEDG_BOTH: + *action = MCHP_TC_SYNAPSE_ACTION_BOTH_EDGE; ++ break; ++ } + + return 0; + } +diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm +index 015ec0c028358..1f73fa75b1a05 100644 +--- a/drivers/cpufreq/Kconfig.arm ++++ b/drivers/cpufreq/Kconfig.arm +@@ -94,7 +94,7 @@ config ARM_IMX6Q_CPUFREQ + tristate "Freescale i.MX6 cpufreq support" + depends on ARCH_MXC + depends on REGULATOR_ANATOP +- select NVMEM_IMX_OCOTP ++ depends on NVMEM_IMX_OCOTP || COMPILE_TEST + select PM_OPP + help + This adds cpufreq driver support for Freescale i.MX6 series SoCs. 
+diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c +index 39e34f5066d3d..b0fc5e84f8570 100644 +--- a/drivers/cpufreq/armada-8k-cpufreq.c ++++ b/drivers/cpufreq/armada-8k-cpufreq.c +@@ -204,6 +204,12 @@ static void __exit armada_8k_cpufreq_exit(void) + } + module_exit(armada_8k_cpufreq_exit); + ++static const struct of_device_id __maybe_unused armada_8k_cpufreq_of_match[] = { ++ { .compatible = "marvell,ap806-cpu-clock" }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, armada_8k_cpufreq_of_match); ++ + MODULE_AUTHOR("Gregory Clement <gregory.clement@bootlin.com>"); + MODULE_DESCRIPTION("Armada 8K cpufreq driver"); + MODULE_LICENSE("GPL"); +diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c +index 5a7f6dafcddb6..ac57cddc5f2fe 100644 +--- a/drivers/cpufreq/highbank-cpufreq.c ++++ b/drivers/cpufreq/highbank-cpufreq.c +@@ -101,6 +101,13 @@ out_put_node: + } + module_init(hb_cpufreq_driver_init); + ++static const struct of_device_id __maybe_unused hb_cpufreq_of_match[] = { ++ { .compatible = "calxeda,highbank" }, ++ { .compatible = "calxeda,ecx-2000" }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, hb_cpufreq_of_match); ++ + MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>"); + MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver"); + MODULE_LICENSE("GPL"); +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c +index 36a3ccfe6d3d1..cb95da684457f 100644 +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@ -2207,9 +2207,9 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu, + unsigned int policy_min, + unsigned int policy_max) + { +- int max_freq = intel_pstate_get_max_freq(cpu); + int32_t max_policy_perf, min_policy_perf; + int max_state, turbo_max; ++ int max_freq; + + /* + * HWP needs some special consideration, because on BDX the +@@ -2223,6 +2223,7 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu, + cpu->pstate.max_pstate 
: cpu->pstate.turbo_pstate; + turbo_max = cpu->pstate.turbo_pstate; + } ++ max_freq = max_state * cpu->pstate.scaling; + + max_policy_perf = max_state * policy_max / max_freq; + if (policy_max == policy_min) { +@@ -2325,9 +2326,18 @@ static void intel_pstate_adjust_policy_max(struct cpudata *cpu, + static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, + struct cpufreq_policy_data *policy) + { ++ int max_freq; ++ + update_turbo_state(); +- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, +- intel_pstate_get_max_freq(cpu)); ++ if (hwp_active) { ++ int max_state, turbo_max; ++ ++ intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state); ++ max_freq = max_state * cpu->pstate.scaling; ++ } else { ++ max_freq = intel_pstate_get_max_freq(cpu); ++ } ++ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq); + + intel_pstate_adjust_policy_max(cpu, policy); + } +diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c +index 0ea88778882ac..86f612593e497 100644 +--- a/drivers/cpufreq/loongson1-cpufreq.c ++++ b/drivers/cpufreq/loongson1-cpufreq.c +@@ -216,6 +216,7 @@ static struct platform_driver ls1x_cpufreq_platdrv = { + + module_platform_driver(ls1x_cpufreq_platdrv); + ++MODULE_ALIAS("platform:ls1x-cpufreq"); + MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>"); + MODULE_DESCRIPTION("Loongson1 CPUFreq driver"); + MODULE_LICENSE("GPL"); +diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c +index 7d1212c9b7c88..a310372dc53e9 100644 +--- a/drivers/cpufreq/mediatek-cpufreq.c ++++ b/drivers/cpufreq/mediatek-cpufreq.c +@@ -540,6 +540,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = { + + { } + }; ++MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines); + + static int __init mtk_cpufreq_driver_init(void) + { +diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c +index d06b37822c3df..fba9937a406b3 100644 +--- 
a/drivers/cpufreq/qcom-cpufreq-nvmem.c ++++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c +@@ -464,6 +464,7 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst = { + { .compatible = "qcom,msm8960", .data = &match_data_krait }, + {}, + }; ++MODULE_DEVICE_TABLE(of, qcom_cpufreq_match_list); + + /* + * Since the driver depends on smem and nvmem drivers, which may +diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c +index 43db05b949d95..e5140ad63db83 100644 +--- a/drivers/cpufreq/scpi-cpufreq.c ++++ b/drivers/cpufreq/scpi-cpufreq.c +@@ -233,6 +233,7 @@ static struct platform_driver scpi_cpufreq_platdrv = { + }; + module_platform_driver(scpi_cpufreq_platdrv); + ++MODULE_ALIAS("platform:scpi-cpufreq"); + MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); + MODULE_DESCRIPTION("ARM SCPI CPUFreq interface driver"); + MODULE_LICENSE("GPL v2"); +diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c +index 4ac6fb23792a0..c40d3d7d4ea43 100644 +--- a/drivers/cpufreq/sti-cpufreq.c ++++ b/drivers/cpufreq/sti-cpufreq.c +@@ -292,6 +292,13 @@ register_cpufreq_dt: + } + module_init(sti_cpufreq_init); + ++static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = { ++ { .compatible = "st,stih407" }, ++ { .compatible = "st,stih410" }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match); ++ + MODULE_DESCRIPTION("STMicroelectronics CPUFreq/OPP driver"); + MODULE_AUTHOR("Ajitpal Singh <ajitpal.singh@st.com>"); + MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>"); +diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c +index 9907a165135b7..2deed8d8773fa 100644 +--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c ++++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c +@@ -167,6 +167,7 @@ static const struct of_device_id sun50i_cpufreq_match_list[] = { + { .compatible = "allwinner,sun50i-h6" }, + {} + }; ++MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list); + + static const struct 
of_device_id *sun50i_cpufreq_match_node(void) + { +diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c +index e89b905754d21..f711d8eaea6a2 100644 +--- a/drivers/cpufreq/vexpress-spc-cpufreq.c ++++ b/drivers/cpufreq/vexpress-spc-cpufreq.c +@@ -591,6 +591,7 @@ static struct platform_driver ve_spc_cpufreq_platdrv = { + }; + module_platform_driver(ve_spc_cpufreq_platdrv); + ++MODULE_ALIAS("platform:vexpress-spc-cpufreq"); + MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); + MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); + MODULE_DESCRIPTION("Vexpress SPC ARM big LITTLE cpufreq driver"); +diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig +index 37da0c070a883..9d6645b1f0abe 100644 +--- a/drivers/crypto/Kconfig ++++ b/drivers/crypto/Kconfig +@@ -548,6 +548,7 @@ config CRYPTO_DEV_ATMEL_SHA + + config CRYPTO_DEV_ATMEL_I2C + tristate ++ select BITREVERSE + + config CRYPTO_DEV_ATMEL_ECC + tristate "Support for Microchip / Atmel ECC hw accelerator" +diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +index a94bf28f858a7..4c5a2c11d7141 100644 +--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c ++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +@@ -262,13 +262,13 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) + u32 common; + u64 byte_count; + __le32 *bf; +- void *buf; ++ void *buf = NULL; + int j, i, todo; + int nbw = 0; + u64 fill, min_fill; + __be64 *bebits; + __le64 *lebits; +- void *result; ++ void *result = NULL; + u64 bs; + int digestsize; + dma_addr_t addr_res, addr_pad; +@@ -285,13 +285,17 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) + + /* the padding could be up to two block. 
*/ + buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA); +- if (!buf) +- return -ENOMEM; ++ if (!buf) { ++ err = -ENOMEM; ++ goto theend; ++ } + bf = (__le32 *)buf; + + result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA); +- if (!result) +- return -ENOMEM; ++ if (!result) { ++ err = -ENOMEM; ++ goto theend; ++ } + + flow = rctx->flow; + chan = &ce->chanlist[flow]; +@@ -403,11 +407,11 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) + dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE); + dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE); + +- kfree(buf); + + memcpy(areq->result, result, algt->alg.hash.halg.digestsize); +- kfree(result); + theend: ++ kfree(buf); ++ kfree(result); + crypto_finalize_hash_request(engine, breq, err); + return 0; + } +diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c +index 981de43ea5e24..2e3690f65786d 100644 +--- a/drivers/crypto/amcc/crypto4xx_core.c ++++ b/drivers/crypto/amcc/crypto4xx_core.c +@@ -917,7 +917,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req, + } + + pd->pd_ctl.w = PD_CTL_HOST_READY | +- ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) | ++ ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) || + (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ? 
+ PD_CTL_HASH_FINAL : 0); + pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen); +diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c +index cf5bd7666dfcd..8697ae53b0633 100644 +--- a/drivers/crypto/caam/caamalg.c ++++ b/drivers/crypto/caam/caamalg.c +@@ -3404,8 +3404,8 @@ static int caam_cra_init(struct crypto_skcipher *tfm) + fallback = crypto_alloc_skcipher(tfm_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { +- dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n", +- tfm_name, PTR_ERR(fallback)); ++ pr_err("Failed to allocate %s fallback: %ld\n", ++ tfm_name, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + +diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c +index 66f60d78bdc84..a24ae966df4a3 100644 +--- a/drivers/crypto/caam/caamalg_qi.c ++++ b/drivers/crypto/caam/caamalg_qi.c +@@ -2502,8 +2502,8 @@ static int caam_cra_init(struct crypto_skcipher *tfm) + fallback = crypto_alloc_skcipher(tfm_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { +- dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n", +- tfm_name, PTR_ERR(fallback)); ++ pr_err("Failed to allocate %s fallback: %ld\n", ++ tfm_name, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + +diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c +index 98c1ff1744bb1..a780e627838ae 100644 +--- a/drivers/crypto/caam/caamalg_qi2.c ++++ b/drivers/crypto/caam/caamalg_qi2.c +@@ -1611,7 +1611,8 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm) + fallback = crypto_alloc_skcipher(tfm_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { +- dev_err(ctx->dev, "Failed to allocate %s fallback: %ld\n", ++ dev_err(caam_alg->caam.dev, ++ "Failed to allocate %s fallback: %ld\n", + tfm_name, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } +diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c +index 
eb2418450f120..2e1562108a858 100644 +--- a/drivers/crypto/inside-secure/safexcel.c ++++ b/drivers/crypto/inside-secure/safexcel.c +@@ -1639,7 +1639,7 @@ static int safexcel_probe_generic(void *pdev, + + priv->ring[i].rdr_req = devm_kcalloc(dev, + EIP197_DEFAULT_RING_SIZE, +- sizeof(priv->ring[i].rdr_req), ++ sizeof(*priv->ring[i].rdr_req), + GFP_KERNEL); + if (!priv->ring[i].rdr_req) + return -ENOMEM; +diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c +index 4fd14d90cc409..1b1e0ab0a831a 100644 +--- a/drivers/crypto/omap-aes.c ++++ b/drivers/crypto/omap-aes.c +@@ -1137,7 +1137,7 @@ static int omap_aes_probe(struct platform_device *pdev) + if (err < 0) { + dev_err(dev, "%s: failed to get_sync(%d)\n", + __func__, err); +- goto err_res; ++ goto err_pm_disable; + } + + omap_aes_dma_stop(dd); +@@ -1246,6 +1246,7 @@ err_engine: + omap_aes_dma_cleanup(dd); + err_irq: + tasklet_kill(&dd->done_task); ++err_pm_disable: + pm_runtime_disable(dev); + err_res: + dd = NULL; +diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c +index 6b9d47682d04d..52ef80efeddc6 100644 +--- a/drivers/crypto/qat/qat_common/qat_hal.c ++++ b/drivers/crypto/qat/qat_common/qat_hal.c +@@ -1146,7 +1146,7 @@ static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle, + unsigned short mask; + unsigned short dr_offset = 0x10; + +- status = ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); ++ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); + if (CE_INUSE_CONTEXTS & ctx_enables) { + if (ctx & 0x1) { + pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx); +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c +index 66773892f665d..a713a35dc5022 100644 +--- a/drivers/crypto/talitos.c ++++ b/drivers/crypto/talitos.c +@@ -460,7 +460,7 @@ DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE) + /* + * locate current (offending) descriptor + */ +-static u32 current_desc_hdr(struct device *dev, int ch) ++static __be32 
current_desc_hdr(struct device *dev, int ch) + { + struct talitos_private *priv = dev_get_drvdata(dev); + int tail, iter; +@@ -478,7 +478,7 @@ static u32 current_desc_hdr(struct device *dev, int ch) + + iter = tail; + while (priv->chan[ch].fifo[iter].dma_desc != cur_desc && +- priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) { ++ priv->chan[ch].fifo[iter].desc->next_desc != cpu_to_be32(cur_desc)) { + iter = (iter + 1) & (priv->fifo_len - 1); + if (iter == tail) { + dev_err(dev, "couldn't locate current descriptor\n"); +@@ -486,7 +486,7 @@ static u32 current_desc_hdr(struct device *dev, int ch) + } + } + +- if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) { ++ if (priv->chan[ch].fifo[iter].desc->next_desc == cpu_to_be32(cur_desc)) { + struct talitos_edesc *edesc; + + edesc = container_of(priv->chan[ch].fifo[iter].desc, +@@ -501,13 +501,13 @@ static u32 current_desc_hdr(struct device *dev, int ch) + /* + * user diagnostics; report root cause of error based on execution unit status + */ +-static void report_eu_error(struct device *dev, int ch, u32 desc_hdr) ++static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr) + { + struct talitos_private *priv = dev_get_drvdata(dev); + int i; + + if (!desc_hdr) +- desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF); ++ desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF)); + + switch (desc_hdr & DESC_HDR_SEL0_MASK) { + case DESC_HDR_SEL0_AFEU: +diff --git a/drivers/dax/super.c b/drivers/dax/super.c +index edc279be3e596..cadbd0a1a1ef0 100644 +--- a/drivers/dax/super.c ++++ b/drivers/dax/super.c +@@ -752,6 +752,7 @@ err_chrdev: + + static void __exit dax_core_exit(void) + { ++ dax_bus_exit(); + unregister_chrdev_region(dax_devt, MINORMASK+1); + ida_destroy(&dax_minor_ida); + dax_fs_exit(); +diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c +index 1c8f2581cb09a..1187e5e80eded 100644 +--- a/drivers/dma-buf/dma-resv.c ++++ b/drivers/dma-buf/dma-resv.c +@@ -200,7 
+200,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences) + max = max(old->shared_count + num_fences, + old->shared_max * 2); + } else { +- max = 4; ++ max = max(4ul, roundup_pow_of_two(num_fences)); + } + + new = dma_resv_list_alloc(max); +diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c +index 2753a6b916f60..9b0d463f89bbd 100644 +--- a/drivers/dma/mv_xor_v2.c ++++ b/drivers/dma/mv_xor_v2.c +@@ -771,8 +771,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev) + goto disable_clk; + + msi_desc = first_msi_entry(&pdev->dev); +- if (!msi_desc) ++ if (!msi_desc) { ++ ret = -ENODEV; + goto free_msi_irqs; ++ } + xor_dev->msi_desc = msi_desc; + + ret = devm_request_irq(&pdev->dev, msi_desc->irq, +diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c +index 82cf6c77f5c93..d3902784cae24 100644 +--- a/drivers/dma/ti/k3-udma.c ++++ b/drivers/dma/ti/k3-udma.c +@@ -3201,8 +3201,7 @@ static int udma_setup_resources(struct udma_dev *ud) + } else if (UDMA_CAP3_UCHAN_CNT(cap3)) { + ud->tpl_levels = 3; + ud->tpl_start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); +- ud->tpl_start_idx[0] = ud->tpl_start_idx[1] + +- UDMA_CAP3_HCHAN_CNT(cap3); ++ ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); + } else if (UDMA_CAP3_HCHAN_CNT(cap3)) { + ud->tpl_levels = 2; + ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c +index 1362274d840b9..620f7041db6b5 100644 +--- a/drivers/edac/amd64_edac.c ++++ b/drivers/edac/amd64_edac.c +@@ -18,6 +18,9 @@ static struct amd64_family_type *fam_type; + /* Per-node stuff */ + static struct ecc_settings **ecc_stngs; + ++/* Device for the PCI component */ ++static struct device *pci_ctl_dev; ++ + /* + * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing + * bandwidth to a valid bit pattern. 
The 'set' operation finds the 'matching- +@@ -2683,6 +2686,9 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2) + return -ENODEV; + } + ++ if (!pci_ctl_dev) ++ pci_ctl_dev = &pvt->F0->dev; ++ + edac_dbg(1, "F0: %s\n", pci_name(pvt->F0)); + edac_dbg(1, "F3: %s\n", pci_name(pvt->F3)); + edac_dbg(1, "F6: %s\n", pci_name(pvt->F6)); +@@ -2707,6 +2713,9 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2) + return -ENODEV; + } + ++ if (!pci_ctl_dev) ++ pci_ctl_dev = &pvt->F2->dev; ++ + edac_dbg(1, "F1: %s\n", pci_name(pvt->F1)); + edac_dbg(1, "F2: %s\n", pci_name(pvt->F2)); + edac_dbg(1, "F3: %s\n", pci_name(pvt->F3)); +@@ -3623,21 +3632,10 @@ static void remove_one_instance(unsigned int nid) + + static void setup_pci_device(void) + { +- struct mem_ctl_info *mci; +- struct amd64_pvt *pvt; +- + if (pci_ctl) + return; + +- mci = edac_mc_find(0); +- if (!mci) +- return; +- +- pvt = mci->pvt_info; +- if (pvt->umc) +- pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR); +- else +- pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR); ++ pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR); + if (!pci_ctl) { + pr_warn("%s(): Unable to create PCI control\n", __func__); + pr_warn("%s(): PCI error report via EDAC not set\n", __func__); +@@ -3716,6 +3714,8 @@ static int __init amd64_edac_init(void) + return 0; + + err_pci: ++ pci_ctl_dev = NULL; ++ + msrs_free(msrs); + msrs = NULL; + +@@ -3745,6 +3745,8 @@ static void __exit amd64_edac_exit(void) + kfree(ecc_stngs); + ecc_stngs = NULL; + ++ pci_ctl_dev = NULL; ++ + msrs_free(msrs); + msrs = NULL; + } +diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c +index c8d11da85becf..7b52691c45d26 100644 +--- a/drivers/edac/i10nm_base.c ++++ b/drivers/edac/i10nm_base.c +@@ -6,6 +6,7 @@ + */ + + #include <linux/kernel.h> ++#include <linux/io.h> + #include <asm/cpu_device_id.h> + #include <asm/intel-family.h> + #include <asm/mce.h> +@@ 
-19,14 +20,16 @@ + #define i10nm_printk(level, fmt, arg...) \ + edac_printk(level, "i10nm", fmt, ##arg) + +-#define I10NM_GET_SCK_BAR(d, reg) \ ++#define I10NM_GET_SCK_BAR(d, reg) \ + pci_read_config_dword((d)->uracu, 0xd0, &(reg)) + #define I10NM_GET_IMC_BAR(d, i, reg) \ + pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg)) + #define I10NM_GET_DIMMMTR(m, i, j) \ +- (*(u32 *)((m)->mbase + 0x2080c + (i) * 0x4000 + (j) * 4)) ++ readl((m)->mbase + 0x2080c + (i) * 0x4000 + (j) * 4) + #define I10NM_GET_MCDDRTCFG(m, i, j) \ +- (*(u32 *)((m)->mbase + 0x20970 + (i) * 0x4000 + (j) * 4)) ++ readl((m)->mbase + 0x20970 + (i) * 0x4000 + (j) * 4) ++#define I10NM_GET_MCMTR(m, i) \ ++ readl((m)->mbase + 0x20ef8 + (i) * 0x4000) + + #define I10NM_GET_SCK_MMIO_BASE(reg) (GET_BITFIELD(reg, 0, 28) << 23) + #define I10NM_GET_IMC_MMIO_OFFSET(reg) (GET_BITFIELD(reg, 0, 10) << 12) +@@ -148,7 +151,7 @@ static bool i10nm_check_ecc(struct skx_imc *imc, int chan) + { + u32 mcmtr; + +- mcmtr = *(u32 *)(imc->mbase + 0x20ef8 + chan * 0x4000); ++ mcmtr = I10NM_GET_MCMTR(imc, chan); + edac_dbg(1, "ch%d mcmtr reg %x\n", chan, mcmtr); + + return !!GET_BITFIELD(mcmtr, 2, 2); +diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c +index 7f28edb070bd0..6c474fbef32af 100644 +--- a/drivers/edac/mce_amd.c ++++ b/drivers/edac/mce_amd.c +@@ -1003,7 +1003,7 @@ static void decode_smca_error(struct mce *m) + pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]); + + if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc) +- decode_dram_ecc(cpu_to_node(m->extcpu), m); ++ decode_dram_ecc(topology_die_id(m->extcpu), m); + } + + static inline void amd_decode_err_code(u16 ec) +diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c +index 4a410fd2ea9ae..92af97e00828f 100644 +--- a/drivers/extcon/extcon-max77693.c ++++ b/drivers/extcon/extcon-max77693.c +@@ -1277,4 +1277,4 @@ module_platform_driver(max77693_muic_driver); + MODULE_DESCRIPTION("Maxim MAX77693 Extcon driver"); 
+ MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>"); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("platform:extcon-max77693"); ++MODULE_ALIAS("platform:max77693-muic"); +diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c +index ce336899d6366..66196b293b6c2 100644 +--- a/drivers/firmware/arm_scmi/notify.c ++++ b/drivers/firmware/arm_scmi/notify.c +@@ -1474,17 +1474,17 @@ int scmi_notification_init(struct scmi_handle *handle) + ni->gid = gid; + ni->handle = handle; + ++ ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO, ++ sizeof(char *), GFP_KERNEL); ++ if (!ni->registered_protocols) ++ goto err; ++ + ni->notify_wq = alloc_workqueue(dev_name(handle->dev), + WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS, + 0); + if (!ni->notify_wq) + goto err; + +- ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO, +- sizeof(char *), GFP_KERNEL); +- if (!ni->registered_protocols) +- goto err; +- + mutex_init(&ni->pending_mtx); + hash_init(ni->pending_events_handlers); + +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c +index 6c6eec044a978..df3f9bcab581c 100644 +--- a/drivers/firmware/efi/efi.c ++++ b/drivers/firmware/efi/efi.c +@@ -57,6 +57,7 @@ struct mm_struct efi_mm = { + .mm_rb = RB_ROOT, + .mm_users = ATOMIC_INIT(2), + .mm_count = ATOMIC_INIT(1), ++ .write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq), + MMAP_LOCK_INITIALIZER(efi_mm) + .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock), + .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), +diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c +index c1bbba9ee93a3..440d99c63638b 100644 +--- a/drivers/firmware/tegra/bpmp-debugfs.c ++++ b/drivers/firmware/tegra/bpmp-debugfs.c +@@ -412,16 +412,12 @@ static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp, + goto out; + } + +- len = strlen(ppath) + strlen(name) + 1; ++ len = snprintf(pathbuf, pathlen, "%s%s/", ppath, name); + if (len >= pathlen) { + 
err = -EINVAL; + goto out; + } + +- strncpy(pathbuf, ppath, pathlen); +- strncat(pathbuf, name, strlen(name)); +- strcat(pathbuf, "/"); +- + err = bpmp_populate_debugfs_inband(bpmp, dentry, + pathbuf); + if (err < 0) +diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c +index c006ec008a1aa..90dbe58ca1edc 100644 +--- a/drivers/fsi/fsi-master-aspeed.c ++++ b/drivers/fsi/fsi-master-aspeed.c +@@ -8,6 +8,7 @@ + #include <linux/io.h> + #include <linux/mfd/syscon.h> + #include <linux/module.h> ++#include <linux/mutex.h> + #include <linux/of.h> + #include <linux/platform_device.h> + #include <linux/regmap.h> +@@ -19,6 +20,7 @@ + + struct fsi_master_aspeed { + struct fsi_master master; ++ struct mutex lock; /* protect HW access */ + struct device *dev; + void __iomem *base; + struct clk *clk; +@@ -254,6 +256,8 @@ static int aspeed_master_read(struct fsi_master *master, int link, + addr |= id << 21; + addr += link * FSI_HUB_LINK_SIZE; + ++ mutex_lock(&aspeed->lock); ++ + switch (size) { + case 1: + ret = opb_readb(aspeed, fsi_base + addr, val); +@@ -265,14 +269,14 @@ static int aspeed_master_read(struct fsi_master *master, int link, + ret = opb_readl(aspeed, fsi_base + addr, val); + break; + default: +- return -EINVAL; ++ ret = -EINVAL; ++ goto done; + } + + ret = check_errors(aspeed, ret); +- if (ret) +- return ret; +- +- return 0; ++done: ++ mutex_unlock(&aspeed->lock); ++ return ret; + } + + static int aspeed_master_write(struct fsi_master *master, int link, +@@ -287,6 +291,8 @@ static int aspeed_master_write(struct fsi_master *master, int link, + addr |= id << 21; + addr += link * FSI_HUB_LINK_SIZE; + ++ mutex_lock(&aspeed->lock); ++ + switch (size) { + case 1: + ret = opb_writeb(aspeed, fsi_base + addr, *(u8 *)val); +@@ -298,14 +304,14 @@ static int aspeed_master_write(struct fsi_master *master, int link, + ret = opb_writel(aspeed, fsi_base + addr, *(__be32 *)val); + break; + default: +- return -EINVAL; ++ ret = -EINVAL; ++ goto done; + } + + 
ret = check_errors(aspeed, ret); +- if (ret) +- return ret; +- +- return 0; ++done: ++ mutex_unlock(&aspeed->lock); ++ return ret; + } + + static int aspeed_master_link_enable(struct fsi_master *master, int link, +@@ -320,17 +326,21 @@ static int aspeed_master_link_enable(struct fsi_master *master, int link, + + reg = cpu_to_be32(0x80000000 >> bit); + +- if (!enable) +- return opb_writel(aspeed, ctrl_base + FSI_MCENP0 + (4 * idx), +- reg); ++ mutex_lock(&aspeed->lock); ++ ++ if (!enable) { ++ ret = opb_writel(aspeed, ctrl_base + FSI_MCENP0 + (4 * idx), reg); ++ goto done; ++ } + + ret = opb_writel(aspeed, ctrl_base + FSI_MSENP0 + (4 * idx), reg); + if (ret) +- return ret; ++ goto done; + + mdelay(FSI_LINK_ENABLE_SETUP_TIME); +- +- return 0; ++done: ++ mutex_unlock(&aspeed->lock); ++ return ret; + } + + static int aspeed_master_term(struct fsi_master *master, int link, uint8_t id) +@@ -431,9 +441,11 @@ static ssize_t cfam_reset_store(struct device *dev, struct device_attribute *att + { + struct fsi_master_aspeed *aspeed = dev_get_drvdata(dev); + ++ mutex_lock(&aspeed->lock); + gpiod_set_value(aspeed->cfam_reset_gpio, 1); + usleep_range(900, 1000); + gpiod_set_value(aspeed->cfam_reset_gpio, 0); ++ mutex_unlock(&aspeed->lock); + + return count; + } +@@ -597,6 +609,7 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev) + + dev_set_drvdata(&pdev->dev, aspeed); + ++ mutex_init(&aspeed->lock); + aspeed_master_init(aspeed); + + rc = fsi_master_register(&aspeed->master); +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c +index 6e3c4d7a7d146..4ad3c4b276dcf 100644 +--- a/drivers/gpio/gpiolib.c ++++ b/drivers/gpio/gpiolib.c +@@ -1477,7 +1477,8 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gc) + if (WARN_ON(gc->irq.irq_enable)) + return; + /* Check if the irqchip already has this hook... 
*/ +- if (irqchip->irq_enable == gpiochip_irq_enable) { ++ if (irqchip->irq_enable == gpiochip_irq_enable || ++ irqchip->irq_mask == gpiochip_irq_mask) { + /* + * ...and if so, give a gentle warning that this is bad + * practice. +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +index 65d1b23d7e746..b9c11c2b2885a 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +@@ -1414,10 +1414,12 @@ out: + pm_runtime_put_autosuspend(connector->dev->dev); + } + +- drm_dp_set_subconnector_property(&amdgpu_connector->base, +- ret, +- amdgpu_dig_connector->dpcd, +- amdgpu_dig_connector->downstream_ports); ++ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || ++ connector->connector_type == DRM_MODE_CONNECTOR_eDP) ++ drm_dp_set_subconnector_property(&amdgpu_connector->base, ++ ret, ++ amdgpu_dig_connector->dpcd, ++ amdgpu_dig_connector->downstream_ports); + return ret; + } + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +index 8c9bacfdbc300..c485ec86804e5 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +@@ -193,10 +193,14 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev) + } + + bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, +- int queue) ++ int pipe, int queue) + { +- /* Policy: make queue 0 of each pipe as high priority compute queue */ +- return (queue == 0); ++ bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev); ++ int cond; ++ /* Policy: alternate between normal and high priority */ ++ cond = multipipe_policy ? 
pipe : queue; ++ ++ return ((cond % 2) != 0); + + } + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +index 258498cbf1eba..f353a5b71804e 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +@@ -373,7 +373,7 @@ void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, + bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec, + int pipe, int queue); + bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, +- int queue); ++ int pipe, int queue); + int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me, + int pipe, int queue); + void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +index 3e4892b7b7d3c..ff4e226739308 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +@@ -494,13 +494,14 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev) + break; + } + +- if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) ++ if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) { + size = 0; +- else ++ } else { + size = amdgpu_gmc_get_vbios_fb_size(adev); + +- if (adev->mman.keep_stolen_vga_memory) +- size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION); ++ if (adev->mman.keep_stolen_vga_memory) ++ size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION); ++ } + + /* set to 0 if the pre-OS buffer uses up most of vram */ + if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +index 55f4b8c3b9338..4ebb43e090999 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +@@ -4334,7 +4334,8 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, + irq_type = 
AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + + ring->pipe; +- hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ? ++ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe, ++ ring->queue) ? + AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; + /* type-2 packets are deprecated on MEC, use type-3 instead */ + r = amdgpu_ring_init(adev, ring, 1024, +@@ -6360,7 +6361,8 @@ static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct + struct amdgpu_device *adev = ring->adev; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { +- if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) { ++ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe, ++ ring->queue)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + mqd->cp_hqd_queue_priority = + AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +index 94b7e0531d092..c36258d56b445 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +@@ -1915,7 +1915,8 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, + + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + + ring->pipe; + +- hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ? ++ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe, ++ ring->queue) ? 
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT; + /* type-2 packets are deprecated on MEC, use type-3 instead */ + r = amdgpu_ring_init(adev, ring, 1024, +@@ -4433,7 +4434,8 @@ static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *m + struct amdgpu_device *adev = ring->adev; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { +- if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) { ++ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe, ++ ring->queue)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + mqd->cp_hqd_queue_priority = + AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +index 0d8e203b10efb..957c12b727676 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +@@ -2228,7 +2228,8 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, + irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + + ring->pipe; +- hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ? ++ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe, ++ ring->queue) ? 
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; + /* type-2 packets are deprecated on MEC, use type-3 instead */ + return amdgpu_ring_init(adev, ring, 1024, +@@ -3383,7 +3384,9 @@ static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *m + struct amdgpu_device *adev = ring->adev; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { +- if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) { ++ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ++ ring->pipe, ++ ring->queue)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + mqd->cp_hqd_queue_priority = + AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +index 3de5e14c5ae31..d7f67620f57ba 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +@@ -774,6 +774,7 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size) + struct acpi_table_header *crat_table; + acpi_status status; + void *pcrat_image; ++ int rc = 0; + + if (!crat_image) + return -EINVAL; +@@ -798,14 +799,17 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size) + } + + pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL); +- if (!pcrat_image) +- return -ENOMEM; ++ if (!pcrat_image) { ++ rc = -ENOMEM; ++ goto out; ++ } + + memcpy(pcrat_image, crat_table, crat_table->length); + *crat_image = pcrat_image; + *size = crat_table->length; +- +- return 0; ++out: ++ acpi_put_table(crat_table); ++ return rc; + } + + /* Memory required to create Virtual CRAT. 
+@@ -988,6 +992,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) + CRAT_OEMID_LENGTH); + memcpy(crat_table->oem_table_id, acpi_table->oem_table_id, + CRAT_OEMTABLEID_LENGTH); ++ acpi_put_table(acpi_table); + } + crat_table->total_entries = 0; + crat_table->num_domains = 0; +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index 0f7749e9424d4..30c6b9edddb50 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -2278,7 +2278,8 @@ void amdgpu_dm_update_connector_after_detect( + + drm_connector_update_edid_property(connector, + aconnector->edid); +- drm_add_edid_modes(connector, aconnector->edid); ++ aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid); ++ drm_connector_list_update(connector); + + if (aconnector->dc_link->aux_mode) + drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +index ff1e9963ec7a2..98464886341f6 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +@@ -4230,7 +4230,7 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode) + + if (edp_config_set.bits.PANEL_MODE_EDP + != panel_mode_edp) { +- enum ddc_result result = DDC_RESULT_UNKNOWN; ++ enum dc_status result = DC_ERROR_UNEXPECTED; + + edp_config_set.bits.PANEL_MODE_EDP = + panel_mode_edp; +@@ -4240,7 +4240,7 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode) + &edp_config_set.raw, + sizeof(edp_config_set.raw)); + +- ASSERT(result == DDC_RESULT_SUCESSFULL); ++ ASSERT(result == DC_OK); + } + } + DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d " +diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +index 
b8695660b480e..09bc2c249e1af 100644 +--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c ++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +@@ -1614,7 +1614,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma + struct pwl_float_data_ex *rgb = rgb_regamma; + const struct hw_x_point *coord_x = coordinates_x; + +- build_coefficients(&coeff, true); ++ build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB); + + i = 0; + while (i != hw_points_num + 1) { +diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig +index 018383cfcfa79..5e95bcea43e92 100644 +--- a/drivers/gpu/drm/aspeed/Kconfig ++++ b/drivers/gpu/drm/aspeed/Kconfig +@@ -3,6 +3,7 @@ config DRM_ASPEED_GFX + tristate "ASPEED BMC Display Controller" + depends on DRM && OF + depends on (COMPILE_TEST || ARCH_ASPEED) ++ depends on MMU + select DRM_KMS_HELPER + select DRM_KMS_CMA_HELPER + select DMA_CMA if HAVE_DMA_CONTIGUOUS +diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c +index 514cbf0eac75a..e0e015243a602 100644 +--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c ++++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c +@@ -160,7 +160,7 @@ static int tpd12s015_probe(struct platform_device *pdev) + + /* Register the IRQ if the HPD GPIO is IRQ-capable. 
*/ + tpd->hpd_irq = gpiod_to_irq(tpd->hpd_gpio); +- if (tpd->hpd_irq) { ++ if (tpd->hpd_irq >= 0) { + ret = devm_request_threaded_irq(&pdev->dev, tpd->hpd_irq, NULL, + tpd12s015_hpd_isr, + IRQF_TRIGGER_RISING | +diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c +index 2510717d5a08f..e25181bf2c480 100644 +--- a/drivers/gpu/drm/drm_dp_aux_dev.c ++++ b/drivers/gpu/drm/drm_dp_aux_dev.c +@@ -63,7 +63,7 @@ static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index) + + mutex_lock(&aux_idr_mutex); + aux_dev = idr_find(&aux_idr, index); +- if (!kref_get_unless_zero(&aux_dev->refcount)) ++ if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount)) + aux_dev = NULL; + mutex_unlock(&aux_idr_mutex); + +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c +index 631125b46e04c..b7ddf504e0249 100644 +--- a/drivers/gpu/drm/drm_edid.c ++++ b/drivers/gpu/drm/drm_edid.c +@@ -3102,6 +3102,8 @@ static int drm_cvt_modes(struct drm_connector *connector, + + height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2; + switch (cvt->code[1] & 0x0c) { ++ /* default - because compiler doesn't see that we've enumerated all cases */ ++ default: + case 0x00: + width = height * 4 / 3; + break; +diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c +index 720a767118c9c..deb4fd13591d2 100644 +--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c ++++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c +@@ -2083,7 +2083,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev + DRM_INFO("failed to retrieve link info, disabling eDP\n"); + drm_encoder_cleanup(encoder); + cdv_intel_dp_destroy(connector); +- goto err_priv; ++ goto err_connector; + } else { + DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n", + intel_dp->dpcd[0], intel_dp->dpcd[1], +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +index 
b07dc1156a0e6..bcc80f428172b 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +@@ -382,7 +382,7 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry, + return true; + + if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) && +- (vma->node.start + vma->node.size - 1) >> 32) ++ (vma->node.start + vma->node.size + 4095) >> 32) + return true; + + if (flags & __EXEC_OBJECT_NEEDS_MAP && +diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c +index 961d671f171b4..f54087ac44d35 100644 +--- a/drivers/gpu/drm/imx/dcss/dcss-plane.c ++++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c +@@ -111,7 +111,8 @@ static bool dcss_plane_can_rotate(const struct drm_format_info *format, + supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | + DRM_MODE_REFLECT_MASK; + else if (!format->is_yuv && +- modifier == DRM_FORMAT_MOD_VIVANTE_TILED) ++ (modifier == DRM_FORMAT_MOD_VIVANTE_TILED || ++ modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED)) + supported_rotation = DRM_MODE_ROTATE_MASK | + DRM_MODE_REFLECT_MASK; + else if (format->is_yuv && linear_format && +@@ -273,6 +274,7 @@ static void dcss_plane_atomic_update(struct drm_plane *plane, + u32 src_w, src_h, dst_w, dst_h; + struct drm_rect src, dst; + bool enable = true; ++ bool is_rotation_90_or_270; + + if (!fb || !state->crtc || !state->visible) + return; +@@ -311,8 +313,13 @@ static void dcss_plane_atomic_update(struct drm_plane *plane, + + dcss_plane_atomic_set_base(dcss_plane); + ++ is_rotation_90_or_270 = state->rotation & (DRM_MODE_ROTATE_90 | ++ DRM_MODE_ROTATE_270); ++ + dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num, +- state->fb->format, src_w, src_h, ++ state->fb->format, ++ is_rotation_90_or_270 ? src_h : src_w, ++ is_rotation_90_or_270 ? 
src_w : src_h, + dst_w, dst_h, + drm_mode_vrefresh(&crtc_state->mode)); + +diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c +index 92f8bd907193f..210f5e1630081 100644 +--- a/drivers/gpu/drm/mcde/mcde_drv.c ++++ b/drivers/gpu/drm/mcde/mcde_drv.c +@@ -331,8 +331,8 @@ static int mcde_probe(struct platform_device *pdev) + } + + irq = platform_get_irq(pdev, 0); +- if (!irq) { +- ret = -EINVAL; ++ if (irq < 0) { ++ ret = irq; + goto clk_disable; + } + +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +index 8eba44be3a8ae..3064eac1a7507 100644 +--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c ++++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +@@ -359,7 +359,7 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = { + + static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = { + [MTK_DISP_OVL] = "ovl", +- [MTK_DISP_OVL_2L] = "ovl_2l", ++ [MTK_DISP_OVL_2L] = "ovl-2l", + [MTK_DISP_RDMA] = "rdma", + [MTK_DISP_WDMA] = "wdma", + [MTK_DISP_COLOR] = "color", +diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c +index 8b9c8dd788c41..3d1de9cbb1c8d 100644 +--- a/drivers/gpu/drm/meson/meson_drv.c ++++ b/drivers/gpu/drm/meson/meson_drv.c +@@ -389,15 +389,17 @@ static void meson_drv_unbind(struct device *dev) + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2); + } + ++ drm_dev_unregister(drm); ++ drm_kms_helper_poll_fini(drm); ++ drm_atomic_helper_shutdown(drm); ++ component_unbind_all(dev, drm); ++ drm_irq_uninstall(drm); ++ drm_dev_put(drm); ++ + if (priv->afbcd.ops) { + priv->afbcd.ops->reset(priv); + meson_rdma_free(priv); + } +- +- drm_dev_unregister(drm); +- drm_irq_uninstall(drm); +- drm_kms_helper_poll_fini(drm); +- drm_dev_put(drm); + } + + static const struct component_master_ops meson_drv_master_ops = { +diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c +index 29a8ff41595d2..aad75a22dc338 100644 +--- 
a/drivers/gpu/drm/meson/meson_dw_hdmi.c ++++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c +@@ -145,8 +145,6 @@ struct meson_dw_hdmi { + struct reset_control *hdmitx_apb; + struct reset_control *hdmitx_ctrl; + struct reset_control *hdmitx_phy; +- struct clk *hdmi_pclk; +- struct clk *venci_clk; + struct regulator *hdmi_supply; + u32 irq_stat; + struct dw_hdmi *hdmi; +@@ -941,6 +939,34 @@ static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi) + + } + ++static void meson_disable_regulator(void *data) ++{ ++ regulator_disable(data); ++} ++ ++static void meson_disable_clk(void *data) ++{ ++ clk_disable_unprepare(data); ++} ++ ++static int meson_enable_clk(struct device *dev, char *name) ++{ ++ struct clk *clk; ++ int ret; ++ ++ clk = devm_clk_get(dev, name); ++ if (IS_ERR(clk)) { ++ dev_err(dev, "Unable to get %s pclk\n", name); ++ return PTR_ERR(clk); ++ } ++ ++ ret = clk_prepare_enable(clk); ++ if (!ret) ++ ret = devm_add_action_or_reset(dev, meson_disable_clk, clk); ++ ++ return ret; ++} ++ + static int meson_dw_hdmi_bind(struct device *dev, struct device *master, + void *data) + { +@@ -989,6 +1015,10 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, + ret = regulator_enable(meson_dw_hdmi->hdmi_supply); + if (ret) + return ret; ++ ret = devm_add_action_or_reset(dev, meson_disable_regulator, ++ meson_dw_hdmi->hdmi_supply); ++ if (ret) ++ return ret; + } + + meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev, +@@ -1017,19 +1047,17 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, + if (IS_ERR(meson_dw_hdmi->hdmitx)) + return PTR_ERR(meson_dw_hdmi->hdmitx); + +- meson_dw_hdmi->hdmi_pclk = devm_clk_get(dev, "isfr"); +- if (IS_ERR(meson_dw_hdmi->hdmi_pclk)) { +- dev_err(dev, "Unable to get HDMI pclk\n"); +- return PTR_ERR(meson_dw_hdmi->hdmi_pclk); +- } +- clk_prepare_enable(meson_dw_hdmi->hdmi_pclk); ++ ret = meson_enable_clk(dev, "isfr"); ++ if (ret) ++ return ret; + +- meson_dw_hdmi->venci_clk = 
devm_clk_get(dev, "venci"); +- if (IS_ERR(meson_dw_hdmi->venci_clk)) { +- dev_err(dev, "Unable to get venci clk\n"); +- return PTR_ERR(meson_dw_hdmi->venci_clk); +- } +- clk_prepare_enable(meson_dw_hdmi->venci_clk); ++ ret = meson_enable_clk(dev, "iahb"); ++ if (ret) ++ return ret; ++ ++ ret = meson_enable_clk(dev, "venci"); ++ if (ret) ++ return ret; + + dw_plat_data->regm = devm_regmap_init(dev, NULL, meson_dw_hdmi, + &meson_dw_hdmi_regmap_config); +@@ -1062,10 +1090,10 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, + + encoder->possible_crtcs = BIT(0); + +- DRM_DEBUG_DRIVER("encoder initialized\n"); +- + meson_dw_hdmi_init(meson_dw_hdmi); + ++ DRM_DEBUG_DRIVER("encoder initialized\n"); ++ + /* Bridge / Connector */ + + dw_plat_data->priv_data = meson_dw_hdmi; +diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig +index e5816b4984942..dabb4a1ccdcf7 100644 +--- a/drivers/gpu/drm/msm/Kconfig ++++ b/drivers/gpu/drm/msm/Kconfig +@@ -4,8 +4,8 @@ config DRM_MSM + tristate "MSM DRM" + depends on DRM + depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST) ++ depends on IOMMU_SUPPORT + depends on OF && COMMON_CLK +- depends on MMU + depends on QCOM_OCMEM || QCOM_OCMEM=n + select IOMMU_IO_PGTABLE + select QCOM_MDT_LOADER if ARCH_QCOM +diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +index d6804a8023555..69ed2c6094665 100644 +--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +@@ -755,12 +755,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu) + gpu_write(gpu, REG_A5XX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + +- /* Disable preemption if WHERE_AM_I isn't available */ +- if (!a5xx_gpu->has_whereami && gpu->nr_rings > 1) { +- a5xx_preempt_fini(gpu); +- gpu->nr_rings = 1; +- } else { +- /* Create a privileged buffer for the RPTR shadow */ ++ /* Create a privileged buffer for the RPTR shadow */ ++ if 
(a5xx_gpu->has_whereami) { + if (!a5xx_gpu->shadow_bo) { + a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, + sizeof(u32) * gpu->nr_rings, +@@ -774,6 +770,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu) + + gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR, + REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0])); ++ } else if (gpu->nr_rings > 1) { ++ /* Disable preemption if WHERE_AM_I isn't available */ ++ a5xx_preempt_fini(gpu); ++ gpu->nr_rings = 1; + } + + a5xx_preempt_hw_init(gpu); +@@ -1207,7 +1207,9 @@ static int a5xx_pm_resume(struct msm_gpu *gpu) + static int a5xx_pm_suspend(struct msm_gpu *gpu) + { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); ++ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + u32 mask = 0xf; ++ int i, ret; + + /* A510 has 3 XIN ports in VBIF */ + if (adreno_is_a510(adreno_gpu)) +@@ -1227,7 +1229,15 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu) + gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000); + gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000); + +- return msm_gpu_pm_suspend(gpu); ++ ret = msm_gpu_pm_suspend(gpu); ++ if (ret) ++ return ret; ++ ++ if (a5xx_gpu->has_whereami) ++ for (i = 0; i < gpu->nr_rings; i++) ++ a5xx_gpu->shadow[i] = 0; ++ ++ return 0; + } + + static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +index 948f3656c20ca..420ca4a0eb5f7 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +@@ -1045,12 +1045,21 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu) + { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); ++ int i, ret; + + trace_msm_gpu_suspend(0); + + devfreq_suspend_device(gpu->devfreq.devfreq); + +- return a6xx_gmu_stop(a6xx_gpu); ++ ret = a6xx_gmu_stop(a6xx_gpu); ++ if (ret) ++ return ret; ++ ++ if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) ++ for 
(i = 0; i < gpu->nr_rings; i++) ++ a6xx_gpu->shadow[i] = 0; ++ ++ return 0; + } + + static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c +index 393858ef8a832..37c8270681c23 100644 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c +@@ -219,9 +219,6 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms, + int i, ret = 0; + u64 avg_bw; + +- if (!kms->num_paths) +- return -EINVAL; +- + drm_for_each_crtc(tmp_crtc, crtc->dev) { + if (tmp_crtc->enabled && + curr_client_type == +@@ -239,6 +236,9 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms, + } + } + ++ if (!kms->num_paths) ++ return 0; ++ + avg_bw = perf.bw_ctl; + do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/ + +diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c +index b15b4ce4ba35a..4963bfe6a4726 100644 +--- a/drivers/gpu/drm/msm/dp/dp_catalog.c ++++ b/drivers/gpu/drm/msm/dp/dp_catalog.c +@@ -572,6 +572,19 @@ void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog) + dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN); + } + ++u32 dp_catalog_hpd_get_state_status(struct dp_catalog *dp_catalog) ++{ ++ struct dp_catalog_private *catalog = container_of(dp_catalog, ++ struct dp_catalog_private, dp_catalog); ++ u32 status; ++ ++ status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); ++ status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT; ++ status &= DP_DP_HPD_STATE_STATUS_BITS_MASK; ++ ++ return status; ++} ++ + u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog) + { + struct dp_catalog_private *catalog = container_of(dp_catalog, +diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h +index 4b7666f1fe6fe..6d257dbebf294 100644 +--- a/drivers/gpu/drm/msm/dp/dp_catalog.h ++++ b/drivers/gpu/drm/msm/dp/dp_catalog.h +@@ 
-97,6 +97,7 @@ void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable); + void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, + u32 intr_mask, bool en); + void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog); ++u32 dp_catalog_hpd_get_state_status(struct dp_catalog *dp_catalog); + u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog); + void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog); + int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level, +diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c +index 2e3e1917351f0..c83a1650437da 100644 +--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c ++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c +@@ -1061,23 +1061,15 @@ static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl, + static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl, + u8 *link_status) + { +- int len = 0; +- u32 const offset = DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS; +- u32 link_status_read_max_retries = 100; +- +- while (--link_status_read_max_retries) { +- len = drm_dp_dpcd_read_link_status(ctrl->aux, +- link_status); +- if (len != DP_LINK_STATUS_SIZE) { +- DRM_ERROR("DP link status read failed, err: %d\n", len); +- return len; +- } ++ int ret = 0, len; + +- if (!(link_status[offset] & DP_LINK_STATUS_UPDATED)) +- return 0; ++ len = drm_dp_dpcd_read_link_status(ctrl->aux, link_status); ++ if (len != DP_LINK_STATUS_SIZE) { ++ DRM_ERROR("DP link status read failed, err: %d\n", len); ++ ret = -EINVAL; + } + +- return -ETIMEDOUT; ++ return ret; + } + + static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, +@@ -1400,6 +1392,8 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip) + void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl) + { + struct dp_ctrl_private *ctrl; ++ struct dp_io *dp_io; ++ struct phy *phy; + + if (!dp_ctrl) { + DRM_ERROR("Invalid input data\n"); +@@ -1407,8 +1401,11 @@ void dp_ctrl_host_deinit(struct dp_ctrl 
*dp_ctrl) + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); ++ dp_io = &ctrl->parser->io; ++ phy = dp_io->phy; + + dp_catalog_ctrl_enable_irq(ctrl->catalog, false); ++ phy_exit(phy); + + DRM_DEBUG_DP("Host deinitialized successfully\n"); + } +@@ -1643,9 +1640,6 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) + if (rc) + return rc; + +- ctrl->link->phy_params.p_level = 0; +- ctrl->link->phy_params.v_level = 0; +- + while (--link_train_max_retries && + !atomic_read(&ctrl->dp_ctrl.aborted)) { + rc = dp_ctrl_reinitialize_mainlink(ctrl); +diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c +index e175aa3fd3a93..fe0279542a1c2 100644 +--- a/drivers/gpu/drm/msm/dp/dp_display.c ++++ b/drivers/gpu/drm/msm/dp/dp_display.c +@@ -108,14 +108,12 @@ struct dp_display_private { + /* event related only access by event thread */ + struct mutex event_mutex; + wait_queue_head_t event_q; +- atomic_t hpd_state; ++ u32 hpd_state; + u32 event_pndx; + u32 event_gndx; + struct dp_event event_list[DP_EVENT_Q_MAX]; + spinlock_t event_lock; + +- struct completion resume_comp; +- + struct dp_audio *audio; + }; + +@@ -335,6 +333,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp) + dp->dp_display.max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ; + dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes; + ++ dp_link_reset_phy_params_vx_px(dp->link); + rc = dp_ctrl_on_link(dp->ctrl); + if (rc) { + DRM_ERROR("failed to complete DP link training\n"); +@@ -366,6 +365,20 @@ static void dp_display_host_init(struct dp_display_private *dp) + dp->core_initialized = true; + } + ++static void dp_display_host_deinit(struct dp_display_private *dp) ++{ ++ if (!dp->core_initialized) { ++ DRM_DEBUG_DP("DP core not initialized\n"); ++ return; ++ } ++ ++ dp_ctrl_host_deinit(dp->ctrl); ++ dp_aux_deinit(dp->aux); ++ dp_power_deinit(dp->power); ++ ++ dp->core_initialized = false; ++} ++ + static int dp_display_usbpd_configure_cb(struct device *dev) + { 
+ int rc = 0; +@@ -490,7 +503,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) + + mutex_lock(&dp->event_mutex); + +- state = atomic_read(&dp->hpd_state); ++ state = dp->hpd_state; + if (state == ST_SUSPEND_PENDING) { + mutex_unlock(&dp->event_mutex); + return 0; +@@ -508,17 +521,14 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) + return 0; + } + +- if (state == ST_SUSPENDED) +- tout = DP_TIMEOUT_NONE; +- +- atomic_set(&dp->hpd_state, ST_CONNECT_PENDING); ++ dp->hpd_state = ST_CONNECT_PENDING; + + hpd->hpd_high = 1; + + ret = dp_display_usbpd_configure_cb(&dp->pdev->dev); + if (ret) { /* failed */ + hpd->hpd_high = 0; +- atomic_set(&dp->hpd_state, ST_DISCONNECTED); ++ dp->hpd_state = ST_DISCONNECTED; + } + + /* start sanity checking */ +@@ -539,10 +549,10 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data) + + mutex_lock(&dp->event_mutex); + +- state = atomic_read(&dp->hpd_state); ++ state = dp->hpd_state; + if (state == ST_CONNECT_PENDING) { + dp_display_enable(dp, 0); +- atomic_set(&dp->hpd_state, ST_CONNECTED); ++ dp->hpd_state = ST_CONNECTED; + } + + mutex_unlock(&dp->event_mutex); +@@ -553,7 +563,14 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data) + static void dp_display_handle_plugged_change(struct msm_dp *dp_display, + bool plugged) + { +- if (dp_display->plugged_cb && dp_display->codec_dev) ++ struct dp_display_private *dp; ++ ++ dp = container_of(dp_display, ++ struct dp_display_private, dp_display); ++ ++ /* notify audio subsystem only if sink supports audio */ ++ if (dp_display->plugged_cb && dp_display->codec_dev && ++ dp->audio_supported) + dp_display->plugged_cb(dp_display->codec_dev, plugged); + } + +@@ -567,7 +584,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) + + mutex_lock(&dp->event_mutex); + +- state = atomic_read(&dp->hpd_state); ++ state = dp->hpd_state; + if (state == ST_SUSPEND_PENDING) { + 
mutex_unlock(&dp->event_mutex); + return 0; +@@ -585,7 +602,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) + return 0; + } + +- atomic_set(&dp->hpd_state, ST_DISCONNECT_PENDING); ++ dp->hpd_state = ST_DISCONNECT_PENDING; + + /* disable HPD plug interrupt until disconnect is done */ + dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK +@@ -620,10 +637,10 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data + + mutex_lock(&dp->event_mutex); + +- state = atomic_read(&dp->hpd_state); ++ state = dp->hpd_state; + if (state == ST_DISCONNECT_PENDING) { + dp_display_disable(dp, 0); +- atomic_set(&dp->hpd_state, ST_DISCONNECTED); ++ dp->hpd_state = ST_DISCONNECTED; + } + + mutex_unlock(&dp->event_mutex); +@@ -638,7 +655,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) + mutex_lock(&dp->event_mutex); + + /* irq_hpd can happen at either connected or disconnected state */ +- state = atomic_read(&dp->hpd_state); ++ state = dp->hpd_state; + if (state == ST_SUSPEND_PENDING) { + mutex_unlock(&dp->event_mutex); + return 0; +@@ -789,17 +806,10 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data) + + dp_display = g_dp_display; + +- if (dp_display->power_on) { +- DRM_DEBUG_DP("Link already setup, return\n"); +- return 0; +- } +- + rc = dp_ctrl_on_stream(dp->ctrl); + if (!rc) + dp_display->power_on = true; + +- /* complete resume_comp regardless it is armed or not */ +- complete(&dp->resume_comp); + return rc; + } + +@@ -828,9 +838,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data) + + dp_display = g_dp_display; + +- if (!dp_display->power_on) +- return -EINVAL; +- + /* wait only if audio was enabled */ + if (dp_display->audio_enabled) { + if (!wait_for_completion_timeout(&dp->audio_comp, +@@ -1151,9 +1158,6 @@ static int dp_display_probe(struct platform_device *pdev) + } + + mutex_init(&dp->event_mutex); +- +- 
init_completion(&dp->resume_comp); +- + g_dp_display = &dp->dp_display; + + /* Store DP audio handle inside DP display */ +@@ -1189,20 +1193,54 @@ static int dp_display_remove(struct platform_device *pdev) + + static int dp_pm_resume(struct device *dev) + { ++ struct platform_device *pdev = to_platform_device(dev); ++ struct msm_dp *dp_display = platform_get_drvdata(pdev); ++ struct dp_display_private *dp; ++ u32 status; ++ ++ dp = container_of(dp_display, struct dp_display_private, dp_display); ++ ++ mutex_lock(&dp->event_mutex); ++ ++ /* start from disconnected state */ ++ dp->hpd_state = ST_DISCONNECTED; ++ ++ /* turn on dp ctrl/phy */ ++ dp_display_host_init(dp); ++ ++ dp_catalog_ctrl_hpd_config(dp->catalog); ++ ++ status = dp_catalog_hpd_get_state_status(dp->catalog); ++ ++ if (status) { ++ dp->dp_display.is_connected = true; ++ } else { ++ dp->dp_display.is_connected = false; ++ /* make sure next resume host_init be called */ ++ dp->core_initialized = false; ++ } ++ ++ mutex_unlock(&dp->event_mutex); ++ + return 0; + } + + static int dp_pm_suspend(struct device *dev) + { + struct platform_device *pdev = to_platform_device(dev); +- struct dp_display_private *dp = platform_get_drvdata(pdev); ++ struct msm_dp *dp_display = platform_get_drvdata(pdev); ++ struct dp_display_private *dp; + +- if (!dp) { +- DRM_ERROR("DP driver bind failed. 
Invalid driver data\n"); +- return -EINVAL; +- } ++ dp = container_of(dp_display, struct dp_display_private, dp_display); ++ ++ mutex_lock(&dp->event_mutex); + +- atomic_set(&dp->hpd_state, ST_SUSPENDED); ++ if (dp->core_initialized == true) ++ dp_display_host_deinit(dp); ++ ++ dp->hpd_state = ST_SUSPENDED; ++ ++ mutex_unlock(&dp->event_mutex); + + return 0; + } +@@ -1317,19 +1355,6 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, + return 0; + } + +-static int dp_display_wait4resume_done(struct dp_display_private *dp) +-{ +- int ret = 0; +- +- reinit_completion(&dp->resume_comp); +- if (!wait_for_completion_timeout(&dp->resume_comp, +- WAIT_FOR_RESUME_TIMEOUT_JIFFIES)) { +- DRM_ERROR("wait4resume_done timedout\n"); +- ret = -ETIMEDOUT; +- } +- return ret; +-} +- + int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) + { + int rc = 0; +@@ -1344,6 +1369,8 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) + + mutex_lock(&dp_display->event_mutex); + ++ dp_del_event(dp_display, EV_CONNECT_PENDING_TIMEOUT); ++ + rc = dp_display_set_mode(dp, &dp_display->dp_mode); + if (rc) { + DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc); +@@ -1358,15 +1385,10 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) + return rc; + } + +- state = atomic_read(&dp_display->hpd_state); +- if (state == ST_SUSPENDED) { +- /* start link training */ +- dp_add_event(dp_display, EV_HPD_PLUG_INT, 0, 0); +- mutex_unlock(&dp_display->event_mutex); ++ state = dp_display->hpd_state; + +- /* wait until dp interface is up */ +- goto resume_done; +- } ++ if (state == ST_SUSPEND_PENDING) ++ dp_display_host_init(dp_display); + + dp_display_enable(dp_display, 0); + +@@ -1377,21 +1399,15 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) + dp_display_unprepare(dp); + } + +- dp_del_event(dp_display, EV_CONNECT_PENDING_TIMEOUT); +- + if (state == ST_SUSPEND_PENDING) + 
dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0); + + /* completed connection */ +- atomic_set(&dp_display->hpd_state, ST_CONNECTED); ++ dp_display->hpd_state = ST_CONNECTED; + + mutex_unlock(&dp_display->event_mutex); + + return rc; +- +-resume_done: +- dp_display_wait4resume_done(dp_display); +- return rc; + } + + int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder) +@@ -1415,20 +1431,20 @@ int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder) + + mutex_lock(&dp_display->event_mutex); + ++ dp_del_event(dp_display, EV_DISCONNECT_PENDING_TIMEOUT); ++ + dp_display_disable(dp_display, 0); + + rc = dp_display_unprepare(dp); + if (rc) + DRM_ERROR("DP display unprepare failed, rc=%d\n", rc); + +- dp_del_event(dp_display, EV_DISCONNECT_PENDING_TIMEOUT); +- +- state = atomic_read(&dp_display->hpd_state); ++ state = dp_display->hpd_state; + if (state == ST_DISCONNECT_PENDING) { + /* completed disconnection */ +- atomic_set(&dp_display->hpd_state, ST_DISCONNECTED); ++ dp_display->hpd_state = ST_DISCONNECTED; + } else { +- atomic_set(&dp_display->hpd_state, ST_SUSPEND_PENDING); ++ dp_display->hpd_state = ST_SUSPEND_PENDING; + } + + mutex_unlock(&dp_display->event_mutex); +diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c +index c811da515fb3b..be986da78c4a5 100644 +--- a/drivers/gpu/drm/msm/dp/dp_link.c ++++ b/drivers/gpu/drm/msm/dp/dp_link.c +@@ -773,7 +773,8 @@ static int dp_link_process_link_training_request(struct dp_link_private *link) + link->request.test_lane_count); + + link->dp_link.link_params.num_lanes = link->request.test_lane_count; +- link->dp_link.link_params.rate = link->request.test_link_rate; ++ link->dp_link.link_params.rate = ++ drm_dp_bw_code_to_link_rate(link->request.test_link_rate); + + return 0; + } +@@ -869,6 +870,9 @@ static int dp_link_parse_vx_px(struct dp_link_private *link) + drm_dp_get_adjust_request_voltage(link->link_status, 0); + link->dp_link.phy_params.p_level = + 
drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0); ++ ++ link->dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT; ++ + DRM_DEBUG_DP("Requested: v_level = 0x%x, p_level = 0x%x\n", + link->dp_link.phy_params.v_level, + link->dp_link.phy_params.p_level); +@@ -911,7 +915,8 @@ static int dp_link_process_phy_test_pattern_request( + link->request.test_lane_count); + + link->dp_link.link_params.num_lanes = link->request.test_lane_count; +- link->dp_link.link_params.rate = link->request.test_link_rate; ++ link->dp_link.link_params.rate = ++ drm_dp_bw_code_to_link_rate(link->request.test_link_rate); + + ret = dp_link_parse_vx_px(link); + +@@ -939,22 +944,20 @@ static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) + */ + static int dp_link_process_link_status_update(struct dp_link_private *link) + { +- if (!(get_link_status(link->link_status, +- DP_LANE_ALIGN_STATUS_UPDATED) & +- DP_LINK_STATUS_UPDATED) || +- (drm_dp_clock_recovery_ok(link->link_status, +- link->dp_link.link_params.num_lanes) && +- drm_dp_channel_eq_ok(link->link_status, +- link->dp_link.link_params.num_lanes))) +- return -EINVAL; ++ bool channel_eq_done = drm_dp_channel_eq_ok(link->link_status, ++ link->dp_link.link_params.num_lanes); + +- DRM_DEBUG_DP("channel_eq_done = %d, clock_recovery_done = %d\n", +- drm_dp_clock_recovery_ok(link->link_status, +- link->dp_link.link_params.num_lanes), +- drm_dp_clock_recovery_ok(link->link_status, +- link->dp_link.link_params.num_lanes)); ++ bool clock_recovery_done = drm_dp_clock_recovery_ok(link->link_status, ++ link->dp_link.link_params.num_lanes); + +- return 0; ++ DRM_DEBUG_DP("channel_eq_done = %d, clock_recovery_done = %d\n", ++ channel_eq_done, clock_recovery_done); ++ ++ if (channel_eq_done && clock_recovery_done) ++ return -EINVAL; ++ ++ ++ return 0; + } + + /** +@@ -1156,6 +1159,12 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status) + return 0; + } + ++void dp_link_reset_phy_params_vx_px(struct 
dp_link *dp_link) ++{ ++ dp_link->phy_params.v_level = 0; ++ dp_link->phy_params.p_level = 0; ++} ++ + u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) + { + u32 tbd; +diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h +index 49811b6221e53..9dd4dd9265304 100644 +--- a/drivers/gpu/drm/msm/dp/dp_link.h ++++ b/drivers/gpu/drm/msm/dp/dp_link.h +@@ -135,6 +135,7 @@ static inline u32 dp_link_bit_depth_to_bpc(u32 tbd) + } + } + ++void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link); + u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp); + int dp_link_process_request(struct dp_link *dp_link); + int dp_link_get_colorimetry_config(struct dp_link *dp_link); +diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h +index 43042ff90a199..268602803d9a3 100644 +--- a/drivers/gpu/drm/msm/dp/dp_reg.h ++++ b/drivers/gpu/drm/msm/dp/dp_reg.h +@@ -32,6 +32,8 @@ + #define DP_DP_IRQ_HPD_INT_ACK (0x00000002) + #define DP_DP_HPD_REPLUG_INT_ACK (0x00000004) + #define DP_DP_HPD_UNPLUG_INT_ACK (0x00000008) ++#define DP_DP_HPD_STATE_STATUS_BITS_MASK (0x0000000F) ++#define DP_DP_HPD_STATE_STATUS_BITS_SHIFT (0x1C) + + #define REG_DP_DP_HPD_INT_MASK (0x0000000C) + #define DP_DP_HPD_PLUG_INT_MASK (0x00000001) +diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c +index 6ac04fc303f56..e4e9bf04b7368 100644 +--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c ++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c +@@ -559,6 +559,7 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll) + struct pll_10nm_cached_state *cached = &pll_10nm->cached_state; + void __iomem *phy_base = pll_10nm->phy_cmn_mmio; + u32 val; ++ int ret; + + val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE); + val &= ~0x3; +@@ -573,6 +574,13 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll) + val |= cached->pll_mux; + pll_write(phy_base + 
REG_DSI_10nm_PHY_CMN_CLK_CFG1, val); + ++ ret = dsi_pll_10nm_vco_set_rate(&pll->clk_hw, pll_10nm->vco_current_rate, pll_10nm->vco_ref_clk_rate); ++ if (ret) { ++ DRM_DEV_ERROR(&pll_10nm->pdev->dev, ++ "restore vco rate failed. ret=%d\n", ret); ++ return ret; ++ } ++ + DBG("DSI PLL%d", pll_10nm->id); + + return 0; +diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c +index de0dfb8151258..93bf142e4a4e6 100644 +--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c ++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c +@@ -585,6 +585,7 @@ static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll) + struct pll_7nm_cached_state *cached = &pll_7nm->cached_state; + void __iomem *phy_base = pll_7nm->phy_cmn_mmio; + u32 val; ++ int ret; + + val = pll_read(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE); + val &= ~0x3; +@@ -599,6 +600,13 @@ static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll) + val |= cached->pll_mux; + pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val); + ++ ret = dsi_pll_7nm_vco_set_rate(&pll->clk_hw, pll_7nm->vco_current_rate, pll_7nm->vco_ref_clk_rate); ++ if (ret) { ++ DRM_DEV_ERROR(&pll_7nm->pdev->dev, ++ "restore vco rate failed. 
ret=%d\n", ret); ++ return ret; ++ } ++ + DBG("DSI PLL%d", pll_7nm->id); + + return 0; +diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h +index b9dd8f8f48872..0b2686b060c73 100644 +--- a/drivers/gpu/drm/msm/msm_drv.h ++++ b/drivers/gpu/drm/msm/msm_drv.h +@@ -423,6 +423,11 @@ static inline int msm_dp_display_disable(struct msm_dp *dp, + { + return -EINVAL; + } ++static inline int msm_dp_display_pre_disable(struct msm_dp *dp, ++ struct drm_encoder *encoder) ++{ ++ return -EINVAL; ++} + static inline void msm_dp_display_mode_set(struct msm_dp *dp, + struct drm_encoder *encoder, + struct drm_display_mode *mode, +diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c +index 35122aef037b4..17f26052e8450 100644 +--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c ++++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c +@@ -134,11 +134,8 @@ static int mxsfb_attach_bridge(struct mxsfb_drm_private *mxsfb) + return -ENODEV; + + ret = drm_bridge_attach(&mxsfb->encoder, bridge, NULL, 0); +- if (ret) { +- DRM_DEV_ERROR(drm->dev, +- "failed to attach bridge: %d\n", ret); +- return ret; +- } ++ if (ret) ++ return dev_err_probe(drm->dev, ret, "Failed to attach bridge\n"); + + mxsfb->bridge = bridge; + +@@ -212,7 +209,8 @@ static int mxsfb_load(struct drm_device *drm, + + ret = mxsfb_attach_bridge(mxsfb); + if (ret) { +- dev_err(drm->dev, "Cannot connect bridge: %d\n", ret); ++ if (ret != -EPROBE_DEFER) ++ dev_err(drm->dev, "Cannot connect bridge: %d\n", ret); + goto err_vblank; + } + +diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +index 42ec51bb7b1b0..7f43172488123 100644 +--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c ++++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +@@ -889,6 +889,7 @@ static int omap_dmm_probe(struct platform_device *dev) + &omap_dmm->refill_pa, GFP_KERNEL); + if (!omap_dmm->refill_va) { + dev_err(&dev->dev, "could not allocate refill memory\n"); ++ ret = -ENOMEM; + goto fail; + } + 
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c +index 2be358fb46f7d..204674fccd646 100644 +--- a/drivers/gpu/drm/panel/panel-simple.c ++++ b/drivers/gpu/drm/panel/panel-simple.c +@@ -1327,6 +1327,7 @@ static const struct drm_display_mode boe_nv133fhm_n61_modes = { + .vsync_start = 1080 + 3, + .vsync_end = 1080 + 3 + 6, + .vtotal = 1080 + 3 + 6 + 31, ++ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, + }; + + /* Also used for boe_nv133fhm_n62 */ +diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c +index e6896733838ab..bf7c34cfb84c0 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_device.c ++++ b/drivers/gpu/drm/panfrost/panfrost_device.c +@@ -206,7 +206,6 @@ int panfrost_device_init(struct panfrost_device *pfdev) + struct resource *res; + + mutex_init(&pfdev->sched_lock); +- mutex_init(&pfdev->reset_lock); + INIT_LIST_HEAD(&pfdev->scheduled_jobs); + INIT_LIST_HEAD(&pfdev->as_lru_list); + +diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h +index 2e9cbd1c4a58e..67f9f66904be2 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_device.h ++++ b/drivers/gpu/drm/panfrost/panfrost_device.h +@@ -105,7 +105,11 @@ struct panfrost_device { + struct panfrost_perfcnt *perfcnt; + + struct mutex sched_lock; +- struct mutex reset_lock; ++ ++ struct { ++ struct work_struct work; ++ atomic_t pending; ++ } reset; + + struct mutex shrinker_lock; + struct list_head shrinker_list; +diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c +index 30e7b7196dab0..1ce2001106e56 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_job.c ++++ b/drivers/gpu/drm/panfrost/panfrost_job.c +@@ -20,12 +20,22 @@ + #include "panfrost_gpu.h" + #include "panfrost_mmu.h" + ++#define JOB_TIMEOUT_MS 500 ++ + #define job_write(dev, reg, data) writel(data, dev->iomem + (reg)) + #define job_read(dev, reg) readl(dev->iomem + (reg)) + 
++enum panfrost_queue_status { ++ PANFROST_QUEUE_STATUS_ACTIVE, ++ PANFROST_QUEUE_STATUS_STOPPED, ++ PANFROST_QUEUE_STATUS_STARTING, ++ PANFROST_QUEUE_STATUS_FAULT_PENDING, ++}; ++ + struct panfrost_queue_state { + struct drm_gpu_scheduler sched; +- ++ atomic_t status; ++ struct mutex lock; + u64 fence_context; + u64 emit_seqno; + }; +@@ -369,13 +379,64 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev) + job_write(pfdev, JOB_INT_MASK, irq_mask); + } + ++static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue, ++ struct drm_sched_job *bad) ++{ ++ enum panfrost_queue_status old_status; ++ bool stopped = false; ++ ++ mutex_lock(&queue->lock); ++ old_status = atomic_xchg(&queue->status, ++ PANFROST_QUEUE_STATUS_STOPPED); ++ if (old_status == PANFROST_QUEUE_STATUS_STOPPED) ++ goto out; ++ ++ WARN_ON(old_status != PANFROST_QUEUE_STATUS_ACTIVE); ++ drm_sched_stop(&queue->sched, bad); ++ if (bad) ++ drm_sched_increase_karma(bad); ++ ++ stopped = true; ++ ++ /* ++ * Set the timeout to max so the timer doesn't get started ++ * when we return from the timeout handler (restored in ++ * panfrost_scheduler_start()). ++ */ ++ queue->sched.timeout = MAX_SCHEDULE_TIMEOUT; ++ ++out: ++ mutex_unlock(&queue->lock); ++ ++ return stopped; ++} ++ ++static void panfrost_scheduler_start(struct panfrost_queue_state *queue) ++{ ++ enum panfrost_queue_status old_status; ++ ++ mutex_lock(&queue->lock); ++ old_status = atomic_xchg(&queue->status, ++ PANFROST_QUEUE_STATUS_STARTING); ++ WARN_ON(old_status != PANFROST_QUEUE_STATUS_STOPPED); ++ ++ /* Restore the original timeout before starting the scheduler. 
*/ ++ queue->sched.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS); ++ drm_sched_resubmit_jobs(&queue->sched); ++ drm_sched_start(&queue->sched, true); ++ old_status = atomic_xchg(&queue->status, ++ PANFROST_QUEUE_STATUS_ACTIVE); ++ if (old_status == PANFROST_QUEUE_STATUS_FAULT_PENDING) ++ drm_sched_fault(&queue->sched); ++ ++ mutex_unlock(&queue->lock); ++} ++ + static void panfrost_job_timedout(struct drm_sched_job *sched_job) + { + struct panfrost_job *job = to_panfrost_job(sched_job); + struct panfrost_device *pfdev = job->pfdev; + int js = panfrost_job_get_slot(job); +- unsigned long flags; +- int i; + + /* + * If the GPU managed to complete this jobs fence, the timeout is +@@ -392,40 +453,13 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job) + job_read(pfdev, JS_TAIL_LO(js)), + sched_job); + +- if (!mutex_trylock(&pfdev->reset_lock)) ++ /* Scheduler is already stopped, nothing to do. */ ++ if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job)) + return; + +- for (i = 0; i < NUM_JOB_SLOTS; i++) { +- struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched; +- +- drm_sched_stop(sched, sched_job); +- if (js != i) +- /* Ensure any timeouts on other slots have finished */ +- cancel_delayed_work_sync(&sched->work_tdr); +- } +- +- drm_sched_increase_karma(sched_job); +- +- spin_lock_irqsave(&pfdev->js->job_lock, flags); +- for (i = 0; i < NUM_JOB_SLOTS; i++) { +- if (pfdev->jobs[i]) { +- pm_runtime_put_noidle(pfdev->dev); +- panfrost_devfreq_record_idle(&pfdev->pfdevfreq); +- pfdev->jobs[i] = NULL; +- } +- } +- spin_unlock_irqrestore(&pfdev->js->job_lock, flags); +- +- panfrost_device_reset(pfdev); +- +- for (i = 0; i < NUM_JOB_SLOTS; i++) +- drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched); +- +- /* restart scheduler after GPU is usable again */ +- for (i = 0; i < NUM_JOB_SLOTS; i++) +- drm_sched_start(&pfdev->js->queue[i].sched, true); +- +- mutex_unlock(&pfdev->reset_lock); ++ /* Schedule a reset if there's no reset in progress. 
*/ ++ if (!atomic_xchg(&pfdev->reset.pending, 1)) ++ schedule_work(&pfdev->reset.work); + } + + static const struct drm_sched_backend_ops panfrost_sched_ops = { +@@ -457,6 +491,8 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data) + job_write(pfdev, JOB_INT_CLEAR, mask); + + if (status & JOB_INT_MASK_ERR(j)) { ++ enum panfrost_queue_status old_status; ++ + job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP); + + dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x", +@@ -465,7 +501,18 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data) + job_read(pfdev, JS_HEAD_LO(j)), + job_read(pfdev, JS_TAIL_LO(j))); + +- drm_sched_fault(&pfdev->js->queue[j].sched); ++ /* ++ * When the queue is being restarted we don't report ++ * faults directly to avoid races between the timeout ++ * and reset handlers. panfrost_scheduler_start() will ++ * call drm_sched_fault() after the queue has been ++ * started if status == FAULT_PENDING. ++ */ ++ old_status = atomic_cmpxchg(&pfdev->js->queue[j].status, ++ PANFROST_QUEUE_STATUS_STARTING, ++ PANFROST_QUEUE_STATUS_FAULT_PENDING); ++ if (old_status == PANFROST_QUEUE_STATUS_ACTIVE) ++ drm_sched_fault(&pfdev->js->queue[j].sched); + } + + if (status & JOB_INT_MASK_DONE(j)) { +@@ -492,11 +539,66 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data) + return IRQ_HANDLED; + } + ++static void panfrost_reset(struct work_struct *work) ++{ ++ struct panfrost_device *pfdev = container_of(work, ++ struct panfrost_device, ++ reset.work); ++ unsigned long flags; ++ unsigned int i; ++ bool cookie; ++ ++ cookie = dma_fence_begin_signalling(); ++ for (i = 0; i < NUM_JOB_SLOTS; i++) { ++ /* ++ * We want pending timeouts to be handled before we attempt ++ * to stop the scheduler. 
If we don't do that and the timeout ++ * handler is in flight, it might have removed the bad job ++ * from the list, and we'll lose this job if the reset handler ++ * enters the critical section in panfrost_scheduler_stop() ++ * before the timeout handler. ++ * ++ * Timeout is set to MAX_SCHEDULE_TIMEOUT - 1 because we need ++ * something big enough to make sure the timer will not expire ++ * before we manage to stop the scheduler, but we can't use ++ * MAX_SCHEDULE_TIMEOUT because drm_sched_get_cleanup_job() ++ * considers that as 'timer is not running' and will dequeue ++ * the job without making sure the timeout handler is not ++ * running. ++ */ ++ pfdev->js->queue[i].sched.timeout = MAX_SCHEDULE_TIMEOUT - 1; ++ cancel_delayed_work_sync(&pfdev->js->queue[i].sched.work_tdr); ++ panfrost_scheduler_stop(&pfdev->js->queue[i], NULL); ++ } ++ ++ /* All timers have been stopped, we can safely reset the pending state. */ ++ atomic_set(&pfdev->reset.pending, 0); ++ ++ spin_lock_irqsave(&pfdev->js->job_lock, flags); ++ for (i = 0; i < NUM_JOB_SLOTS; i++) { ++ if (pfdev->jobs[i]) { ++ pm_runtime_put_noidle(pfdev->dev); ++ panfrost_devfreq_record_idle(&pfdev->pfdevfreq); ++ pfdev->jobs[i] = NULL; ++ } ++ } ++ spin_unlock_irqrestore(&pfdev->js->job_lock, flags); ++ ++ panfrost_device_reset(pfdev); ++ ++ for (i = 0; i < NUM_JOB_SLOTS; i++) ++ panfrost_scheduler_start(&pfdev->js->queue[i]); ++ ++ dma_fence_end_signalling(cookie); ++} ++ + int panfrost_job_init(struct panfrost_device *pfdev) + { + struct panfrost_job_slot *js; + int ret, j, irq; + ++ INIT_WORK(&pfdev->reset.work, panfrost_reset); ++ + pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL); + if (!js) + return -ENOMEM; +@@ -519,7 +621,7 @@ int panfrost_job_init(struct panfrost_device *pfdev) + + ret = drm_sched_init(&js->queue[j].sched, + &panfrost_sched_ops, +- 1, 0, msecs_to_jiffies(500), ++ 1, 0, msecs_to_jiffies(JOB_TIMEOUT_MS), + "pan_js"); + if (ret) { + dev_err(pfdev->dev, "Failed to create 
scheduler: %d.", ret); +@@ -558,6 +660,7 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv) + int ret, i; + + for (i = 0; i < NUM_JOB_SLOTS; i++) { ++ mutex_init(&js->queue[i].lock); + sched = &js->queue[i].sched; + ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], + DRM_SCHED_PRIORITY_NORMAL, &sched, +@@ -570,10 +673,14 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv) + + void panfrost_job_close(struct panfrost_file_priv *panfrost_priv) + { ++ struct panfrost_device *pfdev = panfrost_priv->pfdev; ++ struct panfrost_job_slot *js = pfdev->js; + int i; + +- for (i = 0; i < NUM_JOB_SLOTS; i++) ++ for (i = 0; i < NUM_JOB_SLOTS; i++) { + drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]); ++ mutex_destroy(&js->queue[i].lock); ++ } + } + + int panfrost_job_is_idle(struct panfrost_device *pfdev) +diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c +index c3aa39bd38ecd..b5259cb1383fc 100644 +--- a/drivers/gpu/drm/tve200/tve200_drv.c ++++ b/drivers/gpu/drm/tve200/tve200_drv.c +@@ -200,8 +200,8 @@ static int tve200_probe(struct platform_device *pdev) + } + + irq = platform_get_irq(pdev, 0); +- if (!irq) { +- ret = -EINVAL; ++ if (irq < 0) { ++ ret = irq; + goto clk_disable; + } + +diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c +index fef43f4e3bac4..edcfd8c120c44 100644 +--- a/drivers/gpu/drm/udl/udl_modeset.c ++++ b/drivers/gpu/drm/udl/udl_modeset.c +@@ -303,8 +303,10 @@ static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y, + } + + urb = udl_get_urb(dev); +- if (!urb) ++ if (!urb) { ++ ret = -ENOMEM; + goto out_drm_gem_shmem_vunmap; ++ } + cmd = urb->transfer_buffer; + + for (i = clip.y1; i < clip.y2; i++) { +diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c +index fa69b94debd9b..7596dc1646484 100644 +--- a/drivers/hsi/controllers/omap_ssi_core.c ++++ b/drivers/hsi/controllers/omap_ssi_core.c 
+@@ -355,7 +355,7 @@ static int ssi_add_controller(struct hsi_controller *ssi, + + err = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL); + if (err < 0) +- goto out_err; ++ return err; + ssi->id = err; + + ssi->owner = THIS_MODULE; +diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c +index 41fb17e0d6416..ad11cbddc3a7b 100644 +--- a/drivers/hwmon/ina3221.c ++++ b/drivers/hwmon/ina3221.c +@@ -489,7 +489,7 @@ static int ina3221_write_enable(struct device *dev, int channel, bool enable) + + /* For enabling routine, increase refcount and resume() at first */ + if (enable) { +- ret = pm_runtime_get_sync(ina->pm_dev); ++ ret = pm_runtime_resume_and_get(ina->pm_dev); + if (ret < 0) { + dev_err(dev, "Failed to get PM runtime\n"); + return ret; +diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c +index a250481b5a97f..3bc2551577a30 100644 +--- a/drivers/hwmon/k10temp.c ++++ b/drivers/hwmon/k10temp.c +@@ -11,13 +11,6 @@ + * convert raw register values is from https://github.com/ocerman/zenpower. + * The information is not confirmed from chip datasheets, but experiments + * suggest that it provides reasonable temperature values. +- * - Register addresses to read chip voltage and current are also from +- * https://github.com/ocerman/zenpower, and not confirmed from chip +- * datasheets. Current calibration is board specific and not typically +- * shared by board vendors. For this reason, current values are +- * normalized to report 1A/LSB for core current and and 0.25A/LSB for SoC +- * current. Reported values can be adjusted using the sensors configuration +- * file. 
+ */ + + #include <linux/bitops.h> +@@ -109,10 +102,7 @@ struct k10temp_data { + int temp_offset; + u32 temp_adjust_mask; + u32 show_temp; +- u32 svi_addr[2]; + bool is_zen; +- bool show_current; +- int cfactor[2]; + }; + + #define TCTL_BIT 0 +@@ -137,16 +127,6 @@ static const struct tctl_offset tctl_offset_table[] = { + { 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */ + }; + +-static bool is_threadripper(void) +-{ +- return strstr(boot_cpu_data.x86_model_id, "Threadripper"); +-} +- +-static bool is_epyc(void) +-{ +- return strstr(boot_cpu_data.x86_model_id, "EPYC"); +-} +- + static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval) + { + pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval); +@@ -211,16 +191,6 @@ static const char *k10temp_temp_label[] = { + "Tccd8", + }; + +-static const char *k10temp_in_label[] = { +- "Vcore", +- "Vsoc", +-}; +- +-static const char *k10temp_curr_label[] = { +- "Icore", +- "Isoc", +-}; +- + static int k10temp_read_labels(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +@@ -229,50 +199,6 @@ static int k10temp_read_labels(struct device *dev, + case hwmon_temp: + *str = k10temp_temp_label[channel]; + break; +- case hwmon_in: +- *str = k10temp_in_label[channel]; +- break; +- case hwmon_curr: +- *str = k10temp_curr_label[channel]; +- break; +- default: +- return -EOPNOTSUPP; +- } +- return 0; +-} +- +-static int k10temp_read_curr(struct device *dev, u32 attr, int channel, +- long *val) +-{ +- struct k10temp_data *data = dev_get_drvdata(dev); +- u32 regval; +- +- switch (attr) { +- case hwmon_curr_input: +- amd_smn_read(amd_pci_dev_to_node_id(data->pdev), +- data->svi_addr[channel], ®val); +- *val = DIV_ROUND_CLOSEST(data->cfactor[channel] * +- (regval & 0xff), +- 1000); +- break; +- default: +- return -EOPNOTSUPP; +- } +- return 0; +-} +- +-static int k10temp_read_in(struct device *dev, u32 attr, int channel, long *val) +-{ +- struct k10temp_data 
*data = dev_get_drvdata(dev); +- u32 regval; +- +- switch (attr) { +- case hwmon_in_input: +- amd_smn_read(amd_pci_dev_to_node_id(data->pdev), +- data->svi_addr[channel], ®val); +- regval = (regval >> 16) & 0xff; +- *val = DIV_ROUND_CLOSEST(155000 - regval * 625, 100); +- break; + default: + return -EOPNOTSUPP; + } +@@ -331,10 +257,6 @@ static int k10temp_read(struct device *dev, enum hwmon_sensor_types type, + switch (type) { + case hwmon_temp: + return k10temp_read_temp(dev, attr, channel, val); +- case hwmon_in: +- return k10temp_read_in(dev, attr, channel, val); +- case hwmon_curr: +- return k10temp_read_curr(dev, attr, channel, val); + default: + return -EOPNOTSUPP; + } +@@ -383,11 +305,6 @@ static umode_t k10temp_is_visible(const void *_data, + return 0; + } + break; +- case hwmon_in: +- case hwmon_curr: +- if (!data->show_current) +- return 0; +- break; + default: + return 0; + } +@@ -517,20 +434,10 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) + case 0x8: /* Zen+ */ + case 0x11: /* Zen APU */ + case 0x18: /* Zen+ APU */ +- data->show_current = !is_threadripper() && !is_epyc(); +- data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE0; +- data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE1; +- data->cfactor[0] = F17H_M01H_CFACTOR_ICORE; +- data->cfactor[1] = F17H_M01H_CFACTOR_ISOC; + k10temp_get_ccd_support(pdev, data, 4); + break; + case 0x31: /* Zen2 Threadripper */ + case 0x71: /* Zen2 */ +- data->show_current = !is_threadripper() && !is_epyc(); +- data->cfactor[0] = F17H_M31H_CFACTOR_ICORE; +- data->cfactor[1] = F17H_M31H_CFACTOR_ISOC; +- data->svi_addr[0] = F17H_M31H_SVI_TEL_PLANE0; +- data->svi_addr[1] = F17H_M31H_SVI_TEL_PLANE1; + k10temp_get_ccd_support(pdev, data, 8); + break; + } +@@ -542,11 +449,6 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) + + switch (boot_cpu_data.x86_model) { + case 0x0 ... 
0x1: /* Zen3 */ +- data->show_current = true; +- data->svi_addr[0] = F19H_M01_SVI_TEL_PLANE0; +- data->svi_addr[1] = F19H_M01_SVI_TEL_PLANE1; +- data->cfactor[0] = F19H_M01H_CFACTOR_ICORE; +- data->cfactor[1] = F19H_M01H_CFACTOR_ISOC; + k10temp_get_ccd_support(pdev, data, 8); + break; + } +diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c +index 99430f6cf5a5d..a61313f320bda 100644 +--- a/drivers/hwtracing/coresight/coresight-catu.c ++++ b/drivers/hwtracing/coresight/coresight-catu.c +@@ -567,7 +567,7 @@ out: + return ret; + } + +-static int __exit catu_remove(struct amba_device *adev) ++static int catu_remove(struct amba_device *adev) + { + struct catu_drvdata *drvdata = dev_get_drvdata(&adev->dev); + +diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c +index d28eae93e55c8..61dbc1afd8da5 100644 +--- a/drivers/hwtracing/coresight/coresight-cti-core.c ++++ b/drivers/hwtracing/coresight/coresight-cti-core.c +@@ -836,7 +836,7 @@ static void cti_device_release(struct device *dev) + if (drvdata->csdev_release) + drvdata->csdev_release(dev); + } +-static int __exit cti_remove(struct amba_device *adev) ++static int cti_remove(struct amba_device *adev) + { + struct cti_drvdata *drvdata = dev_get_drvdata(&adev->dev); + +diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c +index 1b320ab581caf..0cf6f0b947b6f 100644 +--- a/drivers/hwtracing/coresight/coresight-etb10.c ++++ b/drivers/hwtracing/coresight/coresight-etb10.c +@@ -803,7 +803,7 @@ err_misc_register: + return ret; + } + +-static int __exit etb_remove(struct amba_device *adev) ++static int etb_remove(struct amba_device *adev) + { + struct etb_drvdata *drvdata = dev_get_drvdata(&adev->dev); + +diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c +index 47f610b1c2b18..5bf5a5a4ce6d1 100644 
+--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c ++++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c +@@ -902,14 +902,14 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) + return 0; + } + +-static void __exit clear_etmdrvdata(void *info) ++static void clear_etmdrvdata(void *info) + { + int cpu = *(int *)info; + + etmdrvdata[cpu] = NULL; + } + +-static int __exit etm_remove(struct amba_device *adev) ++static int etm_remove(struct amba_device *adev) + { + struct etm_drvdata *drvdata = dev_get_drvdata(&adev->dev); + +diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c +index e516e5b879e3a..95b54b0a36252 100644 +--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c ++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c +@@ -1570,14 +1570,14 @@ static struct amba_cs_uci_id uci_id_etm4[] = { + } + }; + +-static void __exit clear_etmdrvdata(void *info) ++static void clear_etmdrvdata(void *info) + { + int cpu = *(int *)info; + + etmdrvdata[cpu] = NULL; + } + +-static int __exit etm4_remove(struct amba_device *adev) ++static int etm4_remove(struct amba_device *adev) + { + struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev); + +diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c +index af40814ce5603..3fc6c678b51d8 100644 +--- a/drivers/hwtracing/coresight/coresight-funnel.c ++++ b/drivers/hwtracing/coresight/coresight-funnel.c +@@ -274,7 +274,7 @@ out_disable_clk: + return ret; + } + +-static int __exit funnel_remove(struct device *dev) ++static int funnel_remove(struct device *dev) + { + struct funnel_drvdata *drvdata = dev_get_drvdata(dev); + +@@ -328,7 +328,7 @@ static int static_funnel_probe(struct platform_device *pdev) + return ret; + } + +-static int __exit static_funnel_remove(struct platform_device *pdev) ++static int static_funnel_remove(struct platform_device *pdev) + { + 
funnel_remove(&pdev->dev); + pm_runtime_disable(&pdev->dev); +@@ -370,7 +370,7 @@ static int dynamic_funnel_probe(struct amba_device *adev, + return funnel_probe(&adev->dev, &adev->res); + } + +-static int __exit dynamic_funnel_remove(struct amba_device *adev) ++static int dynamic_funnel_remove(struct amba_device *adev) + { + return funnel_remove(&adev->dev); + } +diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c +index 62afdde0e5eab..38008aca2c0f4 100644 +--- a/drivers/hwtracing/coresight/coresight-replicator.c ++++ b/drivers/hwtracing/coresight/coresight-replicator.c +@@ -291,7 +291,7 @@ out_disable_clk: + return ret; + } + +-static int __exit replicator_remove(struct device *dev) ++static int replicator_remove(struct device *dev) + { + struct replicator_drvdata *drvdata = dev_get_drvdata(dev); + +@@ -318,7 +318,7 @@ static int static_replicator_probe(struct platform_device *pdev) + return ret; + } + +-static int __exit static_replicator_remove(struct platform_device *pdev) ++static int static_replicator_remove(struct platform_device *pdev) + { + replicator_remove(&pdev->dev); + pm_runtime_disable(&pdev->dev); +@@ -388,7 +388,7 @@ static int dynamic_replicator_probe(struct amba_device *adev, + return replicator_probe(&adev->dev, &adev->res); + } + +-static int __exit dynamic_replicator_remove(struct amba_device *adev) ++static int dynamic_replicator_remove(struct amba_device *adev) + { + return replicator_remove(&adev->dev); + } +diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c +index b0ad912651a99..587c1d7f25208 100644 +--- a/drivers/hwtracing/coresight/coresight-stm.c ++++ b/drivers/hwtracing/coresight/coresight-stm.c +@@ -951,7 +951,7 @@ stm_unregister: + return ret; + } + +-static int __exit stm_remove(struct amba_device *adev) ++static int stm_remove(struct amba_device *adev) + { + struct stm_drvdata *drvdata = dev_get_drvdata(&adev->dev); 
+ +diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c +index 5653e0945c74b..8169dff5a9f6a 100644 +--- a/drivers/hwtracing/coresight/coresight-tmc-core.c ++++ b/drivers/hwtracing/coresight/coresight-tmc-core.c +@@ -559,7 +559,7 @@ out: + spin_unlock_irqrestore(&drvdata->spinlock, flags); + } + +-static int __exit tmc_remove(struct amba_device *adev) ++static int tmc_remove(struct amba_device *adev) + { + struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev); + +diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c +index 566c57e035961..5b35029461a0c 100644 +--- a/drivers/hwtracing/coresight/coresight-tpiu.c ++++ b/drivers/hwtracing/coresight/coresight-tpiu.c +@@ -173,7 +173,7 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id) + return PTR_ERR(drvdata->csdev); + } + +-static int __exit tpiu_remove(struct amba_device *adev) ++static int tpiu_remove(struct amba_device *adev) + { + struct tpiu_drvdata *drvdata = dev_get_drvdata(&adev->dev); + +diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c +index 8b4c35f47a70f..dce75b85253c1 100644 +--- a/drivers/i2c/busses/i2c-qcom-geni.c ++++ b/drivers/i2c/busses/i2c-qcom-geni.c +@@ -366,6 +366,7 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, + geni_se_select_mode(se, GENI_SE_FIFO); + + writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN); ++ geni_se_setup_m_cmd(se, I2C_READ, m_param); + + if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) { + geni_se_select_mode(se, GENI_SE_FIFO); +@@ -373,8 +374,6 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, + dma_buf = NULL; + } + +- geni_se_setup_m_cmd(se, I2C_READ, m_param); +- + time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT); + if (!time_left) + geni_i2c_abort_xfer(gi2c); +@@ -408,6 +407,7 @@ static int 
geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, + geni_se_select_mode(se, GENI_SE_FIFO); + + writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN); ++ geni_se_setup_m_cmd(se, I2C_WRITE, m_param); + + if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) { + geni_se_select_mode(se, GENI_SE_FIFO); +@@ -415,8 +415,6 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, + dma_buf = NULL; + } + +- geni_se_setup_m_cmd(se, I2C_WRITE, m_param); +- + if (!dma_buf) /* Get FIFO IRQ */ + writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG); + +diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig +index 91ae90514aff4..17e9ceb9c6c48 100644 +--- a/drivers/iio/adc/Kconfig ++++ b/drivers/iio/adc/Kconfig +@@ -295,7 +295,7 @@ config ASPEED_ADC + config AT91_ADC + tristate "Atmel AT91 ADC" + depends on ARCH_AT91 || COMPILE_TEST +- depends on INPUT && SYSFS ++ depends on INPUT && SYSFS && OF + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + help +diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c +index 86039e9ecaca1..3a6f239d4acca 100644 +--- a/drivers/iio/adc/ad_sigma_delta.c ++++ b/drivers/iio/adc/ad_sigma_delta.c +@@ -57,7 +57,7 @@ EXPORT_SYMBOL_GPL(ad_sd_set_comm); + int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, + unsigned int size, unsigned int val) + { +- uint8_t *data = sigma_delta->data; ++ uint8_t *data = sigma_delta->tx_buf; + struct spi_transfer t = { + .tx_buf = data, + .len = size + 1, +@@ -99,7 +99,7 @@ EXPORT_SYMBOL_GPL(ad_sd_write_reg); + static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta, + unsigned int reg, unsigned int size, uint8_t *val) + { +- uint8_t *data = sigma_delta->data; ++ uint8_t *data = sigma_delta->tx_buf; + int ret; + struct spi_transfer t[] = { + { +@@ -146,22 +146,22 @@ int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, + { + int ret; + +- ret = ad_sd_read_reg_raw(sigma_delta, reg, size, sigma_delta->data); 
++ ret = ad_sd_read_reg_raw(sigma_delta, reg, size, sigma_delta->rx_buf); + if (ret < 0) + goto out; + + switch (size) { + case 4: +- *val = get_unaligned_be32(sigma_delta->data); ++ *val = get_unaligned_be32(sigma_delta->rx_buf); + break; + case 3: +- *val = get_unaligned_be24(&sigma_delta->data[0]); ++ *val = get_unaligned_be24(sigma_delta->rx_buf); + break; + case 2: +- *val = get_unaligned_be16(sigma_delta->data); ++ *val = get_unaligned_be16(sigma_delta->rx_buf); + break; + case 1: +- *val = sigma_delta->data[0]; ++ *val = sigma_delta->rx_buf[0]; + break; + default: + ret = -EINVAL; +@@ -395,11 +395,9 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev); ++ uint8_t *data = sigma_delta->rx_buf; + unsigned int reg_size; + unsigned int data_reg; +- uint8_t data[16]; +- +- memset(data, 0x00, 16); + + reg_size = indio_dev->channels[0].scan_type.realbits + + indio_dev->channels[0].scan_type.shift; +diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c +index 9b2c548fae957..0a793e7cd53ee 100644 +--- a/drivers/iio/adc/at91_adc.c ++++ b/drivers/iio/adc/at91_adc.c +@@ -1469,7 +1469,7 @@ static struct platform_driver at91_adc_driver = { + .id_table = at91_adc_ids, + .driver = { + .name = DRIVER_NAME, +- .of_match_table = of_match_ptr(at91_adc_dt_ids), ++ .of_match_table = at91_adc_dt_ids, + .pm = &at91_adc_pm_ops, + }, + }; +diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c +index 1f3d7d639d378..12584f1631d88 100644 +--- a/drivers/iio/adc/rockchip_saradc.c ++++ b/drivers/iio/adc/rockchip_saradc.c +@@ -462,7 +462,7 @@ static int rockchip_saradc_resume(struct device *dev) + + ret = clk_prepare_enable(info->clk); + if (ret) +- return ret; ++ clk_disable_unprepare(info->pclk); + + return ret; + } +diff --git a/drivers/iio/adc/ti-ads124s08.c 
b/drivers/iio/adc/ti-ads124s08.c +index 4b4fbe33930ce..b4a128b191889 100644 +--- a/drivers/iio/adc/ti-ads124s08.c ++++ b/drivers/iio/adc/ti-ads124s08.c +@@ -99,6 +99,14 @@ struct ads124s_private { + struct gpio_desc *reset_gpio; + struct spi_device *spi; + struct mutex lock; ++ /* ++ * Used to correctly align data. ++ * Ensure timestamp is naturally aligned. ++ * Note that the full buffer length may not be needed if not ++ * all channels are enabled, as long as the alignment of the ++ * timestamp is maintained. ++ */ ++ u32 buffer[ADS124S08_MAX_CHANNELS + sizeof(s64)/sizeof(u32)] __aligned(8); + u8 data[5] ____cacheline_aligned; + }; + +@@ -269,7 +277,6 @@ static irqreturn_t ads124s_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct ads124s_private *priv = iio_priv(indio_dev); +- u32 buffer[ADS124S08_MAX_CHANNELS + sizeof(s64)/sizeof(u16)]; + int scan_index, j = 0; + int ret; + +@@ -284,7 +291,7 @@ static irqreturn_t ads124s_trigger_handler(int irq, void *p) + if (ret) + dev_err(&priv->spi->dev, "Start ADC conversions failed\n"); + +- buffer[j] = ads124s_read(indio_dev, scan_index); ++ priv->buffer[j] = ads124s_read(indio_dev, scan_index); + ret = ads124s_write_cmd(indio_dev, ADS124S08_STOP_CONV); + if (ret) + dev_err(&priv->spi->dev, "Stop ADC conversions failed\n"); +@@ -292,7 +299,7 @@ static irqreturn_t ads124s_trigger_handler(int irq, void *p) + j++; + } + +- iio_push_to_buffers_with_timestamp(indio_dev, buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, priv->buffer, + pf->timestamp); + + iio_trigger_notify_done(indio_dev->trig); +diff --git a/drivers/iio/imu/bmi160/bmi160.h b/drivers/iio/imu/bmi160/bmi160.h +index a82e040bd1098..32c2ea2d71129 100644 +--- a/drivers/iio/imu/bmi160/bmi160.h ++++ b/drivers/iio/imu/bmi160/bmi160.h +@@ -10,6 +10,13 @@ struct bmi160_data { + struct iio_trigger *trig; + struct regulator_bulk_data supplies[2]; + struct iio_mount_matrix orientation; ++ /* ++ * 
Ensure natural alignment for timestamp if present. ++ * Max length needed: 2 * 3 channels + 4 bytes padding + 8 byte ts. ++ * If fewer channels are enabled, less space may be needed, as ++ * long as the timestamp is still aligned to 8 bytes. ++ */ ++ __le16 buf[12] __aligned(8); + }; + + extern const struct regmap_config bmi160_regmap_config; +diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c +index 222ebb26f0132..82f03a4dc47a7 100644 +--- a/drivers/iio/imu/bmi160/bmi160_core.c ++++ b/drivers/iio/imu/bmi160/bmi160_core.c +@@ -427,8 +427,6 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct bmi160_data *data = iio_priv(indio_dev); +- __le16 buf[16]; +- /* 3 sens x 3 axis x __le16 + 3 x __le16 pad + 4 x __le16 tstamp */ + int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L; + __le16 sample; + +@@ -438,10 +436,10 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p) + &sample, sizeof(sample)); + if (ret) + goto done; +- buf[j++] = sample; ++ data->buf[j++] = sample; + } + +- iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp); ++ iio_push_to_buffers_with_timestamp(indio_dev, data->buf, pf->timestamp); + done: + iio_trigger_notify_done(indio_dev->trig); + return IRQ_HANDLED; +diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +index 42f485634d044..2ab1ac5a2412f 100644 +--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c ++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +@@ -2255,19 +2255,35 @@ st_lsm6dsx_report_motion_event(struct st_lsm6dsx_hw *hw) + static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private) + { + struct st_lsm6dsx_hw *hw = private; ++ int fifo_len = 0, len; + bool event; +- int count; + + event = st_lsm6dsx_report_motion_event(hw); + + if (!hw->settings->fifo_ops.read_fifo) + return event ? 
IRQ_HANDLED : IRQ_NONE; + +- mutex_lock(&hw->fifo_lock); +- count = hw->settings->fifo_ops.read_fifo(hw); +- mutex_unlock(&hw->fifo_lock); ++ /* ++ * If we are using edge IRQs, new samples can arrive while ++ * processing current interrupt since there are no hw ++ * guarantees the irq line stays "low" long enough to properly ++ * detect the new interrupt. In this case the new sample will ++ * be missed. ++ * Polling FIFO status register allow us to read new ++ * samples even if the interrupt arrives while processing ++ * previous data and the timeslot where the line is "low" is ++ * too short to be properly detected. ++ */ ++ do { ++ mutex_lock(&hw->fifo_lock); ++ len = hw->settings->fifo_ops.read_fifo(hw); ++ mutex_unlock(&hw->fifo_lock); ++ ++ if (len > 0) ++ fifo_len += len; ++ } while (len > 0); + +- return count || event ? IRQ_HANDLED : IRQ_NONE; ++ return fifo_len || event ? IRQ_HANDLED : IRQ_NONE; + } + + static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw) +diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c +index a4f6bb96d4f42..276b609d79174 100644 +--- a/drivers/iio/industrialio-buffer.c ++++ b/drivers/iio/industrialio-buffer.c +@@ -865,12 +865,12 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev, + indio_dev->masklength, + in_ind + 1); + while (in_ind != out_ind) { +- in_ind = find_next_bit(indio_dev->active_scan_mask, +- indio_dev->masklength, +- in_ind + 1); + length = iio_storage_bytes_for_si(indio_dev, in_ind); + /* Make sure we are aligned */ + in_loc = roundup(in_loc, length) + length; ++ in_ind = find_next_bit(indio_dev->active_scan_mask, ++ indio_dev->masklength, ++ in_ind + 1); + } + length = iio_storage_bytes_for_si(indio_dev, in_ind); + out_loc = roundup(out_loc, length); +diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c +index aa2972b048334..31224a33bade3 100644 +--- a/drivers/iio/light/rpr0521.c ++++ b/drivers/iio/light/rpr0521.c +@@ -194,6 +194,17 @@ struct 
rpr0521_data { + bool pxs_need_dis; + + struct regmap *regmap; ++ ++ /* ++ * Ensure correct naturally aligned timestamp. ++ * Note that the read will put garbage data into ++ * the padding but this should not be a problem ++ */ ++ struct { ++ __le16 channels[3]; ++ u8 garbage; ++ s64 ts __aligned(8); ++ } scan; + }; + + static IIO_CONST_ATTR(in_intensity_scale_available, RPR0521_ALS_SCALE_AVAIL); +@@ -449,8 +460,6 @@ static irqreturn_t rpr0521_trigger_consumer_handler(int irq, void *p) + struct rpr0521_data *data = iio_priv(indio_dev); + int err; + +- u8 buffer[16]; /* 3 16-bit channels + padding + ts */ +- + /* Use irq timestamp when reasonable. */ + if (iio_trigger_using_own(indio_dev) && data->irq_timestamp) { + pf->timestamp = data->irq_timestamp; +@@ -461,11 +470,11 @@ static irqreturn_t rpr0521_trigger_consumer_handler(int irq, void *p) + pf->timestamp = iio_get_time_ns(indio_dev); + + err = regmap_bulk_read(data->regmap, RPR0521_REG_PXS_DATA, +- &buffer, ++ data->scan.channels, + (3 * 2) + 1); /* 3 * 16-bit + (discarded) int clear reg. 
*/ + if (!err) + iio_push_to_buffers_with_timestamp(indio_dev, +- buffer, pf->timestamp); ++ &data->scan, pf->timestamp); + else + dev_err(&data->client->dev, + "Trigger consumer can't read from sensor.\n"); +diff --git a/drivers/iio/light/st_uvis25.h b/drivers/iio/light/st_uvis25.h +index 78bc56aad1299..283086887caf5 100644 +--- a/drivers/iio/light/st_uvis25.h ++++ b/drivers/iio/light/st_uvis25.h +@@ -27,6 +27,11 @@ struct st_uvis25_hw { + struct iio_trigger *trig; + bool enabled; + int irq; ++ /* Ensure timestamp is naturally aligned */ ++ struct { ++ u8 chan; ++ s64 ts __aligned(8); ++ } scan; + }; + + extern const struct dev_pm_ops st_uvis25_pm_ops; +diff --git a/drivers/iio/light/st_uvis25_core.c b/drivers/iio/light/st_uvis25_core.c +index a18a82e6bbf5d..1055594b22764 100644 +--- a/drivers/iio/light/st_uvis25_core.c ++++ b/drivers/iio/light/st_uvis25_core.c +@@ -232,17 +232,19 @@ static const struct iio_buffer_setup_ops st_uvis25_buffer_ops = { + + static irqreturn_t st_uvis25_buffer_handler_thread(int irq, void *p) + { +- u8 buffer[ALIGN(sizeof(u8), sizeof(s64)) + sizeof(s64)]; + struct iio_poll_func *pf = p; + struct iio_dev *iio_dev = pf->indio_dev; + struct st_uvis25_hw *hw = iio_priv(iio_dev); ++ unsigned int val; + int err; + +- err = regmap_read(hw->regmap, ST_UVIS25_REG_OUT_ADDR, (int *)buffer); ++ err = regmap_read(hw->regmap, ST_UVIS25_REG_OUT_ADDR, &val); + if (err < 0) + goto out; + +- iio_push_to_buffers_with_timestamp(iio_dev, buffer, ++ hw->scan.chan = val; ++ ++ iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan, + iio_get_time_ns(iio_dev)); + + out: +diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c +index 838b13c8bb3db..c96415a1aeadd 100644 +--- a/drivers/iio/magnetometer/mag3110.c ++++ b/drivers/iio/magnetometer/mag3110.c +@@ -56,6 +56,12 @@ struct mag3110_data { + int sleep_val; + struct regulator *vdd_reg; + struct regulator *vddio_reg; ++ /* Ensure natural alignment of timestamp */ ++ struct { ++ 
__be16 channels[3]; ++ u8 temperature; ++ s64 ts __aligned(8); ++ } scan; + }; + + static int mag3110_request(struct mag3110_data *data) +@@ -387,10 +393,9 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct mag3110_data *data = iio_priv(indio_dev); +- u8 buffer[16]; /* 3 16-bit channels + 1 byte temp + padding + ts */ + int ret; + +- ret = mag3110_read(data, (__be16 *) buffer); ++ ret = mag3110_read(data, data->scan.channels); + if (ret < 0) + goto done; + +@@ -399,10 +404,10 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p) + MAG3110_DIE_TEMP); + if (ret < 0) + goto done; +- buffer[6] = ret; ++ data->scan.temperature = ret; + } + +- iio_push_to_buffers_with_timestamp(indio_dev, buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + iio_get_time_ns(indio_dev)); + + done: +diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c +index ccdb0b70e48ca..1eb9e7b29e050 100644 +--- a/drivers/iio/pressure/mpl3115.c ++++ b/drivers/iio/pressure/mpl3115.c +@@ -144,7 +144,14 @@ static irqreturn_t mpl3115_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct mpl3115_data *data = iio_priv(indio_dev); +- u8 buffer[16]; /* 32-bit channel + 16-bit channel + padding + ts */ ++ /* ++ * 32-bit channel + 16-bit channel + padding + ts ++ * Note that it is possible for only one of the first 2 ++ * channels to be enabled. If that happens, the first element ++ * of the buffer may be either 16 or 32-bits. As such we cannot ++ * use a simple structure definition to express this data layout. 
++ */ ++ u8 buffer[16] __aligned(8); + int ret, pos = 0; + + mutex_lock(&data->lock); +diff --git a/drivers/iio/trigger/iio-trig-hrtimer.c b/drivers/iio/trigger/iio-trig-hrtimer.c +index f59bf8d585866..410de837d0417 100644 +--- a/drivers/iio/trigger/iio-trig-hrtimer.c ++++ b/drivers/iio/trigger/iio-trig-hrtimer.c +@@ -102,7 +102,7 @@ static int iio_trig_hrtimer_set_state(struct iio_trigger *trig, bool state) + + if (state) + hrtimer_start(&trig_info->timer, trig_info->period, +- HRTIMER_MODE_REL); ++ HRTIMER_MODE_REL_HARD); + else + hrtimer_cancel(&trig_info->timer); + +@@ -132,7 +132,7 @@ static struct iio_sw_trigger *iio_trig_hrtimer_probe(const char *name) + trig_info->swt.trigger->ops = &iio_hrtimer_trigger_ops; + trig_info->swt.trigger->dev.groups = iio_hrtimer_attr_groups; + +- hrtimer_init(&trig_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ hrtimer_init(&trig_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); + trig_info->timer.function = iio_hrtimer_trig_handler; + + trig_info->sampling_frequency = HRTIMER_DEFAULT_SAMPLING_FREQUENCY; +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index a77750b8954db..c51b84b2d2f37 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -477,6 +477,10 @@ static void cma_release_dev(struct rdma_id_private *id_priv) + list_del(&id_priv->list); + cma_dev_put(id_priv->cma_dev); + id_priv->cma_dev = NULL; ++ if (id_priv->id.route.addr.dev_addr.sgid_attr) { ++ rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr); ++ id_priv->id.route.addr.dev_addr.sgid_attr = NULL; ++ } + mutex_unlock(&lock); + } + +@@ -1861,9 +1865,6 @@ static void _destroy_id(struct rdma_id_private *id_priv, + + kfree(id_priv->id.route.path_rec); + +- if (id_priv->id.route.addr.dev_addr.sgid_attr) +- rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr); +- + put_net(id_priv->id.route.addr.dev_addr.net); + rdma_restrack_del(&id_priv->res); + kfree(id_priv); +@@ -2495,8 +2496,9 @@ 
static int cma_listen_handler(struct rdma_cm_id *id, + return id_priv->id.event_handler(id, event); + } + +-static void cma_listen_on_dev(struct rdma_id_private *id_priv, +- struct cma_device *cma_dev) ++static int cma_listen_on_dev(struct rdma_id_private *id_priv, ++ struct cma_device *cma_dev, ++ struct rdma_id_private **to_destroy) + { + struct rdma_id_private *dev_id_priv; + struct net *net = id_priv->id.route.addr.dev_addr.net; +@@ -2504,21 +2506,21 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, + + lockdep_assert_held(&lock); + ++ *to_destroy = NULL; + if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) +- return; ++ return 0; + + dev_id_priv = + __rdma_create_id(net, cma_listen_handler, id_priv, + id_priv->id.ps, id_priv->id.qp_type, id_priv); + if (IS_ERR(dev_id_priv)) +- return; ++ return PTR_ERR(dev_id_priv); + + dev_id_priv->state = RDMA_CM_ADDR_BOUND; + memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), + rdma_addr_size(cma_src_addr(id_priv))); + + _cma_attach_to_dev(dev_id_priv, cma_dev); +- list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); + cma_id_get(id_priv); + dev_id_priv->internal_id = 1; + dev_id_priv->afonly = id_priv->afonly; +@@ -2527,19 +2529,42 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, + + ret = rdma_listen(&dev_id_priv->id, id_priv->backlog); + if (ret) +- dev_warn(&cma_dev->device->dev, +- "RDMA CMA: cma_listen_on_dev, error %d\n", ret); ++ goto err_listen; ++ list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); ++ return 0; ++err_listen: ++ /* Caller must destroy this after releasing lock */ ++ *to_destroy = dev_id_priv; ++ dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret); ++ return ret; + } + +-static void cma_listen_on_all(struct rdma_id_private *id_priv) ++static int cma_listen_on_all(struct rdma_id_private *id_priv) + { ++ struct rdma_id_private *to_destroy; + struct cma_device *cma_dev; ++ int ret; + + 
mutex_lock(&lock); + list_add_tail(&id_priv->list, &listen_any_list); +- list_for_each_entry(cma_dev, &dev_list, list) +- cma_listen_on_dev(id_priv, cma_dev); ++ list_for_each_entry(cma_dev, &dev_list, list) { ++ ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); ++ if (ret) { ++ /* Prevent racing with cma_process_remove() */ ++ if (to_destroy) ++ list_del_init(&to_destroy->list); ++ goto err_listen; ++ } ++ } + mutex_unlock(&lock); ++ return 0; ++ ++err_listen: ++ list_del(&id_priv->list); ++ mutex_unlock(&lock); ++ if (to_destroy) ++ rdma_destroy_id(&to_destroy->id); ++ return ret; + } + + void rdma_set_service_type(struct rdma_cm_id *id, int tos) +@@ -3692,8 +3717,11 @@ int rdma_listen(struct rdma_cm_id *id, int backlog) + ret = -ENOSYS; + goto err; + } +- } else +- cma_listen_on_all(id_priv); ++ } else { ++ ret = cma_listen_on_all(id_priv); ++ if (ret) ++ goto err; ++ } + + return 0; + err: +@@ -4773,69 +4801,6 @@ static struct notifier_block cma_nb = { + .notifier_call = cma_netdev_callback + }; + +-static int cma_add_one(struct ib_device *device) +-{ +- struct cma_device *cma_dev; +- struct rdma_id_private *id_priv; +- unsigned int i; +- unsigned long supported_gids = 0; +- int ret; +- +- cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL); +- if (!cma_dev) +- return -ENOMEM; +- +- cma_dev->device = device; +- cma_dev->default_gid_type = kcalloc(device->phys_port_cnt, +- sizeof(*cma_dev->default_gid_type), +- GFP_KERNEL); +- if (!cma_dev->default_gid_type) { +- ret = -ENOMEM; +- goto free_cma_dev; +- } +- +- cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt, +- sizeof(*cma_dev->default_roce_tos), +- GFP_KERNEL); +- if (!cma_dev->default_roce_tos) { +- ret = -ENOMEM; +- goto free_gid_type; +- } +- +- rdma_for_each_port (device, i) { +- supported_gids = roce_gid_type_mask_support(device, i); +- WARN_ON(!supported_gids); +- if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE)) +- cma_dev->default_gid_type[i - rdma_start_port(device)] = +- 
CMA_PREFERRED_ROCE_GID_TYPE; +- else +- cma_dev->default_gid_type[i - rdma_start_port(device)] = +- find_first_bit(&supported_gids, BITS_PER_LONG); +- cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0; +- } +- +- init_completion(&cma_dev->comp); +- refcount_set(&cma_dev->refcount, 1); +- INIT_LIST_HEAD(&cma_dev->id_list); +- ib_set_client_data(device, &cma_client, cma_dev); +- +- mutex_lock(&lock); +- list_add_tail(&cma_dev->list, &dev_list); +- list_for_each_entry(id_priv, &listen_any_list, list) +- cma_listen_on_dev(id_priv, cma_dev); +- mutex_unlock(&lock); +- +- trace_cm_add_one(device); +- return 0; +- +-free_gid_type: +- kfree(cma_dev->default_gid_type); +- +-free_cma_dev: +- kfree(cma_dev); +- return ret; +-} +- + static void cma_send_device_removal_put(struct rdma_id_private *id_priv) + { + struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL }; +@@ -4898,6 +4863,80 @@ static void cma_process_remove(struct cma_device *cma_dev) + wait_for_completion(&cma_dev->comp); + } + ++static int cma_add_one(struct ib_device *device) ++{ ++ struct rdma_id_private *to_destroy; ++ struct cma_device *cma_dev; ++ struct rdma_id_private *id_priv; ++ unsigned int i; ++ unsigned long supported_gids = 0; ++ int ret; ++ ++ cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL); ++ if (!cma_dev) ++ return -ENOMEM; ++ ++ cma_dev->device = device; ++ cma_dev->default_gid_type = kcalloc(device->phys_port_cnt, ++ sizeof(*cma_dev->default_gid_type), ++ GFP_KERNEL); ++ if (!cma_dev->default_gid_type) { ++ ret = -ENOMEM; ++ goto free_cma_dev; ++ } ++ ++ cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt, ++ sizeof(*cma_dev->default_roce_tos), ++ GFP_KERNEL); ++ if (!cma_dev->default_roce_tos) { ++ ret = -ENOMEM; ++ goto free_gid_type; ++ } ++ ++ rdma_for_each_port (device, i) { ++ supported_gids = roce_gid_type_mask_support(device, i); ++ WARN_ON(!supported_gids); ++ if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE)) ++ cma_dev->default_gid_type[i - 
rdma_start_port(device)] = ++ CMA_PREFERRED_ROCE_GID_TYPE; ++ else ++ cma_dev->default_gid_type[i - rdma_start_port(device)] = ++ find_first_bit(&supported_gids, BITS_PER_LONG); ++ cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0; ++ } ++ ++ init_completion(&cma_dev->comp); ++ refcount_set(&cma_dev->refcount, 1); ++ INIT_LIST_HEAD(&cma_dev->id_list); ++ ib_set_client_data(device, &cma_client, cma_dev); ++ ++ mutex_lock(&lock); ++ list_add_tail(&cma_dev->list, &dev_list); ++ list_for_each_entry(id_priv, &listen_any_list, list) { ++ ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); ++ if (ret) ++ goto free_listen; ++ } ++ mutex_unlock(&lock); ++ ++ trace_cm_add_one(device); ++ return 0; ++ ++free_listen: ++ list_del(&cma_dev->list); ++ mutex_unlock(&lock); ++ ++ /* cma_process_remove() will delete to_destroy */ ++ cma_process_remove(cma_dev); ++ kfree(cma_dev->default_roce_tos); ++free_gid_type: ++ kfree(cma_dev->default_gid_type); ++ ++free_cma_dev: ++ kfree(cma_dev); ++ return ret; ++} ++ + static void cma_remove_one(struct ib_device *device, void *client_data) + { + struct cma_device *cma_dev = client_data; +diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c +index a3b1fc84cdcab..4a041511b70ec 100644 +--- a/drivers/infiniband/core/device.c ++++ b/drivers/infiniband/core/device.c +@@ -1374,9 +1374,6 @@ int ib_register_device(struct ib_device *device, const char *name, + } + + ret = enable_device_and_get(device); +- dev_set_uevent_suppress(&device->dev, false); +- /* Mark for userspace that device is ready */ +- kobject_uevent(&device->dev.kobj, KOBJ_ADD); + if (ret) { + void (*dealloc_fn)(struct ib_device *); + +@@ -1396,8 +1393,12 @@ int ib_register_device(struct ib_device *device, const char *name, + ib_device_put(device); + __ib_unregister_device(device); + device->ops.dealloc_driver = dealloc_fn; ++ dev_set_uevent_suppress(&device->dev, false); + return ret; + } ++ dev_set_uevent_suppress(&device->dev, false); ++ /* 
Mark for userspace that device is ready */ ++ kobject_uevent(&device->dev.kobj, KOBJ_ADD); + ib_device_put(device); + + return 0; +diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c +index 302f898c5833f..9ec6971056fa8 100644 +--- a/drivers/infiniband/core/uverbs_std_types_device.c ++++ b/drivers/infiniband/core/uverbs_std_types_device.c +@@ -317,8 +317,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)( + struct ib_device *ib_dev; + size_t user_entry_size; + ssize_t num_entries; +- size_t max_entries; +- size_t num_bytes; ++ int max_entries; + u32 flags; + int ret; + +@@ -336,19 +335,16 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)( + attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES, + user_entry_size); + if (max_entries <= 0) +- return -EINVAL; ++ return max_entries ?: -EINVAL; + + ucontext = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + +- if (check_mul_overflow(max_entries, sizeof(*entries), &num_bytes)) +- return -EINVAL; +- +- entries = uverbs_zalloc(attrs, num_bytes); +- if (!entries) +- return -ENOMEM; ++ entries = uverbs_kcalloc(attrs, max_entries, sizeof(*entries)); ++ if (IS_ERR(entries)) ++ return PTR_ERR(entries); + + num_entries = rdma_query_gid_table(ib_dev, entries, max_entries); + if (num_entries < 0) +diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c +index 9b22bb553e8b3..dc58564417292 100644 +--- a/drivers/infiniband/core/uverbs_std_types_mr.c ++++ b/drivers/infiniband/core/uverbs_std_types_mr.c +@@ -33,6 +33,7 @@ + #include "rdma_core.h" + #include "uverbs.h" + #include <rdma/uverbs_std_types.h> ++#include "restrack.h" + + static int uverbs_free_mr(struct ib_uobject *uobject, + enum rdma_remove_reason why, +@@ -134,6 +135,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)( + atomic_inc(&pd->usecnt); + atomic_inc(&dm->usecnt); + ++ 
rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); ++ rdma_restrack_set_name(&mr->res, NULL); ++ rdma_restrack_add(&mr->res); + uobj->object = mr; + + uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE); +diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c +index 740f8454b6b46..3d895cc41c3ad 100644 +--- a/drivers/infiniband/core/verbs.c ++++ b/drivers/infiniband/core/verbs.c +@@ -1698,8 +1698,10 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, + slave = rdma_lag_get_ah_roce_slave(qp->device, + &attr->ah_attr, + GFP_KERNEL); +- if (IS_ERR(slave)) ++ if (IS_ERR(slave)) { ++ ret = PTR_ERR(slave); + goto out_av; ++ } + attr->xmit_slave = slave; + } + } +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c +index cf3db96283976..266de55f57192 100644 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c +@@ -1657,8 +1657,8 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq, + srq->qplib_srq.max_wqe = entries; + + srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge; +- srq->qplib_srq.wqe_size = +- bnxt_re_get_rwqe_size(srq->qplib_srq.max_sge); ++ /* 128 byte wqe size for SRQ . So use max sges */ ++ srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges); + srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit; + srq->srq_limit = srq_init_attr->attr.srq_limit; + srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id; +@@ -2078,6 +2078,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, + goto out; + } + qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state); ++ qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state); + qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 
1 : 0; + qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access); + qp_attr->pkey_index = qplib_qp->pkey_index; +diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c +index 28349ed508854..d6cfefc269ee3 100644 +--- a/drivers/infiniband/hw/cxgb4/cq.c ++++ b/drivers/infiniband/hw/cxgb4/cq.c +@@ -1008,6 +1008,9 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + if (attr->flags) + return -EINVAL; + ++ if (entries < 1 || entries > ibdev->attrs.max_cqe) ++ return -EINVAL; ++ + if (vector >= rhp->rdev.lldi.nciq) + return -EINVAL; + +diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c +index 75b06db60f7c2..7dd3b6097226f 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_ah.c ++++ b/drivers/infiniband/hw/hns/hns_roce_ah.c +@@ -31,13 +31,13 @@ + */ + + #include <linux/platform_device.h> ++#include <linux/pci.h> + #include <rdma/ib_addr.h> + #include <rdma/ib_cache.h> + #include "hns_roce_device.h" + +-#define HNS_ROCE_PORT_NUM_SHIFT 24 +-#define HNS_ROCE_VLAN_SL_BIT_MASK 7 +-#define HNS_ROCE_VLAN_SL_SHIFT 13 ++#define VLAN_SL_MASK 7 ++#define VLAN_SL_SHIFT 13 + + static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr) + { +@@ -58,47 +58,44 @@ static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr) + int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, + struct ib_udata *udata) + { +- struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device); +- const struct ib_gid_attr *gid_attr; +- struct device *dev = hr_dev->dev; +- struct hns_roce_ah *ah = to_hr_ah(ibah); + struct rdma_ah_attr *ah_attr = init_attr->ah_attr; + const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); +- u16 vlan_id = 0xffff; +- bool vlan_en = false; +- int ret; +- +- gid_attr = ah_attr->grh.sgid_attr; +- ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL); +- if (ret) +- return ret; +- +- /* Get mac address */ +- memcpy(ah->av.mac, 
ah_attr->roce.dmac, ETH_ALEN); +- +- if (vlan_id < VLAN_N_VID) { +- vlan_en = true; +- vlan_id |= (rdma_ah_get_sl(ah_attr) & +- HNS_ROCE_VLAN_SL_BIT_MASK) << +- HNS_ROCE_VLAN_SL_SHIFT; +- } ++ struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device); ++ struct hns_roce_ah *ah = to_hr_ah(ibah); ++ int ret = 0; + + ah->av.port = rdma_ah_get_port_num(ah_attr); + ah->av.gid_index = grh->sgid_index; +- ah->av.vlan_id = vlan_id; +- ah->av.vlan_en = vlan_en; +- dev_dbg(dev, "gid_index = 0x%x,vlan_id = 0x%x\n", ah->av.gid_index, +- ah->av.vlan_id); + + if (rdma_ah_get_static_rate(ah_attr)) + ah->av.stat_rate = IB_RATE_10_GBPS; + +- memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE); +- ah->av.sl = rdma_ah_get_sl(ah_attr); ++ ah->av.hop_limit = grh->hop_limit; + ah->av.flowlabel = grh->flow_label; + ah->av.udp_sport = get_ah_udp_sport(ah_attr); ++ ah->av.sl = rdma_ah_get_sl(ah_attr); ++ ah->av.tclass = get_tclass(grh); + +- return 0; ++ memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE); ++ memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN); ++ ++ /* HIP08 needs to record vlan info in Address Vector */ ++ if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) { ++ ah->av.vlan_en = 0; ++ ++ ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr, ++ &ah->av.vlan_id, NULL); ++ if (ret) ++ return ret; ++ ++ if (ah->av.vlan_id < VLAN_N_VID) { ++ ah->av.vlan_en = 1; ++ ah->av.vlan_id |= (rdma_ah_get_sl(ah_attr) & VLAN_SL_MASK) << ++ VLAN_SL_SHIFT; ++ } ++ } ++ ++ return ret; + } + + int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) +diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c +index 809b22aa5056c..da346129f6e9e 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_cq.c ++++ b/drivers/infiniband/hw/hns/hns_roce_cq.c +@@ -274,7 +274,7 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr, + + if (udata) { + ret = ib_copy_from_udata(&ucmd, udata, +- min(sizeof(ucmd), udata->inlen)); ++ 
min(udata->inlen, sizeof(ucmd))); + if (ret) { + ibdev_err(ibdev, "Failed to copy CQ udata, err %d\n", + ret); +@@ -313,7 +313,8 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr, + + if (udata) { + resp.cqn = hr_cq->cqn; +- ret = ib_copy_to_udata(udata, &resp, sizeof(resp)); ++ ret = ib_copy_to_udata(udata, &resp, ++ min(udata->outlen, sizeof(resp))); + if (ret) + goto err_cqc; + } +diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h +index 6d2acff69f982..1ea87f92aabbe 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_device.h ++++ b/drivers/infiniband/hw/hns/hns_roce_device.h +@@ -547,7 +547,7 @@ struct hns_roce_av { + u8 dgid[HNS_ROCE_GID_SIZE]; + u8 mac[ETH_ALEN]; + u16 vlan_id; +- bool vlan_en; ++ u8 vlan_en; + }; + + struct hns_roce_ah { +@@ -1132,6 +1132,14 @@ static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift) + return ilog2(to_hr_hem_entries_count(count, buf_shift)); + } + ++#define DSCP_SHIFT 2 ++ ++static inline u8 get_tclass(const struct ib_global_route *grh) ++{ ++ return grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP ? 
++ grh->traffic_class >> DSCP_SHIFT : grh->traffic_class; ++} ++ + int hns_roce_init_uar_table(struct hns_roce_dev *dev); + int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar); + void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar); +diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c +index 7487cf3d2c37a..66f9f036ef946 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_hem.c ++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c +@@ -1017,7 +1017,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev, + + void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev) + { +- if (hr_dev->caps.srqc_entry_sz) ++ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->srq_table.table); + hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table); +@@ -1027,7 +1027,7 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev) + if (hr_dev->caps.cqc_timer_entry_sz) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->cqc_timer_table); +- if (hr_dev->caps.sccc_sz) ++ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->qp_table.sccc_table); + if (hr_dev->caps.trrl_entry_sz) +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +index 0468028ffe390..5c29c7d8c50e6 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +@@ -214,25 +214,20 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, + return 0; + } + +-static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr, +- unsigned int *sge_ind, unsigned int valid_num_sge) ++static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge, ++ unsigned int *sge_ind, unsigned int cnt) + { + struct hns_roce_v2_wqe_data_seg *dseg; +- unsigned int cnt = valid_num_sge; +- struct ib_sge *sge = wr->sg_list; + unsigned int idx = *sge_ind; 
+ +- if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { +- cnt -= HNS_ROCE_SGE_IN_WQE; +- sge += HNS_ROCE_SGE_IN_WQE; +- } +- + while (cnt > 0) { + dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1)); +- set_data_seg_v2(dseg, sge); +- idx++; ++ if (likely(sge->length)) { ++ set_data_seg_v2(dseg, sge); ++ idx++; ++ cnt--; ++ } + sge++; +- cnt--; + } + + *sge_ind = idx; +@@ -340,7 +335,8 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, + } + } + +- set_extend_sge(qp, wr, sge_ind, valid_num_sge); ++ set_extend_sge(qp, wr->sg_list + i, sge_ind, ++ valid_num_sge - HNS_ROCE_SGE_IN_WQE); + } + + roce_set_field(rc_sq_wqe->byte_16, +@@ -433,8 +429,6 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp, + unsigned int curr_idx = *sge_idx; + int valid_num_sge; + u32 msg_len = 0; +- bool loopback; +- u8 *smac; + int ret; + + valid_num_sge = calc_wr_sge_num(wr, &msg_len); +@@ -457,13 +451,6 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp, + roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_5_M, + V2_UD_SEND_WQE_BYTE_48_DMAC_5_S, ah->av.mac[5]); + +- /* MAC loopback */ +- smac = (u8 *)hr_dev->dev_addr[qp->port]; +- loopback = ether_addr_equal_unaligned(ah->av.mac, smac) ? 
1 : 0; +- +- roce_set_bit(ud_sq_wqe->byte_40, +- V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback); +- + ud_sq_wqe->msg_len = cpu_to_le32(msg_len); + + /* Set sig attr */ +@@ -495,8 +482,6 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp, + roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M, + V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn); + +- roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M, +- V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id); + roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M, + V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit); + roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M, +@@ -508,14 +493,21 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp, + roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_PORTN_M, + V2_UD_SEND_WQE_BYTE_40_PORTN_S, qp->port); + +- roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S, +- ah->av.vlan_en ? 1 : 0); + roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M, + V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S, ah->av.gid_index); + ++ if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) { ++ roce_set_bit(ud_sq_wqe->byte_40, ++ V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S, ++ ah->av.vlan_en); ++ roce_set_field(ud_sq_wqe->byte_36, ++ V2_UD_SEND_WQE_BYTE_36_VLAN_M, ++ V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id); ++ } ++ + memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN_V2); + +- set_extend_sge(qp, wr, &curr_idx, valid_num_sge); ++ set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge); + + *sge_idx = curr_idx; + +@@ -4468,15 +4460,11 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, + roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M, + V2_QPC_BYTE_24_HOP_LIMIT_S, 0); + +- if (is_udp) +- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, +- V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2); +- else +- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, +- 
V2_QPC_BYTE_24_TC_S, grh->traffic_class); +- ++ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, ++ V2_QPC_BYTE_24_TC_S, get_tclass(&attr->ah_attr.grh)); + roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, + V2_QPC_BYTE_24_TC_S, 0); ++ + roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, + V2_QPC_BYTE_28_FL_S, grh->flow_label); + roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, +diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c +index afeffafc59f90..ae721fa61e0e4 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_main.c ++++ b/drivers/infiniband/hw/hns/hns_roce_main.c +@@ -325,7 +325,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx, + + resp.cqe_size = hr_dev->caps.cqe_sz; + +- ret = ib_copy_to_udata(udata, &resp, sizeof(resp)); ++ ret = ib_copy_to_udata(udata, &resp, ++ min(udata->outlen, sizeof(resp))); + if (ret) + goto error_fail_copy_to_udata; + +@@ -631,7 +632,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) + goto err_unmap_trrl; + } + +- if (hr_dev->caps.srqc_entry_sz) { ++ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) { + ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table, + HEM_TYPE_SRQC, + hr_dev->caps.srqc_entry_sz, +@@ -643,7 +644,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) + } + } + +- if (hr_dev->caps.sccc_sz) { ++ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { + ret = hns_roce_init_hem_table(hr_dev, + &hr_dev->qp_table.sccc_table, + HEM_TYPE_SCCC, +@@ -687,11 +688,11 @@ err_unmap_qpc_timer: + hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table); + + err_unmap_ctx: +- if (hr_dev->caps.sccc_sz) ++ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->qp_table.sccc_table); + err_unmap_srq: +- if (hr_dev->caps.srqc_entry_sz) ++ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) + hns_roce_cleanup_hem_table(hr_dev, 
&hr_dev->srq_table.table); + + err_unmap_cq: +diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c +index 98f69496adb49..f78fa1d3d8075 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_pd.c ++++ b/drivers/infiniband/hw/hns/hns_roce_pd.c +@@ -70,16 +70,17 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) + } + + if (udata) { +- struct hns_roce_ib_alloc_pd_resp uresp = {.pdn = pd->pdn}; ++ struct hns_roce_ib_alloc_pd_resp resp = {.pdn = pd->pdn}; + +- if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { ++ ret = ib_copy_to_udata(udata, &resp, ++ min(udata->outlen, sizeof(resp))); ++ if (ret) { + hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn); +- ibdev_err(ib_dev, "failed to copy to udata\n"); +- return -EFAULT; ++ ibdev_err(ib_dev, "failed to copy to udata, ret = %d\n", ret); + } + } + +- return 0; ++ return ret; + } + + int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) +diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c +index 6c081dd985fc9..ef1452215b17d 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_qp.c ++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c +@@ -286,7 +286,7 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) + } + } + +- if (hr_dev->caps.sccc_sz) { ++ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { + /* Alloc memory for SCC CTX */ + ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table, + hr_qp->qpn); +@@ -432,7 +432,12 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt, + } + + hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT; +- hr_qp->sge.sge_cnt = cnt; ++ ++ /* If the number of extended sge is not zero, they MUST use the ++ * space of HNS_HW_PAGE_SIZE at least. ++ */ ++ hr_qp->sge.sge_cnt = cnt ? 
++ max(cnt, (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE) : 0; + + return 0; + } +@@ -860,9 +865,12 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + } + + if (udata) { +- if (ib_copy_from_udata(ucmd, udata, sizeof(*ucmd))) { +- ibdev_err(ibdev, "Failed to copy QP ucmd\n"); +- return -EFAULT; ++ ret = ib_copy_from_udata(ucmd, udata, ++ min(udata->inlen, sizeof(*ucmd))); ++ if (ret) { ++ ibdev_err(ibdev, ++ "failed to copy QP ucmd, ret = %d\n", ret); ++ return ret; + } + + ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd); +diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c +index 8caf74e44efd9..75d74f4bb52c9 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_srq.c ++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c +@@ -300,7 +300,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, + srq->max_gs = init_attr->attr.max_sge; + + if (udata) { +- ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); ++ ret = ib_copy_from_udata(&ucmd, udata, ++ min(udata->inlen, sizeof(ucmd))); + if (ret) { + ibdev_err(ibdev, "Failed to copy SRQ udata, err %d\n", + ret); +@@ -343,11 +344,10 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, + resp.srqn = srq->srqn; + + if (udata) { +- if (ib_copy_to_udata(udata, &resp, +- min(udata->outlen, sizeof(resp)))) { +- ret = -EFAULT; ++ ret = ib_copy_to_udata(udata, &resp, ++ min(udata->outlen, sizeof(resp))); ++ if (ret) + goto err_srqc_alloc; +- } + } + + return 0; +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c +index b261797b258fd..971694e781b65 100644 +--- a/drivers/infiniband/hw/mlx5/mr.c ++++ b/drivers/infiniband/hw/mlx5/mr.c +@@ -642,6 +642,7 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) + if (mlx5_mr_cache_invalidate(mr)) { + detach_mr_from_cache(mr); + destroy_mkey(dev, mr); ++ kfree(mr); + return; + } + +@@ -1247,10 +1248,8 @@ err_1: + } + + static void set_mr_fields(struct mlx5_ib_dev *dev, 
struct mlx5_ib_mr *mr, +- int npages, u64 length, int access_flags) ++ u64 length, int access_flags) + { +- mr->npages = npages; +- atomic_add(npages, &dev->mdev->priv.reg_pages); + mr->ibmr.lkey = mr->mmkey.key; + mr->ibmr.rkey = mr->mmkey.key; + mr->ibmr.length = length; +@@ -1290,8 +1289,7 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr, + + kfree(in); + +- mr->umem = NULL; +- set_mr_fields(dev, mr, 0, length, acc); ++ set_mr_fields(dev, mr, length, acc); + + return &mr->ibmr; + +@@ -1419,7 +1417,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); + + mr->umem = umem; +- set_mr_fields(dev, mr, npages, length, access_flags); ++ mr->npages = npages; ++ atomic_add(mr->npages, &dev->mdev->priv.reg_pages); ++ set_mr_fields(dev, mr, length, access_flags); + + if (xlt_with_umr && !(access_flags & IB_ACCESS_ON_DEMAND)) { + /* +@@ -1531,8 +1531,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, + mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", + start, virt_addr, length, access_flags); + +- atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); +- + if (!mr->umem) + return -EINVAL; + +@@ -1553,12 +1551,17 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, + * used. 
+ */ + flags |= IB_MR_REREG_TRANS; ++ atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); ++ mr->npages = 0; + ib_umem_release(mr->umem); + mr->umem = NULL; ++ + err = mr_umem_get(dev, addr, len, access_flags, &mr->umem, + &npages, &page_shift, &ncont, &order); + if (err) + goto err; ++ mr->npages = ncont; ++ atomic_add(mr->npages, &dev->mdev->priv.reg_pages); + } + + if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags, +@@ -1609,7 +1612,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, + goto err; + } + +- set_mr_fields(dev, mr, npages, len, access_flags); ++ set_mr_fields(dev, mr, len, access_flags); + + return 0; + +diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c +index 119b2573c9a08..26c3408dcacae 100644 +--- a/drivers/infiniband/hw/mthca/mthca_cq.c ++++ b/drivers/infiniband/hw/mthca/mthca_cq.c +@@ -604,7 +604,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev, + entry->byte_len = MTHCA_ATOMIC_BYTE_LEN; + break; + default: +- entry->opcode = MTHCA_OPCODE_INVALID; ++ entry->opcode = 0xFF; + break; + } + } else { +diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h +index 9dbbf4d16796a..a445160de3e16 100644 +--- a/drivers/infiniband/hw/mthca/mthca_dev.h ++++ b/drivers/infiniband/hw/mthca/mthca_dev.h +@@ -105,7 +105,6 @@ enum { + MTHCA_OPCODE_ATOMIC_CS = 0x11, + MTHCA_OPCODE_ATOMIC_FA = 0x12, + MTHCA_OPCODE_BIND_MW = 0x18, +- MTHCA_OPCODE_INVALID = 0xff + }; + + enum { +diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c +index af3923bf0a36b..d4917646641aa 100644 +--- a/drivers/infiniband/sw/rxe/rxe_req.c ++++ b/drivers/infiniband/sw/rxe/rxe_req.c +@@ -634,7 +634,8 @@ next_wqe: + } + + if (unlikely(qp_type(qp) == IB_QPT_RC && +- qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) { ++ psn_compare(qp->req.psn, (qp->comp.psn + ++ RXE_MAX_UNACKED_PSNS)) > 0)) { + qp->req.wait_psn = 1; + goto exit; + } +diff 
--git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c +index f298adc02acba..d54a77ebe1184 100644 +--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c ++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c +@@ -1640,10 +1640,8 @@ static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con) + return err; + } + err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS); +- if (err) { ++ if (err) + rtrs_err(s, "Resolving route failed, err: %d\n", err); +- destroy_con_cq_qp(con); +- } + + return err; + } +@@ -1837,8 +1835,8 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id, + cm_err = rtrs_rdma_route_resolved(con); + break; + case RDMA_CM_EVENT_ESTABLISHED: +- con->cm_err = rtrs_rdma_conn_established(con, ev); +- if (likely(!con->cm_err)) { ++ cm_err = rtrs_rdma_conn_established(con, ev); ++ if (likely(!cm_err)) { + /* + * Report success and wake up. Here we abuse state_wq, + * i.e. wake up without state change, but we set cm_err. +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c +index d6f93601712e4..1cb778aff3c59 100644 +--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c ++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c +@@ -1328,17 +1328,42 @@ static void rtrs_srv_dev_release(struct device *dev) + kfree(srv); + } + +-static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx, +- const uuid_t *paths_uuid) ++static void free_srv(struct rtrs_srv *srv) ++{ ++ int i; ++ ++ WARN_ON(refcount_read(&srv->refcount)); ++ for (i = 0; i < srv->queue_depth; i++) ++ mempool_free(srv->chunks[i], chunk_pool); ++ kfree(srv->chunks); ++ mutex_destroy(&srv->paths_mutex); ++ mutex_destroy(&srv->paths_ev_mutex); ++ /* last put to release the srv structure */ ++ put_device(&srv->dev); ++} ++ ++static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx, ++ const uuid_t *paths_uuid) + { + struct rtrs_srv *srv; + int i; + ++ mutex_lock(&ctx->srv_mutex); ++ list_for_each_entry(srv, &ctx->srv_list, ctx_list) { ++ 
if (uuid_equal(&srv->paths_uuid, paths_uuid) && ++ refcount_inc_not_zero(&srv->refcount)) { ++ mutex_unlock(&ctx->srv_mutex); ++ return srv; ++ } ++ } ++ ++ /* need to allocate a new srv */ + srv = kzalloc(sizeof(*srv), GFP_KERNEL); +- if (!srv) ++ if (!srv) { ++ mutex_unlock(&ctx->srv_mutex); + return NULL; ++ } + +- refcount_set(&srv->refcount, 1); + INIT_LIST_HEAD(&srv->paths_list); + mutex_init(&srv->paths_mutex); + mutex_init(&srv->paths_ev_mutex); +@@ -1347,6 +1372,8 @@ static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx, + srv->ctx = ctx; + device_initialize(&srv->dev); + srv->dev.release = rtrs_srv_dev_release; ++ list_add(&srv->ctx_list, &ctx->srv_list); ++ mutex_unlock(&ctx->srv_mutex); + + srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks), + GFP_KERNEL); +@@ -1358,7 +1385,7 @@ static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx, + if (!srv->chunks[i]) + goto err_free_chunks; + } +- list_add(&srv->ctx_list, &ctx->srv_list); ++ refcount_set(&srv->refcount, 1); + + return srv; + +@@ -1369,52 +1396,9 @@ err_free_chunks: + + err_free_srv: + kfree(srv); +- + return NULL; + } + +-static void free_srv(struct rtrs_srv *srv) +-{ +- int i; +- +- WARN_ON(refcount_read(&srv->refcount)); +- for (i = 0; i < srv->queue_depth; i++) +- mempool_free(srv->chunks[i], chunk_pool); +- kfree(srv->chunks); +- mutex_destroy(&srv->paths_mutex); +- mutex_destroy(&srv->paths_ev_mutex); +- /* last put to release the srv structure */ +- put_device(&srv->dev); +-} +- +-static inline struct rtrs_srv *__find_srv_and_get(struct rtrs_srv_ctx *ctx, +- const uuid_t *paths_uuid) +-{ +- struct rtrs_srv *srv; +- +- list_for_each_entry(srv, &ctx->srv_list, ctx_list) { +- if (uuid_equal(&srv->paths_uuid, paths_uuid) && +- refcount_inc_not_zero(&srv->refcount)) +- return srv; +- } +- +- return NULL; +-} +- +-static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx, +- const uuid_t *paths_uuid) +-{ +- struct rtrs_srv *srv; +- +- mutex_lock(&ctx->srv_mutex); +- 
srv = __find_srv_and_get(ctx, paths_uuid); +- if (!srv) +- srv = __alloc_srv(ctx, paths_uuid); +- mutex_unlock(&ctx->srv_mutex); +- +- return srv; +-} +- + static void put_srv(struct rtrs_srv *srv) + { + if (refcount_dec_and_test(&srv->refcount)) { +@@ -1813,7 +1797,11 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id, + } + recon_cnt = le16_to_cpu(msg->recon_cnt); + srv = get_or_create_srv(ctx, &msg->paths_uuid); +- if (!srv) { ++ /* ++ * "refcount == 0" happens if a previous thread calls get_or_create_srv ++ * allocate srv, but chunks of srv are not allocated yet. ++ */ ++ if (!srv || refcount_read(&srv->refcount) == 0) { + err = -ENOMEM; + goto reject_w_err; + } +diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c +index d6c924032aaa8..dd16f7b3c7ef6 100644 +--- a/drivers/input/keyboard/omap4-keypad.c ++++ b/drivers/input/keyboard/omap4-keypad.c +@@ -186,12 +186,8 @@ static int omap4_keypad_open(struct input_dev *input) + return 0; + } + +-static void omap4_keypad_close(struct input_dev *input) ++static void omap4_keypad_stop(struct omap4_keypad *keypad_data) + { +- struct omap4_keypad *keypad_data = input_get_drvdata(input); +- +- disable_irq(keypad_data->irq); +- + /* Disable interrupts and wake-up events */ + kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE, + OMAP4_VAL_IRQDISABLE); +@@ -200,7 +196,15 @@ static void omap4_keypad_close(struct input_dev *input) + /* clear pending interrupts */ + kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS, + kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)); ++} ++ ++static void omap4_keypad_close(struct input_dev *input) ++{ ++ struct omap4_keypad *keypad_data; + ++ keypad_data = input_get_drvdata(input); ++ disable_irq(keypad_data->irq); ++ omap4_keypad_stop(keypad_data); + enable_irq(keypad_data->irq); + + pm_runtime_put_sync(input->dev.parent); +@@ -223,13 +227,37 @@ static int omap4_keypad_parse_dt(struct device *dev, + return 0; + } + ++static int 
omap4_keypad_check_revision(struct device *dev, ++ struct omap4_keypad *keypad_data) ++{ ++ unsigned int rev; ++ ++ rev = __raw_readl(keypad_data->base + OMAP4_KBD_REVISION); ++ rev &= 0x03 << 30; ++ rev >>= 30; ++ switch (rev) { ++ case KBD_REVISION_OMAP4: ++ keypad_data->reg_offset = 0x00; ++ keypad_data->irqreg_offset = 0x00; ++ break; ++ case KBD_REVISION_OMAP5: ++ keypad_data->reg_offset = 0x10; ++ keypad_data->irqreg_offset = 0x0c; ++ break; ++ default: ++ dev_err(dev, "Keypad reports unsupported revision %d", rev); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ + static int omap4_keypad_probe(struct platform_device *pdev) + { + struct omap4_keypad *keypad_data; + struct input_dev *input_dev; + struct resource *res; + unsigned int max_keys; +- int rev; + int irq; + int error; + +@@ -269,41 +297,33 @@ static int omap4_keypad_probe(struct platform_device *pdev) + goto err_release_mem; + } + ++ pm_runtime_enable(&pdev->dev); + + /* + * Enable clocks for the keypad module so that we can read + * revision register. 
+ */ +- pm_runtime_enable(&pdev->dev); + error = pm_runtime_get_sync(&pdev->dev); + if (error) { + dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n"); +- goto err_unmap; +- } +- rev = __raw_readl(keypad_data->base + OMAP4_KBD_REVISION); +- rev &= 0x03 << 30; +- rev >>= 30; +- switch (rev) { +- case KBD_REVISION_OMAP4: +- keypad_data->reg_offset = 0x00; +- keypad_data->irqreg_offset = 0x00; +- break; +- case KBD_REVISION_OMAP5: +- keypad_data->reg_offset = 0x10; +- keypad_data->irqreg_offset = 0x0c; +- break; +- default: +- dev_err(&pdev->dev, +- "Keypad reports unsupported revision %d", rev); +- error = -EINVAL; +- goto err_pm_put_sync; ++ pm_runtime_put_noidle(&pdev->dev); ++ } else { ++ error = omap4_keypad_check_revision(&pdev->dev, ++ keypad_data); ++ if (!error) { ++ /* Ensure device does not raise interrupts */ ++ omap4_keypad_stop(keypad_data); ++ } ++ pm_runtime_put_sync(&pdev->dev); + } ++ if (error) ++ goto err_pm_disable; + + /* input device allocation */ + keypad_data->input = input_dev = input_allocate_device(); + if (!input_dev) { + error = -ENOMEM; +- goto err_pm_put_sync; ++ goto err_pm_disable; + } + + input_dev->name = pdev->name; +@@ -349,28 +369,25 @@ static int omap4_keypad_probe(struct platform_device *pdev) + goto err_free_keymap; + } + +- device_init_wakeup(&pdev->dev, true); +- pm_runtime_put_sync(&pdev->dev); +- + error = input_register_device(keypad_data->input); + if (error < 0) { + dev_err(&pdev->dev, "failed to register input device\n"); +- goto err_pm_disable; ++ goto err_free_irq; + } + ++ device_init_wakeup(&pdev->dev, true); + platform_set_drvdata(pdev, keypad_data); ++ + return 0; + +-err_pm_disable: +- pm_runtime_disable(&pdev->dev); ++err_free_irq: + free_irq(keypad_data->irq, keypad_data); + err_free_keymap: + kfree(keypad_data->keymap); + err_free_input: + input_free_device(input_dev); +-err_pm_put_sync: +- pm_runtime_put_sync(&pdev->dev); +-err_unmap: ++err_pm_disable: ++ pm_runtime_disable(&pdev->dev); + 
iounmap(keypad_data->base); + err_release_mem: + release_mem_region(res->start, resource_size(res)); +diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c +index 7eba66fbef580..812edfced86ee 100644 +--- a/drivers/input/mouse/cyapa_gen6.c ++++ b/drivers/input/mouse/cyapa_gen6.c +@@ -573,7 +573,7 @@ static int cyapa_pip_retrieve_data_structure(struct cyapa *cyapa, + + memset(&cmd, 0, sizeof(cmd)); + put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd.head.addr); +- put_unaligned_le16(sizeof(cmd), &cmd.head.length - 2); ++ put_unaligned_le16(sizeof(cmd) - 2, &cmd.head.length); + cmd.head.report_id = PIP_APP_CMD_REPORT_ID; + cmd.head.cmd_code = PIP_RETRIEVE_DATA_STRUCTURE; + put_unaligned_le16(read_offset, &cmd.read_offset); +diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c +index 8fd7fc39c4fd7..ff97897feaf2a 100644 +--- a/drivers/input/touchscreen/ads7846.c ++++ b/drivers/input/touchscreen/ads7846.c +@@ -33,6 +33,7 @@ + #include <linux/regulator/consumer.h> + #include <linux/module.h> + #include <asm/irq.h> ++#include <asm/unaligned.h> + + /* + * This code has been heavily tested on a Nokia 770, and lightly +@@ -199,6 +200,26 @@ struct ads7846 { + #define REF_ON (READ_12BIT_DFR(x, 1, 1)) + #define REF_OFF (READ_12BIT_DFR(y, 0, 0)) + ++static int get_pendown_state(struct ads7846 *ts) ++{ ++ if (ts->get_pendown_state) ++ return ts->get_pendown_state(); ++ ++ return !gpio_get_value(ts->gpio_pendown); ++} ++ ++static void ads7846_report_pen_up(struct ads7846 *ts) ++{ ++ struct input_dev *input = ts->input; ++ ++ input_report_key(input, BTN_TOUCH, 0); ++ input_report_abs(input, ABS_PRESSURE, 0); ++ input_sync(input); ++ ++ ts->pendown = false; ++ dev_vdbg(&ts->spi->dev, "UP\n"); ++} ++ + /* Must be called with ts->lock held */ + static void ads7846_stop(struct ads7846 *ts) + { +@@ -215,6 +236,10 @@ static void ads7846_stop(struct ads7846 *ts) + static void ads7846_restart(struct ads7846 *ts) + { + if 
(!ts->disabled && !ts->suspended) { ++ /* Check if pen was released since last stop */ ++ if (ts->pendown && !get_pendown_state(ts)) ++ ads7846_report_pen_up(ts); ++ + /* Tell IRQ thread that it may poll the device. */ + ts->stopped = false; + mb(); +@@ -411,7 +436,7 @@ static int ads7845_read12_ser(struct device *dev, unsigned command) + + if (status == 0) { + /* BE12 value, then padding */ +- status = be16_to_cpu(*((u16 *)&req->sample[1])); ++ status = get_unaligned_be16(&req->sample[1]); + status = status >> 3; + status &= 0x0fff; + } +@@ -606,14 +631,6 @@ static const struct attribute_group ads784x_attr_group = { + + /*--------------------------------------------------------------------------*/ + +-static int get_pendown_state(struct ads7846 *ts) +-{ +- if (ts->get_pendown_state) +- return ts->get_pendown_state(); +- +- return !gpio_get_value(ts->gpio_pendown); +-} +- + static void null_wait_for_sync(void) + { + } +@@ -786,10 +803,11 @@ static void ads7846_report_state(struct ads7846 *ts) + /* compute touch pressure resistance using equation #2 */ + Rt = z2; + Rt -= z1; +- Rt *= x; + Rt *= ts->x_plate_ohms; ++ Rt = DIV_ROUND_CLOSEST(Rt, 16); ++ Rt *= x; + Rt /= z1; +- Rt = (Rt + 2047) >> 12; ++ Rt = DIV_ROUND_CLOSEST(Rt, 256); + } else { + Rt = 0; + } +@@ -868,16 +886,8 @@ static irqreturn_t ads7846_irq(int irq, void *handle) + msecs_to_jiffies(TS_POLL_PERIOD)); + } + +- if (ts->pendown && !ts->stopped) { +- struct input_dev *input = ts->input; +- +- input_report_key(input, BTN_TOUCH, 0); +- input_report_abs(input, ABS_PRESSURE, 0); +- input_sync(input); +- +- ts->pendown = false; +- dev_vdbg(&ts->spi->dev, "UP\n"); +- } ++ if (ts->pendown && !ts->stopped) ++ ads7846_report_pen_up(ts); + + return IRQ_HANDLED; + } +diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +index 702fbaa6c9ada..ef37ccfa82562 100644 +--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c ++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +@@ 
-10,8 +10,15 @@ + + struct qcom_smmu { + struct arm_smmu_device smmu; ++ bool bypass_quirk; ++ u8 bypass_cbndx; + }; + ++static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu) ++{ ++ return container_of(smmu, struct qcom_smmu, smmu); ++} ++ + static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { + { .compatible = "qcom,adreno" }, + { .compatible = "qcom,mdp4" }, +@@ -23,6 +30,87 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { + { } + }; + ++static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu) ++{ ++ unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1); ++ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); ++ u32 reg; ++ u32 smr; ++ int i; ++ ++ /* ++ * With some firmware versions writes to S2CR of type FAULT are ++ * ignored, and writing BYPASS will end up written as FAULT in the ++ * register. Perform a write to S2CR to detect if this is the case and ++ * if so reserve a context bank to emulate bypass streams. 
++ */ ++ reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) | ++ FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) | ++ FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT); ++ arm_smmu_gr0_write(smmu, last_s2cr, reg); ++ reg = arm_smmu_gr0_read(smmu, last_s2cr); ++ if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) { ++ qsmmu->bypass_quirk = true; ++ qsmmu->bypass_cbndx = smmu->num_context_banks - 1; ++ ++ set_bit(qsmmu->bypass_cbndx, smmu->context_map); ++ ++ reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS); ++ arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg); ++ } ++ ++ for (i = 0; i < smmu->num_mapping_groups; i++) { ++ smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i)); ++ ++ if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) { ++ smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr); ++ smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr); ++ smmu->smrs[i].valid = true; ++ ++ smmu->s2crs[i].type = S2CR_TYPE_BYPASS; ++ smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT; ++ smmu->s2crs[i].cbndx = 0xff; ++ } ++ } ++ ++ return 0; ++} ++ ++static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) ++{ ++ struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; ++ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); ++ u32 cbndx = s2cr->cbndx; ++ u32 type = s2cr->type; ++ u32 reg; ++ ++ if (qsmmu->bypass_quirk) { ++ if (type == S2CR_TYPE_BYPASS) { ++ /* ++ * Firmware with quirky S2CR handling will substitute ++ * BYPASS writes with FAULT, so point the stream to the ++ * reserved context bank and ask for translation on the ++ * stream ++ */ ++ type = S2CR_TYPE_TRANS; ++ cbndx = qsmmu->bypass_cbndx; ++ } else if (type == S2CR_TYPE_FAULT) { ++ /* ++ * Firmware with quirky S2CR handling will ignore FAULT ++ * writes, so trick it to write FAULT by asking for a ++ * BYPASS. 
++ */ ++ type = S2CR_TYPE_BYPASS; ++ cbndx = 0xff; ++ } ++ } ++ ++ reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) | ++ FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) | ++ FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg); ++ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg); ++} ++ + static int qcom_smmu_def_domain_type(struct device *dev) + { + const struct of_device_id *match = +@@ -61,8 +149,10 @@ static int qcom_smmu500_reset(struct arm_smmu_device *smmu) + } + + static const struct arm_smmu_impl qcom_smmu_impl = { ++ .cfg_probe = qcom_smmu_cfg_probe, + .def_domain_type = qcom_smmu_def_domain_type, + .reset = qcom_smmu500_reset, ++ .write_s2cr = qcom_smmu_write_s2cr, + }; + + struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu) +diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c +index dad7fa86fbd4c..bcbacf22331d6 100644 +--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c ++++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c +@@ -929,9 +929,16 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) + static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) + { + struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; +- u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) | +- FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) | +- FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg); ++ u32 reg; ++ ++ if (smmu->impl && smmu->impl->write_s2cr) { ++ smmu->impl->write_s2cr(smmu, idx); ++ return; ++ } ++ ++ reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) | ++ FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) | ++ FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg); + + if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs && + smmu->smrs[idx].valid) +diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h +index 1a746476927c9..b71647eaa319b 100644 +--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h ++++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h +@@ -436,6 +436,7 @@ struct arm_smmu_impl { + int 
(*alloc_context_bank)(struct arm_smmu_domain *smmu_domain, + struct arm_smmu_device *smmu, + struct device *dev, int start); ++ void (*write_s2cr)(struct arm_smmu_device *smmu, int idx); + }; + + #define INVALID_SMENDX -1 +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c +index a49afa11673cc..c9da9e93f545c 100644 +--- a/drivers/iommu/intel/iommu.c ++++ b/drivers/iommu/intel/iommu.c +@@ -5387,6 +5387,7 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain, + aux_domain_remove_dev(to_dmar_domain(domain), dev); + } + ++#ifdef CONFIG_INTEL_IOMMU_SVM + /* + * 2D array for converting and sanitizing IOMMU generic TLB granularity to + * VT-d granularity. Invalidation is typically included in the unmap operation +@@ -5433,7 +5434,6 @@ static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules) + return order_base_2(nr_pages); + } + +-#ifdef CONFIG_INTEL_IOMMU_SVM + static int + intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev, + struct iommu_cache_invalidate_info *inv_info) +diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c +index 23a3b877f7f1d..ede02dc2bcd0b 100644 +--- a/drivers/irqchip/irq-alpine-msi.c ++++ b/drivers/irqchip/irq-alpine-msi.c +@@ -165,8 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain, + return 0; + + err_sgi: +- while (--i >= 0) +- irq_domain_free_irqs_parent(domain, virq, i); ++ irq_domain_free_irqs_parent(domain, virq, i - 1); + alpine_msix_free_sgi(priv, sgi, nr_irqs); + return err; + } +diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c +index b2ab8db439d92..532d0ae172d9f 100644 +--- a/drivers/irqchip/irq-ti-sci-inta.c ++++ b/drivers/irqchip/irq-ti-sci-inta.c +@@ -726,7 +726,7 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev) + INIT_LIST_HEAD(&inta->vint_list); + mutex_init(&inta->vint_mutex); + +- dev_info(dev, "Interrupt Aggregator domain %d created\n", pdev->id); 
++ dev_info(dev, "Interrupt Aggregator domain %d created\n", inta->ti_sci_id); + + return 0; + } +diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c +index ac9d6d658e65c..fe8fad22bcf96 100644 +--- a/drivers/irqchip/irq-ti-sci-intr.c ++++ b/drivers/irqchip/irq-ti-sci-intr.c +@@ -129,7 +129,7 @@ static void ti_sci_intr_irq_domain_free(struct irq_domain *domain, + * @virq: Corresponding Linux virtual IRQ number + * @hwirq: Corresponding hwirq for the IRQ within this IRQ domain + * +- * Returns parent irq if all went well else appropriate error pointer. ++ * Returns intr output irq if all went well else appropriate error pointer. + */ + static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain, + unsigned int virq, u32 hwirq) +@@ -173,7 +173,7 @@ static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain, + if (err) + goto err_msg; + +- return p_hwirq; ++ return out_irq; + + err_msg: + irq_domain_free_irqs_parent(domain, virq, 1); +@@ -198,19 +198,19 @@ static int ti_sci_intr_irq_domain_alloc(struct irq_domain *domain, + struct irq_fwspec *fwspec = data; + unsigned long hwirq; + unsigned int flags; +- int err, p_hwirq; ++ int err, out_irq; + + err = ti_sci_intr_irq_domain_translate(domain, fwspec, &hwirq, &flags); + if (err) + return err; + +- p_hwirq = ti_sci_intr_alloc_parent_irq(domain, virq, hwirq); +- if (p_hwirq < 0) +- return p_hwirq; ++ out_irq = ti_sci_intr_alloc_parent_irq(domain, virq, hwirq); ++ if (out_irq < 0) ++ return out_irq; + + irq_domain_set_hwirq_and_chip(domain, virq, hwirq, + &ti_sci_intr_irq_chip, +- (void *)(uintptr_t)p_hwirq); ++ (void *)(uintptr_t)out_irq); + + return 0; + } +diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c +index bd39e9de6ecf7..5dc63c20b67ea 100644 +--- a/drivers/irqchip/qcom-pdc.c ++++ b/drivers/irqchip/qcom-pdc.c +@@ -159,6 +159,8 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type) + { + int pin_out = d->hwirq; + enum 
pdc_irq_config_bits pdc_type; ++ enum pdc_irq_config_bits old_pdc_type; ++ int ret; + + if (pin_out == GPIO_NO_WAKE_IRQ) + return 0; +@@ -187,9 +189,26 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type) + return -EINVAL; + } + ++ old_pdc_type = pdc_reg_read(IRQ_i_CFG, pin_out); + pdc_reg_write(IRQ_i_CFG, pin_out, pdc_type); + +- return irq_chip_set_type_parent(d, type); ++ ret = irq_chip_set_type_parent(d, type); ++ if (ret) ++ return ret; ++ ++ /* ++ * When we change types the PDC can give a phantom interrupt. ++ * Clear it. Specifically the phantom shows up when reconfiguring ++ * polarity of interrupt without changing the state of the signal ++ * but let's be consistent and clear it always. ++ * ++ * Doing this works because we have IRQCHIP_SET_TYPE_MASKED so the ++ * interrupt will be cleared before the rest of the system sees it. ++ */ ++ if (old_pdc_type != pdc_type) ++ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, false); ++ ++ return 0; + } + + static struct irq_chip qcom_pdc_gic_chip = { +diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c +index 5fb4f24aeb2e8..f13117eed976d 100644 +--- a/drivers/leds/leds-lp50xx.c ++++ b/drivers/leds/leds-lp50xx.c +@@ -487,8 +487,10 @@ static int lp50xx_probe_dt(struct lp50xx *priv) + */ + mc_led_info = devm_kcalloc(priv->dev, LP50XX_LEDS_PER_MODULE, + sizeof(*mc_led_info), GFP_KERNEL); +- if (!mc_led_info) +- return -ENOMEM; ++ if (!mc_led_info) { ++ ret = -ENOMEM; ++ goto child_out; ++ } + + fwnode_for_each_child_node(child, led_node) { + ret = fwnode_property_read_u32(led_node, "color", +diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c +index e6fd47365b588..68fbf0b66fadd 100644 +--- a/drivers/leds/leds-netxbig.c ++++ b/drivers/leds/leds-netxbig.c +@@ -448,31 +448,39 @@ static int netxbig_leds_get_of_pdata(struct device *dev, + gpio_ext = devm_kzalloc(dev, sizeof(*gpio_ext), GFP_KERNEL); + if (!gpio_ext) { + of_node_put(gpio_ext_np); +- return -ENOMEM; 
++ ret = -ENOMEM; ++ goto put_device; + } + ret = netxbig_gpio_ext_get(dev, gpio_ext_dev, gpio_ext); + of_node_put(gpio_ext_np); + if (ret) +- return ret; ++ goto put_device; + pdata->gpio_ext = gpio_ext; + + /* Timers (optional) */ + ret = of_property_count_u32_elems(np, "timers"); + if (ret > 0) { +- if (ret % 3) +- return -EINVAL; ++ if (ret % 3) { ++ ret = -EINVAL; ++ goto put_device; ++ } ++ + num_timers = ret / 3; + timers = devm_kcalloc(dev, num_timers, sizeof(*timers), + GFP_KERNEL); +- if (!timers) +- return -ENOMEM; ++ if (!timers) { ++ ret = -ENOMEM; ++ goto put_device; ++ } + for (i = 0; i < num_timers; i++) { + u32 tmp; + + of_property_read_u32_index(np, "timers", 3 * i, + &timers[i].mode); +- if (timers[i].mode >= NETXBIG_LED_MODE_NUM) +- return -EINVAL; ++ if (timers[i].mode >= NETXBIG_LED_MODE_NUM) { ++ ret = -EINVAL; ++ goto put_device; ++ } + of_property_read_u32_index(np, "timers", + 3 * i + 1, &tmp); + timers[i].delay_on = tmp; +@@ -488,12 +496,15 @@ static int netxbig_leds_get_of_pdata(struct device *dev, + num_leds = of_get_available_child_count(np); + if (!num_leds) { + dev_err(dev, "No LED subnodes found in DT\n"); +- return -ENODEV; ++ ret = -ENODEV; ++ goto put_device; + } + + leds = devm_kcalloc(dev, num_leds, sizeof(*leds), GFP_KERNEL); +- if (!leds) +- return -ENOMEM; ++ if (!leds) { ++ ret = -ENOMEM; ++ goto put_device; ++ } + + led = leds; + for_each_available_child_of_node(np, child) { +@@ -574,6 +585,8 @@ static int netxbig_leds_get_of_pdata(struct device *dev, + + err_node_put: + of_node_put(child); ++put_device: ++ put_device(gpio_ext_dev); + return ret; + } + +diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c +index 8c5bdc3847ee7..880fc8def5309 100644 +--- a/drivers/leds/leds-turris-omnia.c ++++ b/drivers/leds/leds-turris-omnia.c +@@ -98,9 +98,9 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led, + } + + ret = of_property_read_u32(np, "color", &color); +- if (ret || 
color != LED_COLOR_ID_MULTI) { ++ if (ret || color != LED_COLOR_ID_RGB) { + dev_warn(dev, +- "Node %pOF: must contain 'color' property with value LED_COLOR_ID_MULTI\n", ++ "Node %pOF: must contain 'color' property with value LED_COLOR_ID_RGB\n", + np); + return 0; + } +diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c +index f3d1a460fbce1..0ee3272491501 100644 +--- a/drivers/macintosh/adb-iop.c ++++ b/drivers/macintosh/adb-iop.c +@@ -25,6 +25,7 @@ + static struct adb_request *current_req; + static struct adb_request *last_req; + static unsigned int autopoll_devs; ++static u8 autopoll_addr; + + static enum adb_iop_state { + idle, +@@ -41,6 +42,11 @@ static int adb_iop_autopoll(int); + static void adb_iop_poll(void); + static int adb_iop_reset_bus(void); + ++/* ADB command byte structure */ ++#define ADDR_MASK 0xF0 ++#define OP_MASK 0x0C ++#define TALK 0x0C ++ + struct adb_driver adb_iop_driver = { + .name = "ISM IOP", + .probe = adb_iop_probe, +@@ -78,10 +84,7 @@ static void adb_iop_complete(struct iop_msg *msg) + + local_irq_save(flags); + +- if (current_req->reply_expected) +- adb_iop_state = awaiting_reply; +- else +- adb_iop_done(); ++ adb_iop_state = awaiting_reply; + + local_irq_restore(flags); + } +@@ -89,38 +92,52 @@ static void adb_iop_complete(struct iop_msg *msg) + /* + * Listen for ADB messages from the IOP. + * +- * This will be called when unsolicited messages (usually replies to TALK +- * commands or autopoll packets) are received. ++ * This will be called when unsolicited IOP messages are received. ++ * These IOP messages can carry ADB autopoll responses and also occur ++ * after explicit ADB commands. + */ + + static void adb_iop_listen(struct iop_msg *msg) + { + struct adb_iopmsg *amsg = (struct adb_iopmsg *)msg->message; ++ u8 addr = (amsg->cmd & ADDR_MASK) >> 4; ++ u8 op = amsg->cmd & OP_MASK; + unsigned long flags; + bool req_done = false; + + local_irq_save(flags); + +- /* Handle a timeout. 
Timeout packets seem to occur even after +- * we've gotten a valid reply to a TALK, presumably because of +- * autopolling. ++ /* Responses to Talk commands may be unsolicited as they are ++ * produced when the IOP polls devices. They are mostly timeouts. + */ +- +- if (amsg->flags & ADB_IOP_EXPLICIT) { ++ if (op == TALK && ((1 << addr) & autopoll_devs)) ++ autopoll_addr = addr; ++ ++ switch (amsg->flags & (ADB_IOP_EXPLICIT | ++ ADB_IOP_AUTOPOLL | ++ ADB_IOP_TIMEOUT)) { ++ case ADB_IOP_EXPLICIT: ++ case ADB_IOP_EXPLICIT | ADB_IOP_TIMEOUT: + if (adb_iop_state == awaiting_reply) { + struct adb_request *req = current_req; + +- req->reply_len = amsg->count + 1; +- memcpy(req->reply, &amsg->cmd, req->reply_len); ++ if (req->reply_expected) { ++ req->reply_len = amsg->count + 1; ++ memcpy(req->reply, &amsg->cmd, req->reply_len); ++ } + + req_done = true; + } +- } else if (!(amsg->flags & ADB_IOP_TIMEOUT)) { +- adb_input(&amsg->cmd, amsg->count + 1, +- amsg->flags & ADB_IOP_AUTOPOLL); ++ break; ++ case ADB_IOP_AUTOPOLL: ++ if (((1 << addr) & autopoll_devs) && ++ amsg->cmd == ADB_READREG(addr, 0)) ++ adb_input(&amsg->cmd, amsg->count + 1, 1); ++ break; + } +- +- msg->reply[0] = autopoll_devs ? ADB_IOP_AUTOPOLL : 0; ++ msg->reply[0] = autopoll_addr ? ADB_IOP_AUTOPOLL : 0; ++ msg->reply[1] = 0; ++ msg->reply[2] = autopoll_addr ? ADB_READREG(autopoll_addr, 0) : 0; + iop_complete_message(msg); + + if (req_done) +@@ -233,6 +250,9 @@ static void adb_iop_set_ap_complete(struct iop_msg *msg) + struct adb_iopmsg *amsg = (struct adb_iopmsg *)msg->message; + + autopoll_devs = (amsg->data[1] << 8) | amsg->data[0]; ++ if (autopoll_devs & (1 << autopoll_addr)) ++ return; ++ autopoll_addr = autopoll_devs ? 
(ffs(autopoll_devs) - 1) : 0; + } + + static int adb_iop_autopoll(int devs) +diff --git a/drivers/mailbox/arm_mhu_db.c b/drivers/mailbox/arm_mhu_db.c +index 275efe4cca0c2..8eb66c4ecf5bf 100644 +--- a/drivers/mailbox/arm_mhu_db.c ++++ b/drivers/mailbox/arm_mhu_db.c +@@ -180,7 +180,7 @@ static void mhu_db_shutdown(struct mbox_chan *chan) + + /* Reset channel */ + mhu_db_mbox_clear_irq(chan); +- kfree(chan->con_priv); ++ devm_kfree(mbox->dev, chan->con_priv); + chan->con_priv = NULL; + } + +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c +index cd0478d44058b..5e306bba43751 100644 +--- a/drivers/md/dm-ioctl.c ++++ b/drivers/md/dm-ioctl.c +@@ -1600,6 +1600,7 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para + + if (!argc) { + DMWARN("Empty message received."); ++ r = -EINVAL; + goto out_argv; + } + +diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c +index 4aaf4820b6f62..f0e64e76fd793 100644 +--- a/drivers/md/md-cluster.c ++++ b/drivers/md/md-cluster.c +@@ -664,9 +664,27 @@ out: + * Takes the lock on the TOKEN lock resource so no other + * node can communicate while the operation is underway. + */ +-static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked) ++static int lock_token(struct md_cluster_info *cinfo) + { +- int error, set_bit = 0; ++ int error; ++ ++ error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX); ++ if (error) { ++ pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n", ++ __func__, __LINE__, error); ++ } else { ++ /* Lock the receive sequence */ ++ mutex_lock(&cinfo->recv_mutex); ++ } ++ return error; ++} ++ ++/* lock_comm() ++ * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel. 
++ */ ++static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked) ++{ ++ int rv, set_bit = 0; + struct mddev *mddev = cinfo->mddev; + + /* +@@ -677,34 +695,19 @@ static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked) + */ + if (mddev_locked && !test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, + &cinfo->state)) { +- error = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, ++ rv = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, + &cinfo->state); +- WARN_ON_ONCE(error); ++ WARN_ON_ONCE(rv); + md_wakeup_thread(mddev->thread); + set_bit = 1; + } +- error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX); +- if (set_bit) +- clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); + +- if (error) +- pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n", +- __func__, __LINE__, error); +- +- /* Lock the receive sequence */ +- mutex_lock(&cinfo->recv_mutex); +- return error; +-} +- +-/* lock_comm() +- * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel. 
+- */ +-static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked) +-{ + wait_event(cinfo->wait, + !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state)); +- +- return lock_token(cinfo, mddev_locked); ++ rv = lock_token(cinfo); ++ if (set_bit) ++ clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); ++ return rv; + } + + static void unlock_comm(struct md_cluster_info *cinfo) +@@ -784,9 +787,11 @@ static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg, + { + int ret; + +- lock_comm(cinfo, mddev_locked); +- ret = __sendmsg(cinfo, cmsg); +- unlock_comm(cinfo); ++ ret = lock_comm(cinfo, mddev_locked); ++ if (!ret) { ++ ret = __sendmsg(cinfo, cmsg); ++ unlock_comm(cinfo); ++ } + return ret; + } + +@@ -1061,7 +1066,7 @@ static int metadata_update_start(struct mddev *mddev) + return 0; + } + +- ret = lock_token(cinfo, 1); ++ ret = lock_token(cinfo); + clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); + return ret; + } +@@ -1255,7 +1260,10 @@ static void update_size(struct mddev *mddev, sector_t old_dev_sectors) + int raid_slot = -1; + + md_update_sb(mddev, 1); +- lock_comm(cinfo, 1); ++ if (lock_comm(cinfo, 1)) { ++ pr_err("%s: lock_comm failed\n", __func__); ++ return; ++ } + + memset(&cmsg, 0, sizeof(cmsg)); + cmsg.type = cpu_to_le32(METADATA_UPDATED); +@@ -1407,7 +1415,8 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev) + cmsg.type = cpu_to_le32(NEWDISK); + memcpy(cmsg.uuid, uuid, 16); + cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); +- lock_comm(cinfo, 1); ++ if (lock_comm(cinfo, 1)) ++ return -EAGAIN; + ret = __sendmsg(cinfo, &cmsg); + if (ret) { + unlock_comm(cinfo); +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 4136bd8142894..3be74cf3635fe 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -6948,8 +6948,10 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev) + goto busy; + + kick_rdev: +- if (mddev_is_clustered(mddev)) +- md_cluster_ops->remove_disk(mddev, 
rdev); ++ if (mddev_is_clustered(mddev)) { ++ if (md_cluster_ops->remove_disk(mddev, rdev)) ++ goto busy; ++ } + + md_kick_rdev_from_array(rdev); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); +@@ -7278,6 +7280,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks) + return -EINVAL; + if (mddev->sync_thread || + test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || ++ test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) || + mddev->reshape_position != MaxSector) + return -EBUSY; + +@@ -9645,8 +9648,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) + } + } + +- if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) +- update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); ++ if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) { ++ ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); ++ if (ret) ++ pr_warn("md: updating array disks failed. %d\n", ret); ++ } + + /* + * Since mddev->delta_disks has already updated in update_raid_disks, +diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c +index 88f90dfd368b1..ae17407e477a4 100644 +--- a/drivers/media/common/siano/smsdvb-main.c ++++ b/drivers/media/common/siano/smsdvb-main.c +@@ -1169,12 +1169,15 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev, + rc = dvb_create_media_graph(&client->adapter, true); + if (rc < 0) { + pr_err("dvb_create_media_graph failed %d\n", rc); +- goto client_error; ++ goto media_graph_error; + } + + pr_info("DVB interface registered.\n"); + return 0; + ++media_graph_error: ++ smsdvb_debugfs_release(client); ++ + client_error: + dvb_unregister_frontend(&client->frontend); + +diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c +index 1ef5af9a8c8bc..cee1a4817af99 100644 +--- a/drivers/media/i2c/imx214.c ++++ b/drivers/media/i2c/imx214.c +@@ -786,7 +786,7 @@ static int imx214_s_stream(struct v4l2_subdev *subdev, int enable) + if (ret < 0) + goto err_rpm_put; + } else { +- ret = 
imx214_start_streaming(imx214); ++ ret = imx214_stop_streaming(imx214); + if (ret < 0) + goto err_rpm_put; + pm_runtime_put(imx214->dev); +diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c +index 1cee45e353554..0ae66091a6962 100644 +--- a/drivers/media/i2c/imx219.c ++++ b/drivers/media/i2c/imx219.c +@@ -473,8 +473,8 @@ static const struct imx219_mode supported_modes[] = { + .width = 3280, + .height = 2464, + .crop = { +- .left = 0, +- .top = 0, ++ .left = IMX219_PIXEL_ARRAY_LEFT, ++ .top = IMX219_PIXEL_ARRAY_TOP, + .width = 3280, + .height = 2464 + }, +@@ -489,8 +489,8 @@ static const struct imx219_mode supported_modes[] = { + .width = 1920, + .height = 1080, + .crop = { +- .left = 680, +- .top = 692, ++ .left = 688, ++ .top = 700, + .width = 1920, + .height = 1080 + }, +@@ -505,8 +505,8 @@ static const struct imx219_mode supported_modes[] = { + .width = 1640, + .height = 1232, + .crop = { +- .left = 0, +- .top = 0, ++ .left = IMX219_PIXEL_ARRAY_LEFT, ++ .top = IMX219_PIXEL_ARRAY_TOP, + .width = 3280, + .height = 2464 + }, +@@ -521,8 +521,8 @@ static const struct imx219_mode supported_modes[] = { + .width = 640, + .height = 480, + .crop = { +- .left = 1000, +- .top = 752, ++ .left = 1008, ++ .top = 760, + .width = 1280, + .height = 960 + }, +@@ -1008,6 +1008,7 @@ static int imx219_get_selection(struct v4l2_subdev *sd, + return 0; + + case V4L2_SEL_TGT_CROP_DEFAULT: ++ case V4L2_SEL_TGT_CROP_BOUNDS: + sel->r.top = IMX219_PIXEL_ARRAY_TOP; + sel->r.left = IMX219_PIXEL_ARRAY_LEFT; + sel->r.width = IMX219_PIXEL_ARRAY_WIDTH; +diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c +index 03b4ed3a61b83..661208c9bfc5d 100644 +--- a/drivers/media/i2c/max2175.c ++++ b/drivers/media/i2c/max2175.c +@@ -503,7 +503,7 @@ static void max2175_set_bbfilter(struct max2175 *ctx) + } + } + +-static bool max2175_set_csm_mode(struct max2175 *ctx, ++static int max2175_set_csm_mode(struct max2175 *ctx, + enum max2175_csm_mode new_mode) + { + int ret = 
max2175_poll_csm_ready(ctx); +diff --git a/drivers/media/i2c/max9271.c b/drivers/media/i2c/max9271.c +index 0f6f7a092a463..c247db569bab0 100644 +--- a/drivers/media/i2c/max9271.c ++++ b/drivers/media/i2c/max9271.c +@@ -223,12 +223,12 @@ int max9271_enable_gpios(struct max9271_device *dev, u8 gpio_mask) + { + int ret; + +- ret = max9271_read(dev, 0x0f); ++ ret = max9271_read(dev, 0x0e); + if (ret < 0) + return 0; + + /* BIT(0) reserved: GPO is always enabled. */ +- ret |= gpio_mask | BIT(0); ++ ret |= (gpio_mask & ~BIT(0)); + ret = max9271_write(dev, 0x0e, ret); + if (ret < 0) { + dev_err(&dev->client->dev, "Failed to enable gpio (%d)\n", ret); +@@ -245,12 +245,12 @@ int max9271_disable_gpios(struct max9271_device *dev, u8 gpio_mask) + { + int ret; + +- ret = max9271_read(dev, 0x0f); ++ ret = max9271_read(dev, 0x0e); + if (ret < 0) + return 0; + + /* BIT(0) reserved: GPO cannot be disabled */ +- ret &= (~gpio_mask | BIT(0)); ++ ret &= ~(gpio_mask | BIT(0)); + ret = max9271_write(dev, 0x0e, ret); + if (ret < 0) { + dev_err(&dev->client->dev, "Failed to disable gpio (%d)\n", ret); +diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c +index 8d0254d0e5ea7..8f0812e859012 100644 +--- a/drivers/media/i2c/ov5640.c ++++ b/drivers/media/i2c/ov5640.c +@@ -1216,20 +1216,6 @@ static int ov5640_set_autogain(struct ov5640_dev *sensor, bool on) + BIT(1), on ? 0 : BIT(1)); + } + +-static int ov5640_set_stream_bt656(struct ov5640_dev *sensor, bool on) +-{ +- int ret; +- +- ret = ov5640_write_reg(sensor, OV5640_REG_CCIR656_CTRL00, +- on ? 0x1 : 0x00); +- if (ret) +- return ret; +- +- return ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0, on ? +- OV5640_REG_SYS_CTRL0_SW_PWUP : +- OV5640_REG_SYS_CTRL0_SW_PWDN); +-} +- + static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on) + { + return ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0, on ? 
+@@ -1994,13 +1980,13 @@ static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on) + static int ov5640_set_power_dvp(struct ov5640_dev *sensor, bool on) + { + unsigned int flags = sensor->ep.bus.parallel.flags; +- u8 pclk_pol = 0; +- u8 hsync_pol = 0; +- u8 vsync_pol = 0; ++ bool bt656 = sensor->ep.bus_type == V4L2_MBUS_BT656; ++ u8 polarities = 0; + int ret; + + if (!on) { + /* Reset settings to their default values. */ ++ ov5640_write_reg(sensor, OV5640_REG_CCIR656_CTRL00, 0x00); + ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x58); + ov5640_write_reg(sensor, OV5640_REG_POLARITY_CTRL00, 0x20); + ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x00); +@@ -2024,7 +2010,35 @@ static int ov5640_set_power_dvp(struct ov5640_dev *sensor, bool on) + * - VSYNC: active high + * - HREF: active low + * - PCLK: active low ++ * ++ * VSYNC & HREF are not configured if BT656 bus mode is selected + */ ++ ++ /* ++ * BT656 embedded synchronization configuration ++ * ++ * CCIR656 CTRL00 ++ * - [7]: SYNC code selection (0: auto generate sync code, ++ * 1: sync code from regs 0x4732-0x4735) ++ * - [6]: f value in CCIR656 SYNC code when fixed f value ++ * - [5]: Fixed f value ++ * - [4:3]: Blank toggle data options (00: data=1'h040/1'h200, ++ * 01: data from regs 0x4736-0x4738, 10: always keep 0) ++ * - [1]: Clip data disable ++ * - [0]: CCIR656 mode enable ++ * ++ * Default CCIR656 SAV/EAV mode with default codes ++ * SAV=0xff000080 & EAV=0xff00009d is enabled here with settings: ++ * - CCIR656 mode enable ++ * - auto generation of sync codes ++ * - blank toggle data 1'h040/1'h200 ++ * - clip reserved data (0x00 & 0xff changed to 0x01 & 0xfe) ++ */ ++ ret = ov5640_write_reg(sensor, OV5640_REG_CCIR656_CTRL00, ++ bt656 ? 
0x01 : 0x00); ++ if (ret) ++ return ret; ++ + /* + * configure parallel port control lines polarity + * +@@ -2035,29 +2049,26 @@ static int ov5640_set_power_dvp(struct ov5640_dev *sensor, bool on) + * datasheet and hardware, 0 is active high + * and 1 is active low...) + */ +- if (sensor->ep.bus_type == V4L2_MBUS_PARALLEL) { +- if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING) +- pclk_pol = 1; ++ if (!bt656) { + if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) +- hsync_pol = 1; ++ polarities |= BIT(1); + if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW) +- vsync_pol = 1; +- +- ret = ov5640_write_reg(sensor, OV5640_REG_POLARITY_CTRL00, +- (pclk_pol << 5) | (hsync_pol << 1) | +- vsync_pol); +- +- if (ret) +- return ret; ++ polarities |= BIT(0); + } ++ if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING) ++ polarities |= BIT(5); ++ ++ ret = ov5640_write_reg(sensor, OV5640_REG_POLARITY_CTRL00, polarities); ++ if (ret) ++ return ret; + + /* +- * powerdown MIPI TX/RX PHY & disable MIPI ++ * powerdown MIPI TX/RX PHY & enable DVP + * + * MIPI CONTROL 00 +- * 4: PWDN PHY TX +- * 3: PWDN PHY RX +- * 2: MIPI enable ++ * [4] = 1 : Power down MIPI HS Tx ++ * [3] = 1 : Power down MIPI LS Rx ++ * [2] = 0 : DVP enable (MIPI disable) + */ + ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x18); + if (ret) +@@ -2074,8 +2085,7 @@ static int ov5640_set_power_dvp(struct ov5640_dev *sensor, bool on) + * - [3:0]: D[9:6] output enable + */ + ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, +- sensor->ep.bus_type == V4L2_MBUS_PARALLEL ? +- 0x7f : 0x1f); ++ bt656 ? 
0x1f : 0x7f); + if (ret) + return ret; + +@@ -2925,8 +2935,6 @@ static int ov5640_s_stream(struct v4l2_subdev *sd, int enable) + + if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) + ret = ov5640_set_stream_mipi(sensor, enable); +- else if (sensor->ep.bus_type == V4L2_MBUS_BT656) +- ret = ov5640_set_stream_bt656(sensor, enable); + else + ret = ov5640_set_stream_dvp(sensor, enable); + +diff --git a/drivers/media/i2c/rdacm20.c b/drivers/media/i2c/rdacm20.c +index 1ed928c4ca70f..16bcb764b0e0d 100644 +--- a/drivers/media/i2c/rdacm20.c ++++ b/drivers/media/i2c/rdacm20.c +@@ -487,9 +487,18 @@ static int rdacm20_initialize(struct rdacm20_device *dev) + * Reset the sensor by cycling the OV10635 reset signal connected to the + * MAX9271 GPIO1 and verify communication with the OV10635. + */ +- max9271_clear_gpios(dev->serializer, MAX9271_GPIO1OUT); ++ ret = max9271_enable_gpios(dev->serializer, MAX9271_GPIO1OUT); ++ if (ret) ++ return ret; ++ ++ ret = max9271_clear_gpios(dev->serializer, MAX9271_GPIO1OUT); ++ if (ret) ++ return ret; + usleep_range(10000, 15000); +- max9271_set_gpios(dev->serializer, MAX9271_GPIO1OUT); ++ ++ ret = max9271_set_gpios(dev->serializer, MAX9271_GPIO1OUT); ++ if (ret) ++ return ret; + usleep_range(10000, 15000); + + again: +diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c +index 7d9401219a3ac..3b3221fd3fe8f 100644 +--- a/drivers/media/i2c/tvp5150.c ++++ b/drivers/media/i2c/tvp5150.c +@@ -2082,6 +2082,7 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np) + + ep_np = of_graph_get_endpoint_by_regs(np, TVP5150_PAD_VID_OUT, 0); + if (!ep_np) { ++ ret = -EINVAL; + dev_err(dev, "Error no output endpoint available\n"); + goto err_free; + } +diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c +index 4e598e937dfe2..1fcd131482e0e 100644 +--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c ++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c +@@ -791,6 +791,7 @@ static void 
cio2_vb2_return_all_buffers(struct cio2_queue *q, + atomic_dec(&q->bufs_queued); + vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf, + state); ++ q->bufs[i] = NULL; + } + } + } +@@ -1232,29 +1233,15 @@ static int cio2_subdev_get_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_format *fmt) + { + struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev); +- struct v4l2_subdev_format format; +- int ret; +- +- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { +- fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad); +- return 0; +- } + +- if (fmt->pad == CIO2_PAD_SINK) { +- format.which = V4L2_SUBDEV_FORMAT_ACTIVE; +- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, +- &format); ++ mutex_lock(&q->subdev_lock); + +- if (ret) +- return ret; +- /* update colorspace etc */ +- q->subdev_fmt.colorspace = format.format.colorspace; +- q->subdev_fmt.ycbcr_enc = format.format.ycbcr_enc; +- q->subdev_fmt.quantization = format.format.quantization; +- q->subdev_fmt.xfer_func = format.format.xfer_func; +- } ++ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) ++ fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad); ++ else ++ fmt->format = q->subdev_fmt; + +- fmt->format = q->subdev_fmt; ++ mutex_unlock(&q->subdev_lock); + + return 0; + } +@@ -1271,6 +1258,9 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_format *fmt) + { + struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev); ++ struct v4l2_mbus_framefmt *mbus; ++ u32 mbus_code = fmt->format.code; ++ unsigned int i; + + /* + * Only allow setting sink pad format; +@@ -1279,16 +1269,29 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd, + if (fmt->pad == CIO2_PAD_SOURCE) + return cio2_subdev_get_fmt(sd, cfg, fmt); + +- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { +- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format; +- } else { +- /* It's the sink, allow changing frame size */ +- q->subdev_fmt.width = fmt->format.width; +- q->subdev_fmt.height = fmt->format.height; +- 
q->subdev_fmt.code = fmt->format.code; +- fmt->format = q->subdev_fmt; ++ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) ++ mbus = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); ++ else ++ mbus = &q->subdev_fmt; ++ ++ fmt->format.code = formats[0].mbus_code; ++ ++ for (i = 0; i < ARRAY_SIZE(formats); i++) { ++ if (formats[i].mbus_code == fmt->format.code) { ++ fmt->format.code = mbus_code; ++ break; ++ } + } + ++ fmt->format.width = min_t(u32, fmt->format.width, CIO2_IMAGE_MAX_WIDTH); ++ fmt->format.height = min_t(u32, fmt->format.height, ++ CIO2_IMAGE_MAX_LENGTH); ++ fmt->format.field = V4L2_FIELD_NONE; ++ ++ mutex_lock(&q->subdev_lock); ++ *mbus = fmt->format; ++ mutex_unlock(&q->subdev_lock); ++ + return 0; + } + +@@ -1547,6 +1550,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q) + + /* Initialize miscellaneous variables */ + mutex_init(&q->lock); ++ mutex_init(&q->subdev_lock); + + /* Initialize formats to default values */ + fmt = &q->subdev_fmt; +@@ -1663,6 +1667,7 @@ fail_vdev_media_entity: + fail_subdev_media_entity: + cio2_fbpt_exit(q, &cio2->pci_dev->dev); + fail_fbpt: ++ mutex_destroy(&q->subdev_lock); + mutex_destroy(&q->lock); + + return r; +@@ -1675,6 +1680,7 @@ static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q) + v4l2_device_unregister_subdev(&q->subdev); + media_entity_cleanup(&q->subdev.entity); + cio2_fbpt_exit(q, &cio2->pci_dev->dev); ++ mutex_destroy(&q->subdev_lock); + mutex_destroy(&q->lock); + } + +diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h +index 549b08f88f0c7..146492383aa5b 100644 +--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h ++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h +@@ -335,6 +335,7 @@ struct cio2_queue { + + /* Subdev, /dev/v4l-subdevX */ + struct v4l2_subdev subdev; ++ struct mutex subdev_lock; /* Serialise acces to subdev_fmt field */ + struct media_pad subdev_pads[CIO2_PADS]; + struct v4l2_mbus_framefmt subdev_fmt; + 
atomic_t frame_sequence; +diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c +index d4f12c250f91a..526042d8afae5 100644 +--- a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c ++++ b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c +@@ -175,7 +175,7 @@ int netup_spi_init(struct netup_unidvb_dev *ndev) + struct spi_master *master; + struct netup_spi *nspi; + +- master = spi_alloc_master(&ndev->pci_dev->dev, ++ master = devm_spi_alloc_master(&ndev->pci_dev->dev, + sizeof(struct netup_spi)); + if (!master) { + dev_err(&ndev->pci_dev->dev, +@@ -208,6 +208,7 @@ int netup_spi_init(struct netup_unidvb_dev *ndev) + ndev->pci_slot, + ndev->pci_func); + if (!spi_new_device(master, &netup_spi_board)) { ++ spi_unregister_master(master); + ndev->spi = NULL; + dev_err(&ndev->pci_dev->dev, + "%s(): unable to create SPI device\n", __func__); +@@ -226,13 +227,13 @@ void netup_spi_release(struct netup_unidvb_dev *ndev) + if (!spi) + return; + ++ spi_unregister_master(spi->master); + spin_lock_irqsave(&spi->lock, flags); + reg = readw(&spi->regs->control_stat); + writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat); + reg = readw(&spi->regs->control_stat); + writew(reg & ~NETUP_SPI_CTRL_IMASK, &spi->regs->control_stat); + spin_unlock_irqrestore(&spi->lock, flags); +- spi_unregister_master(spi->master); + ndev->spi = NULL; + } + +diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c +index 129a1f8ebe1ad..73fc901ecf3db 100644 +--- a/drivers/media/pci/saa7146/mxb.c ++++ b/drivers/media/pci/saa7146/mxb.c +@@ -641,16 +641,17 @@ static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio * + struct mxb *mxb = (struct mxb *)dev->ext_priv; + + DEB_D("VIDIOC_S_AUDIO %d\n", a->index); +- if (mxb_inputs[mxb->cur_input].audioset & (1 << a->index)) { +- if (mxb->cur_audinput != a->index) { +- mxb->cur_audinput = a->index; +- tea6420_route(mxb, a->index); +- if (mxb->cur_audinput == 
0) +- mxb_update_audmode(mxb); +- } +- return 0; ++ if (a->index >= 32 || ++ !(mxb_inputs[mxb->cur_input].audioset & (1 << a->index))) ++ return -EINVAL; ++ ++ if (mxb->cur_audinput != a->index) { ++ mxb->cur_audinput = a->index; ++ tea6420_route(mxb, a->index); ++ if (mxb->cur_audinput == 0) ++ mxb_update_audmode(mxb); + } +- return -EINVAL; ++ return 0; + } + + #ifdef CONFIG_VIDEO_ADV_DEBUG +diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c +index 906ce86437ae3..d137b94869d82 100644 +--- a/drivers/media/pci/solo6x10/solo6x10-g723.c ++++ b/drivers/media/pci/solo6x10/solo6x10-g723.c +@@ -385,7 +385,7 @@ int solo_g723_init(struct solo_dev *solo_dev) + + ret = snd_ctl_add(card, snd_ctl_new1(&kctl, solo_dev)); + if (ret < 0) +- return ret; ++ goto snd_error; + + ret = solo_snd_pcm_init(solo_dev); + if (ret < 0) +diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c +index 227245ccaedc7..88a23bce569d9 100644 +--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c ++++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c +@@ -1306,6 +1306,7 @@ static int mtk_jpeg_clk_init(struct mtk_jpeg_dev *jpeg) + jpeg->variant->clks); + if (ret) { + dev_err(&pdev->dev, "failed to get jpeg clock:%d\n", ret); ++ put_device(&pdev->dev); + return ret; + } + +@@ -1331,6 +1332,12 @@ static void mtk_jpeg_job_timeout_work(struct work_struct *work) + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR); + v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx); + } ++ ++static inline void mtk_jpeg_clk_release(struct mtk_jpeg_dev *jpeg) ++{ ++ put_device(jpeg->larb); ++} ++ + static int mtk_jpeg_probe(struct platform_device *pdev) + { + struct mtk_jpeg_dev *jpeg; +@@ -1435,6 +1442,7 @@ err_m2m_init: + v4l2_device_unregister(&jpeg->v4l2_dev); + + err_dev_register: ++ mtk_jpeg_clk_release(jpeg); + + err_clk_init: + +@@ -1452,6 +1460,7 @@ static int mtk_jpeg_remove(struct platform_device *pdev) + 
video_device_release(jpeg->vdev); + v4l2_m2m_release(jpeg->m2m_dev); + v4l2_device_unregister(&jpeg->v4l2_dev); ++ mtk_jpeg_clk_release(jpeg); + + return 0; + } +diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c +index 36dfe3fc056a4..ddee7046ce422 100644 +--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c ++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c +@@ -47,11 +47,14 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev) + dec_clk->clk_info = devm_kcalloc(&pdev->dev, + dec_clk->clk_num, sizeof(*clk_info), + GFP_KERNEL); +- if (!dec_clk->clk_info) +- return -ENOMEM; ++ if (!dec_clk->clk_info) { ++ ret = -ENOMEM; ++ goto put_device; ++ } + } else { + mtk_v4l2_err("Failed to get vdec clock count"); +- return -EINVAL; ++ ret = -EINVAL; ++ goto put_device; + } + + for (i = 0; i < dec_clk->clk_num; i++) { +@@ -60,25 +63,29 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev) + "clock-names", i, &clk_info->clk_name); + if (ret) { + mtk_v4l2_err("Failed to get clock name id = %d", i); +- return ret; ++ goto put_device; + } + clk_info->vcodec_clk = devm_clk_get(&pdev->dev, + clk_info->clk_name); + if (IS_ERR(clk_info->vcodec_clk)) { + mtk_v4l2_err("devm_clk_get (%d)%s fail", i, + clk_info->clk_name); +- return PTR_ERR(clk_info->vcodec_clk); ++ ret = PTR_ERR(clk_info->vcodec_clk); ++ goto put_device; + } + } + + pm_runtime_enable(&pdev->dev); +- ++ return 0; ++put_device: ++ put_device(pm->larbvdec); + return ret; + } + + void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev) + { + pm_runtime_disable(dev->pm.dev); ++ put_device(dev->pm.larbvdec); + } + + void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm) +diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c +index ee22902aaa71c..1a047c25679fa 100644 +--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c ++++ 
b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c +@@ -47,14 +47,16 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev) + node = of_parse_phandle(dev->of_node, "mediatek,larb", 1); + if (!node) { + mtk_v4l2_err("no mediatek,larb found"); +- return -ENODEV; ++ ret = -ENODEV; ++ goto put_larbvenc; + } + + pdev = of_find_device_by_node(node); + of_node_put(node); + if (!pdev) { + mtk_v4l2_err("no mediatek,larb device found"); +- return -ENODEV; ++ ret = -ENODEV; ++ goto put_larbvenc; + } + + pm->larbvenclt = &pdev->dev; +@@ -67,11 +69,14 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev) + enc_clk->clk_info = devm_kcalloc(&pdev->dev, + enc_clk->clk_num, sizeof(*clk_info), + GFP_KERNEL); +- if (!enc_clk->clk_info) +- return -ENOMEM; ++ if (!enc_clk->clk_info) { ++ ret = -ENOMEM; ++ goto put_larbvenclt; ++ } + } else { + mtk_v4l2_err("Failed to get venc clock count"); +- return -EINVAL; ++ ret = -EINVAL; ++ goto put_larbvenclt; + } + + for (i = 0; i < enc_clk->clk_num; i++) { +@@ -80,17 +85,24 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev) + "clock-names", i, &clk_info->clk_name); + if (ret) { + mtk_v4l2_err("venc failed to get clk name %d", i); +- return ret; ++ goto put_larbvenclt; + } + clk_info->vcodec_clk = devm_clk_get(&pdev->dev, + clk_info->clk_name); + if (IS_ERR(clk_info->vcodec_clk)) { + mtk_v4l2_err("venc devm_clk_get (%d)%s fail", i, + clk_info->clk_name); +- return PTR_ERR(clk_info->vcodec_clk); ++ ret = PTR_ERR(clk_info->vcodec_clk); ++ goto put_larbvenclt; + } + } + ++ return 0; ++ ++put_larbvenclt: ++ put_device(pm->larbvenclt); ++put_larbvenc: ++ put_device(pm->larbvenc); + return ret; + } + +diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c +index 6103aaf43987b..d5bfd6fff85b4 100644 +--- a/drivers/media/platform/qcom/venus/core.c ++++ b/drivers/media/platform/qcom/venus/core.c +@@ -355,12 +355,26 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev) 
+ if (ret) + return ret; + ++ if (pm_ops->core_power) { ++ ret = pm_ops->core_power(dev, POWER_OFF); ++ if (ret) ++ return ret; ++ } ++ + ret = icc_set_bw(core->cpucfg_path, 0, 0); + if (ret) +- return ret; ++ goto err_cpucfg_path; + +- if (pm_ops->core_power) +- ret = pm_ops->core_power(dev, POWER_OFF); ++ ret = icc_set_bw(core->video_path, 0, 0); ++ if (ret) ++ goto err_video_path; ++ ++ return ret; ++ ++err_video_path: ++ icc_set_bw(core->cpucfg_path, kbps_to_icc(1000), 0); ++err_cpucfg_path: ++ pm_ops->core_power(dev, POWER_ON); + + return ret; + } +@@ -371,16 +385,20 @@ static __maybe_unused int venus_runtime_resume(struct device *dev) + const struct venus_pm_ops *pm_ops = core->pm_ops; + int ret; + ++ ret = icc_set_bw(core->video_path, kbps_to_icc(20000), 0); ++ if (ret) ++ return ret; ++ ++ ret = icc_set_bw(core->cpucfg_path, kbps_to_icc(1000), 0); ++ if (ret) ++ return ret; ++ + if (pm_ops->core_power) { + ret = pm_ops->core_power(dev, POWER_ON); + if (ret) + return ret; + } + +- ret = icc_set_bw(core->cpucfg_path, 0, kbps_to_icc(1000)); +- if (ret) +- return ret; +- + return hfi_core_resume(core, false); + } + +diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c +index a9538c2cc3c9d..2946547a0df4a 100644 +--- a/drivers/media/platform/qcom/venus/pm_helpers.c ++++ b/drivers/media/platform/qcom/venus/pm_helpers.c +@@ -212,6 +212,16 @@ static int load_scale_bw(struct venus_core *core) + } + mutex_unlock(&core->lock); + ++ /* ++ * keep minimum bandwidth vote for "video-mem" path, ++ * so that clks can be disabled during vdec_session_release(). ++ * Actual bandwidth drop will be done during device supend ++ * so that device can power down without any warnings. 
++ */ ++ ++ if (!total_avg && !total_peak) ++ total_avg = kbps_to_icc(1000); ++ + dev_dbg(core->dev, VDBGL "total: avg_bw: %u, peak_bw: %u\n", + total_avg, total_peak); + +diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c +index ddee6ee37bab1..4afc5895bee74 100644 +--- a/drivers/media/rc/sunxi-cir.c ++++ b/drivers/media/rc/sunxi-cir.c +@@ -137,6 +137,8 @@ static irqreturn_t sunxi_ir_irq(int irqno, void *dev_id) + } else if (status & REG_RXSTA_RPE) { + ir_raw_event_set_idle(ir->rc, true); + ir_raw_event_handle(ir->rc); ++ } else { ++ ir_raw_event_handle(ir->rc); + } + + spin_unlock(&ir->ir_lock); +diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c +index c295f642d352c..158c8e28ed2cc 100644 +--- a/drivers/media/usb/gspca/gspca.c ++++ b/drivers/media/usb/gspca/gspca.c +@@ -1575,6 +1575,7 @@ out: + input_unregister_device(gspca_dev->input_dev); + #endif + v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler); ++ v4l2_device_unregister(&gspca_dev->v4l2_dev); + kfree(gspca_dev->usb_buf); + kfree(gspca_dev); + return ret; +diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c +index bfba06ea60e9d..2df736c029d6e 100644 +--- a/drivers/media/usb/tm6000/tm6000-video.c ++++ b/drivers/media/usb/tm6000/tm6000-video.c +@@ -461,11 +461,12 @@ static int tm6000_alloc_urb_buffers(struct tm6000_core *dev) + if (dev->urb_buffer) + return 0; + +- dev->urb_buffer = kmalloc_array(num_bufs, sizeof(void *), GFP_KERNEL); ++ dev->urb_buffer = kmalloc_array(num_bufs, sizeof(*dev->urb_buffer), ++ GFP_KERNEL); + if (!dev->urb_buffer) + return -ENOMEM; + +- dev->urb_dma = kmalloc_array(num_bufs, sizeof(dma_addr_t *), ++ dev->urb_dma = kmalloc_array(num_bufs, sizeof(*dev->urb_dma), + GFP_KERNEL); + if (!dev->urb_dma) + return -ENOMEM; +diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c +index d7bbe33840cb4..dfc53d11053fc 100644 +--- 
a/drivers/media/v4l2-core/v4l2-fwnode.c ++++ b/drivers/media/v4l2-core/v4l2-fwnode.c +@@ -93,7 +93,7 @@ v4l2_fwnode_bus_type_to_mbus(enum v4l2_fwnode_bus_type type) + const struct v4l2_fwnode_bus_conv *conv = + get_v4l2_fwnode_bus_conv_by_fwnode_bus(type); + +- return conv ? conv->mbus_type : V4L2_MBUS_UNKNOWN; ++ return conv ? conv->mbus_type : V4L2_MBUS_INVALID; + } + + static const char * +@@ -436,6 +436,10 @@ static int __v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode, + v4l2_fwnode_mbus_type_to_string(vep->bus_type), + vep->bus_type); + mbus_type = v4l2_fwnode_bus_type_to_mbus(bus_type); ++ if (mbus_type == V4L2_MBUS_INVALID) { ++ pr_debug("unsupported bus type %u\n", bus_type); ++ return -EINVAL; ++ } + + if (vep->bus_type != V4L2_MBUS_UNKNOWN) { + if (mbus_type != V4L2_MBUS_UNKNOWN && +diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig +index 00e013b14703e..cc2c83e1accfb 100644 +--- a/drivers/memory/Kconfig ++++ b/drivers/memory/Kconfig +@@ -128,7 +128,7 @@ config OMAP_GPMC_DEBUG + + config TI_EMIF_SRAM + tristate "Texas Instruments EMIF SRAM driver" +- depends on SOC_AM33XX || SOC_AM43XX || (ARM && COMPILE_TEST) ++ depends on SOC_AM33XX || SOC_AM43XX || (ARM && CPU_V7 && COMPILE_TEST) + depends on SRAM + help + This driver is for the EMIF module available on Texas Instruments +diff --git a/drivers/memory/jz4780-nemc.c b/drivers/memory/jz4780-nemc.c +index 3ec5cb0fce1ee..555f7ac3b7dd9 100644 +--- a/drivers/memory/jz4780-nemc.c ++++ b/drivers/memory/jz4780-nemc.c +@@ -291,6 +291,8 @@ static int jz4780_nemc_probe(struct platform_device *pdev) + nemc->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) ++ return -EINVAL; + + /* + * The driver currently only uses the registers up to offset +@@ -304,9 +306,9 @@ static int jz4780_nemc_probe(struct platform_device *pdev) + } + + nemc->base = devm_ioremap(dev, res->start, NEMC_REG_LEN); +- if (IS_ERR(nemc->base)) { ++ if (!nemc->base) { + dev_err(dev, "failed to get 
I/O memory\n"); +- return PTR_ERR(nemc->base); ++ return -ENOMEM; + } + + writel(0, nemc->base + NEMC_NFCSR); +diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c +index f2a33a1af8361..da0fdb4c75959 100644 +--- a/drivers/memory/renesas-rpc-if.c ++++ b/drivers/memory/renesas-rpc-if.c +@@ -212,7 +212,7 @@ EXPORT_SYMBOL(rpcif_enable_rpm); + + void rpcif_disable_rpm(struct rpcif *rpc) + { +- pm_runtime_put_sync(rpc->dev); ++ pm_runtime_disable(rpc->dev); + } + EXPORT_SYMBOL(rpcif_disable_rpm); + +@@ -508,7 +508,8 @@ exit: + return ret; + + err_out: +- ret = reset_control_reset(rpc->rstc); ++ if (reset_control_reset(rpc->rstc)) ++ dev_err(rpc->dev, "Failed to reset HW\n"); + rpcif_hw_init(rpc, rpc->bus_size == 2); + goto exit; + } +@@ -560,9 +561,11 @@ static int rpcif_probe(struct platform_device *pdev) + } else if (of_device_is_compatible(flash, "cfi-flash")) { + name = "rpc-if-hyperflash"; + } else { ++ of_node_put(flash); + dev_warn(&pdev->dev, "unknown flash type\n"); + return -ENODEV; + } ++ of_node_put(flash); + + vdev = platform_device_alloc(name, pdev->id); + if (!vdev) +diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c +index ef03d6fafc5ce..12bc3f5a6cbbd 100644 +--- a/drivers/memstick/core/memstick.c ++++ b/drivers/memstick/core/memstick.c +@@ -468,7 +468,6 @@ static void memstick_check(struct work_struct *work) + host->card = card; + if (device_register(&card->dev)) { + put_device(&card->dev); +- kfree(host->card); + host->card = NULL; + } + } else +diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c +index dd3a1f3dcc191..d2ef46337191c 100644 +--- a/drivers/memstick/host/r592.c ++++ b/drivers/memstick/host/r592.c +@@ -759,8 +759,10 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id) + goto error3; + + dev->mmio = pci_ioremap_bar(pdev, 0); +- if (!dev->mmio) ++ if (!dev->mmio) { ++ error = -ENOMEM; + goto error4; ++ } + + dev->irq = pdev->irq; + 
spin_lock_init(&dev->irq_lock); +@@ -786,12 +788,14 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id) + &dev->dummy_dma_page_physical_address, GFP_KERNEL); + r592_stop_dma(dev , 0); + +- if (request_irq(dev->irq, &r592_irq, IRQF_SHARED, +- DRV_NAME, dev)) ++ error = request_irq(dev->irq, &r592_irq, IRQF_SHARED, ++ DRV_NAME, dev); ++ if (error) + goto error6; + + r592_update_card_detect(dev); +- if (memstick_add_host(host)) ++ error = memstick_add_host(host); ++ if (error) + goto error7; + + message("driver successfully loaded"); +diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig +index 8b99a13669bfc..4789507f325b8 100644 +--- a/drivers/mfd/Kconfig ++++ b/drivers/mfd/Kconfig +@@ -1189,6 +1189,7 @@ config MFD_SIMPLE_MFD_I2C + config MFD_SL28CPLD + tristate "Kontron sl28cpld Board Management Controller" + depends on I2C ++ depends on ARCH_LAYERSCAPE || COMPILE_TEST + select MFD_SIMPLE_MFD_I2C + help + Say yes here to enable support for the Kontron sl28cpld board +diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c +index 247f9849e54ae..417b0355d904d 100644 +--- a/drivers/mfd/htc-i2cpld.c ++++ b/drivers/mfd/htc-i2cpld.c +@@ -346,6 +346,7 @@ static int htcpld_register_chip_i2c( + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA)) { + dev_warn(dev, "i2c adapter %d non-functional\n", + pdata->i2c_adapter_id); ++ i2c_put_adapter(adapter); + return -EINVAL; + } + +@@ -360,6 +361,7 @@ static int htcpld_register_chip_i2c( + /* I2C device registration failed, contineu with the next */ + dev_warn(dev, "Unable to add I2C device for 0x%x\n", + plat_chip_data->addr); ++ i2c_put_adapter(adapter); + return PTR_ERR(client); + } + +diff --git a/drivers/mfd/motorola-cpcap.c b/drivers/mfd/motorola-cpcap.c +index 2283d88adcc25..30d82bfe5b02f 100644 +--- a/drivers/mfd/motorola-cpcap.c ++++ b/drivers/mfd/motorola-cpcap.c +@@ -97,7 +97,7 @@ static struct regmap_irq_chip cpcap_irq_chip[CPCAP_NR_IRQ_CHIPS] = { + .ack_base = 
CPCAP_REG_MI1, + .mask_base = CPCAP_REG_MIM1, + .use_ack = true, +- .ack_invert = true, ++ .clear_ack = true, + }, + { + .name = "cpcap-m2", +@@ -106,7 +106,7 @@ static struct regmap_irq_chip cpcap_irq_chip[CPCAP_NR_IRQ_CHIPS] = { + .ack_base = CPCAP_REG_MI2, + .mask_base = CPCAP_REG_MIM2, + .use_ack = true, +- .ack_invert = true, ++ .clear_ack = true, + }, + { + .name = "cpcap1-4", +@@ -115,7 +115,7 @@ static struct regmap_irq_chip cpcap_irq_chip[CPCAP_NR_IRQ_CHIPS] = { + .ack_base = CPCAP_REG_INT1, + .mask_base = CPCAP_REG_INTM1, + .use_ack = true, +- .ack_invert = true, ++ .clear_ack = true, + }, + }; + +diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c +index 5e680bfdf5c90..988e2ba6dd0f3 100644 +--- a/drivers/mfd/stmfx.c ++++ b/drivers/mfd/stmfx.c +@@ -329,11 +329,11 @@ static int stmfx_chip_init(struct i2c_client *client) + + stmfx->vdd = devm_regulator_get_optional(&client->dev, "vdd"); + ret = PTR_ERR_OR_ZERO(stmfx->vdd); +- if (ret == -ENODEV) { +- stmfx->vdd = NULL; +- } else { +- return dev_err_probe(&client->dev, ret, +- "Failed to get VDD regulator\n"); ++ if (ret) { ++ if (ret == -ENODEV) ++ stmfx->vdd = NULL; ++ else ++ return dev_err_probe(&client->dev, ret, "Failed to get VDD regulator\n"); + } + + if (stmfx->vdd) { +diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c +index 146ca6fb3260f..d3844730eacaf 100644 +--- a/drivers/misc/pci_endpoint_test.c ++++ b/drivers/misc/pci_endpoint_test.c +@@ -811,8 +811,10 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, + + pci_set_master(pdev); + +- if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) ++ if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) { ++ err = -EINVAL; + goto err_disable_irq; ++ } + + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { + if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { +@@ -849,8 +851,10 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, + goto err_ida_remove; + } + +- if (!pci_endpoint_test_request_irq(test)) 
++ if (!pci_endpoint_test_request_irq(test)) { ++ err = -EINVAL; + goto err_kfree_test_name; ++ } + + misc_device = &test->miscdev; + misc_device->minor = MISC_DYNAMIC_MINOR; +diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c +index 29f6180a00363..316393c694d7a 100644 +--- a/drivers/mmc/host/pxamci.c ++++ b/drivers/mmc/host/pxamci.c +@@ -731,6 +731,7 @@ static int pxamci_probe(struct platform_device *pdev) + + host->power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW); + if (IS_ERR(host->power)) { ++ ret = PTR_ERR(host->power); + dev_err(dev, "Failed requesting gpio_power\n"); + goto out; + } +diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c +index ed12aacb1c736..41d193fa77bbf 100644 +--- a/drivers/mmc/host/sdhci-tegra.c ++++ b/drivers/mmc/host/sdhci-tegra.c +@@ -1272,7 +1272,7 @@ static void tegra_sdhci_set_timeout(struct sdhci_host *host, + * busy wait mode. + */ + val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL); +- if (cmd && cmd->busy_timeout >= 11 * HZ) ++ if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC) + val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT; + else + val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT; +diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c +index e9e163ae9d863..b07cbb0661fb1 100644 +--- a/drivers/mtd/mtdcore.c ++++ b/drivers/mtd/mtdcore.c +@@ -993,6 +993,8 @@ int __get_mtd_device(struct mtd_info *mtd) + } + } + ++ master->usecount++; ++ + while (mtd->parent) { + mtd->usecount++; + mtd = mtd->parent; +@@ -1059,6 +1061,8 @@ void __put_mtd_device(struct mtd_info *mtd) + mtd = mtd->parent; + } + ++ master->usecount--; ++ + if (master->_put_device) + master->_put_device(master); + +diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +index dc8104e675062..81028ba35f35d 100644 +--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c ++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +@@ -149,8 +149,10 @@ static int gpmi_init(struct 
gpmi_nand_data *this) + int ret; + + ret = pm_runtime_get_sync(this->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(this->dev); + return ret; ++ } + + ret = gpmi_reset_block(r->gpmi_regs, false); + if (ret) +@@ -2252,7 +2254,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, + void *buf_read = NULL; + const void *buf_write = NULL; + bool direct = false; +- struct completion *completion; ++ struct completion *dma_completion, *bch_completion; + unsigned long to; + + if (check_only) +@@ -2263,8 +2265,10 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, + this->transfers[i].direction = DMA_NONE; + + ret = pm_runtime_get_sync(this->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(this->dev); + return ret; ++ } + + /* + * This driver currently supports only one NAND chip. Plus, dies share +@@ -2347,22 +2351,24 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, + this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1); + } + ++ desc->callback = dma_irq_callback; ++ desc->callback_param = this; ++ dma_completion = &this->dma_done; ++ bch_completion = NULL; ++ ++ init_completion(dma_completion); ++ + if (this->bch && buf_read) { + writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, + this->resources.bch_regs + HW_BCH_CTRL_SET); +- completion = &this->bch_done; +- } else { +- desc->callback = dma_irq_callback; +- desc->callback_param = this; +- completion = &this->dma_done; ++ bch_completion = &this->bch_done; ++ init_completion(bch_completion); + } + +- init_completion(completion); +- + dmaengine_submit(desc); + dma_async_issue_pending(get_dma_chan(this)); + +- to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000)); ++ to = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000)); + if (!to) { + dev_err(this->dev, "DMA timeout, last DMA\n"); + gpmi_dump_info(this); +@@ -2370,6 +2376,16 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, + goto unmap; + } + ++ if (this->bch && buf_read) { ++ to = 
wait_for_completion_timeout(bch_completion, msecs_to_jiffies(1000)); ++ if (!to) { ++ dev_err(this->dev, "BCH timeout, last DMA\n"); ++ gpmi_dump_info(this); ++ ret = -ETIMEDOUT; ++ goto unmap; ++ } ++ } ++ + writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, + this->resources.bch_regs + HW_BCH_CTRL_CLR); + gpmi_clear_bch(this); +diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c +index 48e6dac96be6d..817bddccb775f 100644 +--- a/drivers/mtd/nand/raw/meson_nand.c ++++ b/drivers/mtd/nand/raw/meson_nand.c +@@ -510,7 +510,7 @@ static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf, + } + + static void meson_nfc_dma_buffer_release(struct nand_chip *nand, +- int infolen, int datalen, ++ int datalen, int infolen, + enum dma_data_direction dir) + { + struct meson_nfc *nfc = nand_get_controller_data(nand); +@@ -1044,9 +1044,12 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc) + + ret = clk_set_rate(nfc->device_clk, 24000000); + if (ret) +- goto err_phase_rx; ++ goto err_disable_rx; + + return 0; ++ ++err_disable_rx: ++ clk_disable_unprepare(nfc->phase_rx); + err_phase_rx: + clk_disable_unprepare(nfc->phase_tx); + err_phase_tx: +diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c +index 777fb0de06801..dfc17a28a06b9 100644 +--- a/drivers/mtd/nand/raw/qcom_nandc.c ++++ b/drivers/mtd/nand/raw/qcom_nandc.c +@@ -1570,6 +1570,8 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt) + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + int i; + ++ nandc_read_buffer_sync(nandc, true); ++ + for (i = 0; i < cw_cnt; i++) { + u32 flash = le32_to_cpu(nandc->reg_read_buf[i]); + +diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c +index c352217946455..7900571fc85b3 100644 +--- a/drivers/mtd/nand/spi/core.c ++++ b/drivers/mtd/nand/spi/core.c +@@ -318,6 +318,10 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, + buf += ret; + } + ++ if 
(req->ooblen) ++ memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs, ++ req->ooblen); ++ + return 0; + } + +diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c +index a79e4d866b08a..0ddff1a4b51fb 100644 +--- a/drivers/mtd/parsers/cmdlinepart.c ++++ b/drivers/mtd/parsers/cmdlinepart.c +@@ -226,7 +226,7 @@ static int mtdpart_setup_real(char *s) + struct cmdline_mtd_partition *this_mtd; + struct mtd_partition *parts; + int mtd_id_len, num_parts; +- char *p, *mtd_id, *semicol; ++ char *p, *mtd_id, *semicol, *open_parenth; + + /* + * Replace the first ';' by a NULL char so strrchr can work +@@ -236,6 +236,14 @@ static int mtdpart_setup_real(char *s) + if (semicol) + *semicol = '\0'; + ++ /* ++ * make sure that part-names with ":" will not be handled as ++ * part of the mtd-id with an ":" ++ */ ++ open_parenth = strchr(s, '('); ++ if (open_parenth) ++ *open_parenth = '\0'; ++ + mtd_id = s; + + /* +@@ -245,6 +253,10 @@ static int mtdpart_setup_real(char *s) + */ + p = strrchr(s, ':'); + ++ /* Restore the '(' now. */ ++ if (open_parenth) ++ *open_parenth = '('; ++ + /* Restore the ';' now. */ + if (semicol) + *semicol = ';'; +diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c +index 3f5f21a473a69..deacf87a68a06 100644 +--- a/drivers/mtd/spi-nor/atmel.c ++++ b/drivers/mtd/spi-nor/atmel.c +@@ -8,39 +8,78 @@ + + #include "core.h" + ++/* ++ * The Atmel AT25FS010/AT25FS040 parts have some weird configuration for the ++ * block protection bits. We don't support them. But legacy behavior in linux ++ * is to unlock the whole flash array on startup. Therefore, we have to support ++ * exactly this operation. 
++ */ ++static int atmel_at25fs_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) ++{ ++ return -EOPNOTSUPP; ++} ++ ++static int atmel_at25fs_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) ++{ ++ int ret; ++ ++ /* We only support unlocking the whole flash array */ ++ if (ofs || len != nor->params->size) ++ return -EINVAL; ++ ++ /* Write 0x00 to the status register to disable write protection */ ++ ret = spi_nor_write_sr_and_check(nor, 0); ++ if (ret) ++ dev_dbg(nor->dev, "unable to clear BP bits, WP# asserted?\n"); ++ ++ return ret; ++} ++ ++static int atmel_at25fs_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len) ++{ ++ return -EOPNOTSUPP; ++} ++ ++static const struct spi_nor_locking_ops atmel_at25fs_locking_ops = { ++ .lock = atmel_at25fs_lock, ++ .unlock = atmel_at25fs_unlock, ++ .is_locked = atmel_at25fs_is_locked, ++}; ++ ++static void atmel_at25fs_default_init(struct spi_nor *nor) ++{ ++ nor->params->locking_ops = &atmel_at25fs_locking_ops; ++} ++ ++static const struct spi_nor_fixups atmel_at25fs_fixups = { ++ .default_init = atmel_at25fs_default_init, ++}; ++ + static const struct flash_info atmel_parts[] = { + /* Atmel -- some are (confusingly) marketed as "DataFlash" */ +- { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) }, +- { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) }, ++ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K | SPI_NOR_HAS_LOCK) ++ .fixups = &atmel_at25fs_fixups }, ++ { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_HAS_LOCK) ++ .fixups = &atmel_at25fs_fixups }, + +- { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) }, +- { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, +- { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) }, +- { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) }, ++ { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_HAS_LOCK) }, ++ { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) }, ++ { 
"at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) }, ++ { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) }, + + { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + + { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) }, +- { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) }, +- { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, +- { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, ++ { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_HAS_LOCK) }, ++ { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_HAS_LOCK) }, ++ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) }, + + { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) }, + }; + +-static void atmel_default_init(struct spi_nor *nor) +-{ +- nor->flags |= SNOR_F_HAS_LOCK; +-} +- +-static const struct spi_nor_fixups atmel_fixups = { +- .default_init = atmel_default_init, +-}; +- + const struct spi_nor_manufacturer spi_nor_atmel = { + .name = "atmel", + .parts = atmel_parts, + .nparts = ARRAY_SIZE(atmel_parts), +- .fixups = &atmel_fixups, + }; +diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c +index f0ae7a01703a1..ad6c79d9a7f86 100644 +--- a/drivers/mtd/spi-nor/core.c ++++ b/drivers/mtd/spi-nor/core.c +@@ -906,7 +906,7 @@ static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr) + * + * Return: 0 on success, -errno otherwise. + */ +-static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1) ++int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1) + { + if (nor->flags & SNOR_F_HAS_16BIT_SR) + return spi_nor_write_16bit_sr_and_check(nor, sr1); +@@ -2915,20 +2915,27 @@ static int spi_nor_quad_enable(struct spi_nor *nor) + } + + /** +- * spi_nor_unlock_all() - Unlocks the entire flash memory array. ++ * spi_nor_try_unlock_all() - Tries to unlock the entire flash memory array. 
+ * @nor: pointer to a 'struct spi_nor'. + * + * Some SPI NOR flashes are write protected by default after a power-on reset + * cycle, in order to avoid inadvertent writes during power-up. Backward + * compatibility imposes to unlock the entire flash memory array at power-up + * by default. ++ * ++ * Unprotecting the entire flash array will fail for boards which are hardware ++ * write-protected. Thus any errors are ignored. + */ +-static int spi_nor_unlock_all(struct spi_nor *nor) ++static void spi_nor_try_unlock_all(struct spi_nor *nor) + { +- if (nor->flags & SNOR_F_HAS_LOCK) +- return spi_nor_unlock(&nor->mtd, 0, nor->params->size); ++ int ret; + +- return 0; ++ if (!(nor->flags & SNOR_F_HAS_LOCK)) ++ return; ++ ++ ret = spi_nor_unlock(&nor->mtd, 0, nor->params->size); ++ if (ret) ++ dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n"); + } + + static int spi_nor_init(struct spi_nor *nor) +@@ -2941,11 +2948,7 @@ static int spi_nor_init(struct spi_nor *nor) + return err; + } + +- err = spi_nor_unlock_all(nor); +- if (err) { +- dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n"); +- return err; +- } ++ spi_nor_try_unlock_all(nor); + + if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) { + /* +diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h +index 6f2f6b27173fd..6f62ee861231a 100644 +--- a/drivers/mtd/spi-nor/core.h ++++ b/drivers/mtd/spi-nor/core.h +@@ -409,6 +409,7 @@ void spi_nor_unlock_and_unprep(struct spi_nor *nor); + int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor); + int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor); + int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor); ++int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1); + + int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr); + ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, +diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c +index e0af6d25d573b..0ab07624fb73f 100644 +--- 
a/drivers/mtd/spi-nor/sst.c ++++ b/drivers/mtd/spi-nor/sst.c +@@ -18,7 +18,8 @@ static const struct flash_info sst_parts[] = { + SECT_4K | SST_WRITE) }, + { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, + SECT_4K | SST_WRITE) }, +- { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) }, ++ { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, ++ SECT_4K | SPI_NOR_4BIT_BP) }, + { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, + SECT_4K | SST_WRITE) }, + { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c +index 61a93b1920379..7fc4ac1582afc 100644 +--- a/drivers/net/can/m_can/m_can.c ++++ b/drivers/net/can/m_can/m_can.c +@@ -380,10 +380,6 @@ void m_can_config_endisable(struct m_can_classdev *cdev, bool enable) + cccr &= ~CCCR_CSR; + + if (enable) { +- /* Clear the Clock stop request if it was set */ +- if (cccr & CCCR_CSR) +- cccr &= ~CCCR_CSR; +- + /* enable m_can configuration */ + m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT); + udelay(5); +diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c +index e24a99031b80f..4d49c5f2b7905 100644 +--- a/drivers/net/dsa/qca/ar9331.c ++++ b/drivers/net/dsa/qca/ar9331.c +@@ -159,6 +159,8 @@ struct ar9331_sw_priv { + struct dsa_switch ds; + struct dsa_switch_ops ops; + struct irq_domain *irqdomain; ++ u32 irq_mask; ++ struct mutex lock_irq; + struct mii_bus *mbus; /* mdio master */ + struct mii_bus *sbus; /* mdio slave */ + struct regmap *regmap; +@@ -520,32 +522,44 @@ static irqreturn_t ar9331_sw_irq(int irq, void *data) + static void ar9331_sw_mask_irq(struct irq_data *d) + { + struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d); +- struct regmap *regmap = priv->regmap; +- int ret; + +- ret = regmap_update_bits(regmap, AR9331_SW_REG_GINT_MASK, +- AR9331_SW_GINT_PHY_INT, 0); +- if (ret) +- dev_err(priv->dev, "could not mask IRQ\n"); ++ priv->irq_mask = 0; + } + + static void ar9331_sw_unmask_irq(struct irq_data *d) ++{ ++ 
struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d); ++ ++ priv->irq_mask = AR9331_SW_GINT_PHY_INT; ++} ++ ++static void ar9331_sw_irq_bus_lock(struct irq_data *d) ++{ ++ struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d); ++ ++ mutex_lock(&priv->lock_irq); ++} ++ ++static void ar9331_sw_irq_bus_sync_unlock(struct irq_data *d) + { + struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d); + struct regmap *regmap = priv->regmap; + int ret; + + ret = regmap_update_bits(regmap, AR9331_SW_REG_GINT_MASK, +- AR9331_SW_GINT_PHY_INT, +- AR9331_SW_GINT_PHY_INT); ++ AR9331_SW_GINT_PHY_INT, priv->irq_mask); + if (ret) +- dev_err(priv->dev, "could not unmask IRQ\n"); ++ dev_err(priv->dev, "failed to change IRQ mask\n"); ++ ++ mutex_unlock(&priv->lock_irq); + } + + static struct irq_chip ar9331_sw_irq_chip = { + .name = AR9331_SW_NAME, + .irq_mask = ar9331_sw_mask_irq, + .irq_unmask = ar9331_sw_unmask_irq, ++ .irq_bus_lock = ar9331_sw_irq_bus_lock, ++ .irq_bus_sync_unlock = ar9331_sw_irq_bus_sync_unlock, + }; + + static int ar9331_sw_irq_map(struct irq_domain *domain, unsigned int irq, +@@ -584,6 +598,7 @@ static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv) + return irq ? 
irq : -EINVAL; + } + ++ mutex_init(&priv->lock_irq); + ret = devm_request_threaded_irq(dev, irq, NULL, ar9331_sw_irq, + IRQF_ONESHOT, AR9331_SW_NAME, priv); + if (ret) { +diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c +index 862ea44beea77..5ed80d9a6b9fe 100644 +--- a/drivers/net/ethernet/allwinner/sun4i-emac.c ++++ b/drivers/net/ethernet/allwinner/sun4i-emac.c +@@ -828,13 +828,13 @@ static int emac_probe(struct platform_device *pdev) + db->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(db->clk)) { + ret = PTR_ERR(db->clk); +- goto out_iounmap; ++ goto out_dispose_mapping; + } + + ret = clk_prepare_enable(db->clk); + if (ret) { + dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret); +- goto out_iounmap; ++ goto out_dispose_mapping; + } + + ret = sunxi_sram_claim(&pdev->dev); +@@ -893,6 +893,8 @@ out_release_sram: + sunxi_sram_release(&pdev->dev); + out_clk_disable_unprepare: + clk_disable_unprepare(db->clk); ++out_dispose_mapping: ++ irq_dispose_mapping(ndev->irq); + out_iounmap: + iounmap(db->membase); + out: +@@ -911,6 +913,7 @@ static int emac_remove(struct platform_device *pdev) + unregister_netdev(ndev); + sunxi_sram_release(&pdev->dev); + clk_disable_unprepare(db->clk); ++ irq_dispose_mapping(ndev->irq); + iounmap(db->membase); + free_netdev(ndev); + +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +index be85dad2e3bc4..fcca023f22e54 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +@@ -4069,8 +4069,10 @@ static int bcmgenet_probe(struct platform_device *pdev) + clk_disable_unprepare(priv->clk); + + err = register_netdev(dev); +- if (err) ++ if (err) { ++ bcmgenet_mii_exit(dev); + goto err; ++ } + + return err; + +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +index cf9400a9886d7..d880ab2a7d962 100644 
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +@@ -878,7 +878,7 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv, + swa = (struct dpaa2_eth_swa *)sgt_buf; + swa->type = DPAA2_ETH_SWA_SINGLE; + swa->single.skb = skb; +- swa->sg.sgt_size = sgt_buf_size; ++ swa->single.sgt_size = sgt_buf_size; + + /* Separately map the SGT buffer */ + sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); +diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c +index 567fd67e900ef..e402c62eb3137 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c +@@ -219,8 +219,11 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count) + } while (count); + + no_buffers: +- if (rx_ring->next_to_use != ntu) ++ if (rx_ring->next_to_use != ntu) { ++ /* clear the status bits for the next_to_use descriptor */ ++ rx_desc->wb.qword1.status_error_len = 0; + i40e_release_rx_desc(rx_ring, ntu); ++ } + + return ok; + } +diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c +index 797886524054c..98101a8e2952d 100644 +--- a/drivers/net/ethernet/intel/ice/ice_xsk.c ++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c +@@ -446,8 +446,11 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) + } + } while (--count); + +- if (rx_ring->next_to_use != ntu) ++ if (rx_ring->next_to_use != ntu) { ++ /* clear the status bits for the next_to_use descriptor */ ++ rx_desc->wb.status_error0 = 0; + ice_release_rx_desc(rx_ring, ntu); ++ } + + return ret; + } +diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c +index bf48f0ded9c7d..925161959b9ba 100644 +--- a/drivers/net/ethernet/korina.c ++++ b/drivers/net/ethernet/korina.c +@@ -219,7 +219,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev) + 
dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&lp->lock, flags); + +- return NETDEV_TX_BUSY; ++ return NETDEV_TX_OK; + } + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index 8ff207aa14792..e455a2f31f070 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -50,6 +50,7 @@ + #ifdef CONFIG_RFS_ACCEL + #include <linux/cpu_rmap.h> + #endif ++#include <linux/version.h> + #include <net/devlink.h> + #include "mlx5_core.h" + #include "lib/eq.h" +@@ -233,7 +234,10 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev) + strncat(string, ",", remaining_size); + + remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); +- strncat(string, DRIVER_VERSION, remaining_size); ++ ++ snprintf(string + strlen(string), remaining_size, "%u.%u.%u", ++ (u8)((LINUX_VERSION_CODE >> 16) & 0xff), (u8)((LINUX_VERSION_CODE >> 8) & 0xff), ++ (u16)(LINUX_VERSION_CODE & 0xffff)); + + /*Send the command*/ + MLX5_SET(set_driver_version_in, in, opcode, +diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c +index b319c22c211cd..8947c3a628109 100644 +--- a/drivers/net/ethernet/microchip/lan743x_main.c ++++ b/drivers/net/ethernet/microchip/lan743x_main.c +@@ -1962,6 +1962,14 @@ static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx) + length, GFP_ATOMIC | GFP_DMA); + } + ++static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index) ++{ ++ /* update the tail once per 8 descriptors */ ++ if ((index & 7) == 7) ++ lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number), ++ index); ++} ++ + static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index, + struct sk_buff *skb) + { +@@ -1992,6 +2000,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index, + descriptor->data0 = (RX_DESC_DATA0_OWN_ | + (length & RX_DESC_DATA0_BUF_LENGTH_MASK_)); + 
skb_reserve(buffer_info->skb, RX_HEAD_PADDING); ++ lan743x_rx_update_tail(rx, index); + + return 0; + } +@@ -2010,6 +2019,7 @@ static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index) + descriptor->data0 = (RX_DESC_DATA0_OWN_ | + ((buffer_info->buffer_length) & + RX_DESC_DATA0_BUF_LENGTH_MASK_)); ++ lan743x_rx_update_tail(rx, index); + } + + static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index) +@@ -2220,6 +2230,7 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight) + { + struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi); + struct lan743x_adapter *adapter = rx->adapter; ++ int result = RX_PROCESS_RESULT_NOTHING_TO_DO; + u32 rx_tail_flags = 0; + int count; + +@@ -2228,27 +2239,19 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight) + lan743x_csr_write(adapter, DMAC_INT_STS, + DMAC_INT_BIT_RXFRM_(rx->channel_number)); + } +- count = 0; +- while (count < weight) { +- int rx_process_result = lan743x_rx_process_packet(rx); +- +- if (rx_process_result == RX_PROCESS_RESULT_PACKET_RECEIVED) { +- count++; +- } else if (rx_process_result == +- RX_PROCESS_RESULT_NOTHING_TO_DO) { ++ for (count = 0; count < weight; count++) { ++ result = lan743x_rx_process_packet(rx); ++ if (result == RX_PROCESS_RESULT_NOTHING_TO_DO) + break; +- } else if (rx_process_result == +- RX_PROCESS_RESULT_PACKET_DROPPED) { +- continue; +- } + } + rx->frame_count += count; +- if (count == weight) +- goto done; ++ if (count == weight || result == RX_PROCESS_RESULT_PACKET_RECEIVED) ++ return weight; + + if (!napi_complete_done(napi, count)) +- goto done; ++ return count; + ++ /* re-arm interrupts, must write to rx tail on some chip variants */ + if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET) + rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) { +@@ -2258,10 +2261,10 @@ static int lan743x_rx_napi_poll(struct napi_struct 
*napi, int weight) + INT_BIT_DMA_RX_(rx->channel_number)); + } + +- /* update RX_TAIL */ +- lan743x_csr_write(adapter, RX_TAIL(rx->channel_number), +- rx_tail_flags | rx->last_tail); +-done: ++ if (rx_tail_flags) ++ lan743x_csr_write(adapter, RX_TAIL(rx->channel_number), ++ rx_tail_flags | rx->last_tail); ++ + return count; + } + +@@ -2405,7 +2408,7 @@ static int lan743x_rx_open(struct lan743x_rx *rx) + + netif_napi_add(adapter->netdev, + &rx->napi, lan743x_rx_napi_poll, +- rx->ring_size - 1); ++ NAPI_POLL_WEIGHT); + + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_RX_SWR_(rx->channel_number)); +diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c +index 1e7729421a825..9cf2bc5f42892 100644 +--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c ++++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c +@@ -1267,7 +1267,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) + + err = mscc_ocelot_init_ports(pdev, ports); + if (err) +- goto out_put_ports; ++ goto out_ocelot_deinit; + + if (ocelot->ptp) { + err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info); +@@ -1282,8 +1282,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev) + register_switchdev_notifier(&ocelot_switchdev_nb); + register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); + ++ of_node_put(ports); ++ + dev_info(&pdev->dev, "Ocelot switch probed\n"); + ++ return 0; ++ ++out_ocelot_deinit: ++ ocelot_deinit(ocelot); + out_put_ports: + of_node_put(ports); + return err; +diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c +index bb448c82cdc28..c029950a81e20 100644 +--- a/drivers/net/ethernet/netronome/nfp/flower/main.c ++++ b/drivers/net/ethernet/netronome/nfp/flower/main.c +@@ -860,9 +860,6 @@ static void nfp_flower_clean(struct nfp_app *app) + skb_queue_purge(&app_priv->cmsg_skbs_low); + flush_work(&app_priv->cmsg_work); + +- 
flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app, +- nfp_flower_setup_indr_tc_release); +- + if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM) + nfp_flower_qos_cleanup(app); + +@@ -951,6 +948,9 @@ static int nfp_flower_start(struct nfp_app *app) + static void nfp_flower_stop(struct nfp_app *app) + { + nfp_tunnel_config_stop(app); ++ ++ flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app, ++ nfp_flower_setup_indr_tc_release); + } + + static int +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c +index a12df3946a07c..c968c5c5a60a0 100644 +--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c ++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c +@@ -1129,38 +1129,10 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) + lif->rx_mode = rx_mode; + } + +-static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode, +- bool from_ndo) +-{ +- struct ionic_deferred_work *work; +- +- if (from_ndo) { +- work = kzalloc(sizeof(*work), GFP_ATOMIC); +- if (!work) { +- netdev_err(lif->netdev, "%s OOM\n", __func__); +- return; +- } +- work->type = IONIC_DW_TYPE_RX_MODE; +- work->rx_mode = rx_mode; +- netdev_dbg(lif->netdev, "deferred: rx_mode\n"); +- ionic_lif_deferred_enqueue(&lif->deferred, work); +- } else { +- ionic_lif_rx_mode(lif, rx_mode); +- } +-} +- +-static void ionic_dev_uc_sync(struct net_device *netdev, bool from_ndo) +-{ +- if (from_ndo) +- __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del); +- else +- __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); +- +-} +- +-static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo) ++static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep) + { + struct ionic_lif *lif = netdev_priv(netdev); ++ struct ionic_deferred_work *work; + unsigned int nfilters; + unsigned int rx_mode; + +@@ -1177,7 +1149,10 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool 
from_ndo) + * we remove our overflow flag and check the netdev flags + * to see if we can disable NIC PROMISC + */ +- ionic_dev_uc_sync(netdev, from_ndo); ++ if (can_sleep) ++ __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); ++ else ++ __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del); + nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); + if (netdev_uc_count(netdev) + 1 > nfilters) { + rx_mode |= IONIC_RX_MODE_F_PROMISC; +@@ -1189,7 +1164,10 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo) + } + + /* same for multicast */ +- ionic_dev_uc_sync(netdev, from_ndo); ++ if (can_sleep) ++ __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); ++ else ++ __dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del); + nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters); + if (netdev_mc_count(netdev) > nfilters) { + rx_mode |= IONIC_RX_MODE_F_ALLMULTI; +@@ -1200,13 +1178,26 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo) + rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; + } + +- if (lif->rx_mode != rx_mode) +- _ionic_lif_rx_mode(lif, rx_mode, from_ndo); ++ if (lif->rx_mode != rx_mode) { ++ if (!can_sleep) { ++ work = kzalloc(sizeof(*work), GFP_ATOMIC); ++ if (!work) { ++ netdev_err(lif->netdev, "%s OOM\n", __func__); ++ return; ++ } ++ work->type = IONIC_DW_TYPE_RX_MODE; ++ work->rx_mode = rx_mode; ++ netdev_dbg(lif->netdev, "deferred: rx_mode\n"); ++ ionic_lif_deferred_enqueue(&lif->deferred, work); ++ } else { ++ ionic_lif_rx_mode(lif, rx_mode); ++ } ++ } + } + + static void ionic_ndo_set_rx_mode(struct net_device *netdev) + { +- ionic_set_rx_mode(netdev, true); ++ ionic_set_rx_mode(netdev, false); + } + + static __le64 ionic_netdev_features_to_nic(netdev_features_t features) +@@ -1773,7 +1764,7 @@ static int ionic_txrx_init(struct ionic_lif *lif) + if (lif->netdev->features & NETIF_F_RXHASH) + ionic_lif_rss_init(lif); + +- ionic_set_rx_mode(lif->netdev, false); ++ 
ionic_set_rx_mode(lif->netdev, true); + + return 0; + +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +index 5a7e240fd4698..c2faf96fcade8 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +@@ -2492,6 +2492,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + qlcnic_sriov_vf_register_map(ahw); + break; + default: ++ err = -EINVAL; + goto err_out_free_hw_res; + } + +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index 21b71148c5324..34bb95dd92392 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -3072,6 +3072,7 @@ static int virtnet_probe(struct virtio_device *vdev) + dev_err(&vdev->dev, + "device MTU appears to have changed it is now %d < %d", + mtu, dev->min_mtu); ++ err = -EINVAL; + goto free; + } + +diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c +index 5cf2045fadeff..c41e72508d3db 100644 +--- a/drivers/net/wireless/admtek/adm8211.c ++++ b/drivers/net/wireless/admtek/adm8211.c +@@ -1796,6 +1796,7 @@ static int adm8211_probe(struct pci_dev *pdev, + if (io_len < 256 || mem_len < 1024) { + printk(KERN_ERR "%s (adm8211): Too short PCI resources\n", + pci_name(pdev)); ++ err = -ENOMEM; + goto err_disable_pdev; + } + +@@ -1805,6 +1806,7 @@ static int adm8211_probe(struct pci_dev *pdev, + if (reg != ADM8211_SIG1 && reg != ADM8211_SIG2) { + printk(KERN_ERR "%s (adm8211): Invalid signature (0x%x)\n", + pci_name(pdev), reg); ++ err = -EINVAL; + goto err_disable_pdev; + } + +@@ -1815,8 +1817,8 @@ static int adm8211_probe(struct pci_dev *pdev, + return err; /* someone else grabbed it? 
don't disable it */ + } + +- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) || +- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) { ++ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); ++ if (err) { + printk(KERN_ERR "%s (adm8211): No suitable DMA available\n", + pci_name(pdev)); + goto err_free_reg; +diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c +index 05a620ff6fe2c..19b9c27e30e20 100644 +--- a/drivers/net/wireless/ath/ath10k/usb.c ++++ b/drivers/net/wireless/ath/ath10k/usb.c +@@ -997,6 +997,8 @@ static int ath10k_usb_probe(struct usb_interface *interface, + + ar_usb = ath10k_usb_priv(ar); + ret = ath10k_usb_create(ar, interface); ++ if (ret) ++ goto err; + ar_usb->ar = ar; + + ar->dev_id = product_id; +@@ -1009,7 +1011,7 @@ static int ath10k_usb_probe(struct usb_interface *interface, + ret = ath10k_core_register(ar, &bus_params); + if (ret) { + ath10k_warn(ar, "failed to register driver core: %d\n", ret); +- goto err; ++ goto err_usb_destroy; + } + + /* TODO: remove this once USB support is fully implemented */ +@@ -1017,6 +1019,9 @@ static int ath10k_usb_probe(struct usb_interface *interface, + + return 0; + ++err_usb_destroy: ++ ath10k_usb_destroy(ar); ++ + err: + ath10k_core_destroy(ar); + +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c +index 932266d1111bd..7b5834157fe51 100644 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c +@@ -1401,13 +1401,15 @@ static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len, + + switch (tag) { + case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT: ++ arg->service_map_ext_valid = true; + arg->service_map_ext_len = *(__le32 *)ptr; + arg->service_map_ext = ptr + sizeof(__le32); + return 0; + default: + break; + } +- return -EPROTO; ++ ++ return 0; + } + + static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar, +diff --git 
a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c +index 1fa7107a50515..37b53af760d76 100644 +--- a/drivers/net/wireless/ath/ath10k/wmi.c ++++ b/drivers/net/wireless/ath/ath10k/wmi.c +@@ -5751,8 +5751,13 @@ void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb) + ret); + } + +- ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map, +- __le32_to_cpu(arg.service_map_ext_len)); ++ /* ++ * Initialization of "arg.service_map_ext_valid" to ZERO is necessary ++ * for the below logic to work. ++ */ ++ if (arg.service_map_ext_valid) ++ ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map, ++ __le32_to_cpu(arg.service_map_ext_len)); + } + + static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb) +diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h +index 4898e19b0af65..66ecf09068c19 100644 +--- a/drivers/net/wireless/ath/ath10k/wmi.h ++++ b/drivers/net/wireless/ath/ath10k/wmi.h +@@ -6917,6 +6917,7 @@ struct wmi_svc_rdy_ev_arg { + }; + + struct wmi_svc_avail_ev_arg { ++ bool service_map_ext_valid; + __le32 service_map_ext_len; + const __le32 *service_map_ext; + }; +diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h +index 18b97420f0d8a..5a7915f75e1e2 100644 +--- a/drivers/net/wireless/ath/ath11k/core.h ++++ b/drivers/net/wireless/ath/ath11k/core.h +@@ -75,12 +75,14 @@ static inline enum wme_ac ath11k_tid_to_ac(u32 tid) + + enum ath11k_skb_flags { + ATH11K_SKB_HW_80211_ENCAP = BIT(0), ++ ATH11K_SKB_CIPHER_SET = BIT(1), + }; + + struct ath11k_skb_cb { + dma_addr_t paddr; + u8 eid; + u8 flags; ++ u32 cipher; + struct ath11k *ar; + struct ieee80211_vif *vif; + } __packed; +diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c +index 3d962eee4d61d..21dfd08d3debb 100644 +--- a/drivers/net/wireless/ath/ath11k/dp_tx.c ++++ b/drivers/net/wireless/ath/ath11k/dp_tx.c 
+@@ -84,7 +84,6 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, + struct ath11k_dp *dp = &ab->dp; + struct hal_tx_info ti = {0}; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); +- struct ieee80211_key_conf *key = info->control.hw_key; + struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); + struct hal_srng *tcl_ring; + struct ieee80211_hdr *hdr = (void *)skb->data; +@@ -149,9 +148,9 @@ tcl_ring_sel: + ti.meta_data_flags = arvif->tcl_metadata; + + if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) { +- if (key) { ++ if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) { + ti.encrypt_type = +- ath11k_dp_tx_get_encrypt_type(key->cipher); ++ ath11k_dp_tx_get_encrypt_type(skb_cb->cipher); + + if (ieee80211_has_protected(hdr->frame_control)) + skb_put(skb, IEEE80211_CCMP_MIC_LEN); +diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c +index 11a411b76fe42..66331da350129 100644 +--- a/drivers/net/wireless/ath/ath11k/hw.c ++++ b/drivers/net/wireless/ath/ath11k/hw.c +@@ -127,7 +127,7 @@ static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab, + config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD; + config->rx_batchmode = TARGET_RX_BATCHMODE; + config->peer_map_unmap_v2_support = 1; +- config->twt_ap_pdev_count = 2; ++ config->twt_ap_pdev_count = ab->num_radios; + config->twt_ap_sta_count = 1000; + } + +@@ -157,7 +157,7 @@ static int ath11k_hw_mac_id_to_srng_id_qca6390(struct ath11k_hw_params *hw, + + const struct ath11k_hw_ops ipq8074_ops = { + .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id, +- .wmi_init_config = ath11k_init_wmi_config_qca6390, ++ .wmi_init_config = ath11k_init_wmi_config_ipq8074, + .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074, + .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074, + }; +diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c +index 7f8dd47d23333..af427d9051a07 100644 +--- 
a/drivers/net/wireless/ath/ath11k/mac.c ++++ b/drivers/net/wireless/ath/ath11k/mac.c +@@ -3977,21 +3977,20 @@ static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar) + static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) + { + struct ath11k *ar = container_of(work, struct ath11k, wmi_mgmt_tx_work); +- struct ieee80211_tx_info *info; ++ struct ath11k_skb_cb *skb_cb; + struct ath11k_vif *arvif; + struct sk_buff *skb; + int ret; + + while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) { +- info = IEEE80211_SKB_CB(skb); +- if (!info->control.vif) { +- ath11k_warn(ar->ab, "no vif found for mgmt frame, flags 0x%x\n", +- info->control.flags); ++ skb_cb = ATH11K_SKB_CB(skb); ++ if (!skb_cb->vif) { ++ ath11k_warn(ar->ab, "no vif found for mgmt frame\n"); + ieee80211_free_txskb(ar->hw, skb); + continue; + } + +- arvif = ath11k_vif_to_arvif(info->control.vif); ++ arvif = ath11k_vif_to_arvif(skb_cb->vif); + if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) && + arvif->is_started) { + ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb); +@@ -4004,8 +4003,8 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) + } + } else { + ath11k_warn(ar->ab, +- "dropping mgmt frame for vdev %d, flags 0x%x is_started %d\n", +- arvif->vdev_id, info->control.flags, ++ "dropping mgmt frame for vdev %d, is_started %d\n", ++ arvif->vdev_id, + arvif->is_started); + ieee80211_free_txskb(ar->hw, skb); + } +@@ -4053,10 +4052,20 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif = info->control.vif; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; ++ struct ieee80211_key_conf *key = info->control.hw_key; ++ u32 info_flags = info->flags; + bool is_prb_rsp; + int ret; + +- if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { ++ memset(skb_cb, 0, sizeof(*skb_cb)); ++ skb_cb->vif = vif; ++ ++ if (key) { ++ skb_cb->cipher = key->cipher; ++ skb_cb->flags |= 
ATH11K_SKB_CIPHER_SET; ++ } ++ ++ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { + skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP; + } else if (ieee80211_is_mgmt(hdr->frame_control)) { + is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control); +@@ -4094,7 +4103,8 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable) + + if (enable) { + tlv_filter = ath11k_mac_mon_status_filter_default; +- tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar); ++ if (ath11k_debugfs_rx_filter(ar)) ++ tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar); + } + + for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { +@@ -5225,20 +5235,26 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, + arvif->vdev_type != WMI_VDEV_TYPE_AP && + arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) { + memcpy(&arvif->chanctx, ctx, sizeof(*ctx)); +- mutex_unlock(&ar->conf_mutex); +- return 0; ++ ret = 0; ++ goto out; + } + + if (WARN_ON(arvif->is_started)) { +- mutex_unlock(&ar->conf_mutex); +- return -EBUSY; ++ ret = -EBUSY; ++ goto out; + } + + if (ab->hw_params.vdev_start_delay) { + param.vdev_id = arvif->vdev_id; + param.peer_type = WMI_PEER_TYPE_DEFAULT; + param.peer_addr = ar->mac_addr; ++ + ret = ath11k_peer_create(ar, arvif, NULL, ¶m); ++ if (ret) { ++ ath11k_warn(ab, "failed to create peer after vdev start delay: %d", ++ ret); ++ goto out; ++ } + } + + ret = ath11k_mac_vdev_start(arvif, &ctx->def); +@@ -5246,23 +5262,21 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, + ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", + arvif->vdev_id, vif->addr, + ctx->def.chan->center_freq, ret); +- goto err; ++ goto out; + } + if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { + ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id); + if (ret) +- goto err; ++ goto out; + } + + arvif->is_started = true; + + /* TODO: Setup ps and cts/rts protection */ + +- mutex_unlock(&ar->conf_mutex); +- +- return 0; ++ ret = 0; + +-err: ++out: + 
mutex_unlock(&ar->conf_mutex); + + return ret; +diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c +index c2b1651582259..99a88ca83deaa 100644 +--- a/drivers/net/wireless/ath/ath11k/qmi.c ++++ b/drivers/net/wireless/ath/ath11k/qmi.c +@@ -1585,15 +1585,17 @@ static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab) + struct qmi_wlanfw_ind_register_resp_msg_v01 *resp; + struct qmi_handle *handle = &ab->qmi.handle; + struct qmi_txn txn; +- int ret = 0; ++ int ret; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + resp = kzalloc(sizeof(*resp), GFP_KERNEL); +- if (!resp) ++ if (!resp) { ++ ret = -ENOMEM; + goto resp_out; ++ } + + req->client_id_valid = 1; + req->client_id = QMI_WLANFW_CLIENT_ID; +diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c +index f6a1f0352989d..678d0885fcee7 100644 +--- a/drivers/net/wireless/ath/ath11k/reg.c ++++ b/drivers/net/wireless/ath/ath11k/reg.c +@@ -80,6 +80,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) + */ + init_country_param.flags = ALPHA_IS_SET; + memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2); ++ init_country_param.cc_info.alpha2[2] = 0; + + ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param); + if (ret) +@@ -584,7 +585,6 @@ ath11k_reg_build_regd(struct ath11k_base *ab, + if (!tmp_regd) + goto ret; + +- tmp_regd->n_reg_rules = num_rules; + memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1); + memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1); + alpha2[2] = '\0'; +@@ -597,7 +597,7 @@ ath11k_reg_build_regd(struct ath11k_base *ab, + /* Update reg_rules[] below. 
Firmware is expected to + * send these rules in order(2G rules first and then 5G) + */ +- for (; i < tmp_regd->n_reg_rules; i++) { ++ for (; i < num_rules; i++) { + if (reg_info->num_2g_reg_rules && + (i < reg_info->num_2g_reg_rules)) { + reg_rule = reg_info->reg_rules_2g_ptr + i; +@@ -652,6 +652,8 @@ ath11k_reg_build_regd(struct ath11k_base *ab, + flags); + } + ++ tmp_regd->n_reg_rules = i; ++ + if (intersect) { + default_regd = ab->default_regd[reg_info->phy_id]; + +diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c +index 8eca92520837e..04b8b002edfe0 100644 +--- a/drivers/net/wireless/ath/ath11k/wmi.c ++++ b/drivers/net/wireless/ath/ath11k/wmi.c +@@ -2198,37 +2198,6 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, + } + } + +- len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid); +- tlv = ptr; +- tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | +- FIELD_PREP(WMI_TLV_LEN, len); +- ptr += TLV_HDR_SIZE; +- if (params->num_hint_s_ssid) { +- s_ssid = ptr; +- for (i = 0; i < params->num_hint_s_ssid; ++i) { +- s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags; +- s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid; +- s_ssid++; +- } +- } +- ptr += len; +- +- len = params->num_hint_bssid * sizeof(struct hint_bssid); +- tlv = ptr; +- tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | +- FIELD_PREP(WMI_TLV_LEN, len); +- ptr += TLV_HDR_SIZE; +- if (params->num_hint_bssid) { +- hint_bssid = ptr; +- for (i = 0; i < params->num_hint_bssid; ++i) { +- hint_bssid->freq_flags = +- params->hint_bssid[i].freq_flags; +- ether_addr_copy(¶ms->hint_bssid[i].bssid.addr[0], +- &hint_bssid->bssid.addr[0]); +- hint_bssid++; +- } +- } +- + ret = ath11k_wmi_cmd_send(wmi, skb, + WMI_START_SCAN_CMDID); + if (ret) { +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +index a2dbbb977d0cb..0ee421f30aa24 100644 
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +@@ -2137,7 +2137,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, + BRCMF_WSEC_MAX_PSK_LEN); + else if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_SAE) { + /* clean up user-space RSNE */ +- if (brcmf_fil_iovar_data_set(ifp, "wpaie", NULL, 0)) { ++ err = brcmf_fil_iovar_data_set(ifp, "wpaie", NULL, 0); ++ if (err) { + bphy_err(drvr, "failed to clean up user-space RSNE\n"); + goto done; + } +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +index 39381cbde89e6..d8db0dbcfe091 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +@@ -1936,16 +1936,18 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) + fwreq = brcmf_pcie_prepare_fw_request(devinfo); + if (!fwreq) { + ret = -ENOMEM; +- goto fail_bus; ++ goto fail_brcmf; + } + + ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup); + if (ret < 0) { + kfree(fwreq); +- goto fail_bus; ++ goto fail_brcmf; + } + return 0; + ++fail_brcmf: ++ brcmf_free(&devinfo->pdev->dev); + fail_bus: + kfree(bus->msgbuf); + kfree(bus); +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +index 99987a789e7e3..59c2b2b6027da 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +@@ -4541,6 +4541,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) + brcmf_sdiod_intr_unregister(bus->sdiodev); + + brcmf_detach(bus->sdiodev->dev); ++ brcmf_free(bus->sdiodev->dev); + + cancel_work_sync(&bus->datawork); + if (bus->brcmf_wq) +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +index 
51ce93d21ffe5..8fa1c22fd96db 100644 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +@@ -808,7 +808,7 @@ static bool is_trig_data_contained(struct iwl_ucode_tlv *new, + struct iwl_fw_ini_trigger_tlv *old_trig = (void *)old->data; + __le32 *new_data = new_trig->data, *old_data = old_trig->data; + u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data); +- u32 old_dwords_num = iwl_tlv_array_len(new, new_trig, data); ++ u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data); + int i, j; + + for (i = 0; i < new_dwords_num; i++) { +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +index f1c5b3a9c26f7..0d1118f66f0d5 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +@@ -315,6 +315,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { + iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC), + RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF, + iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC), ++ RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF, ++ iwl_mvm_probe_resp_data_notif, ++ RX_HANDLER_ASYNC_LOCKED), ++ RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_NOA_NOTIF, ++ iwl_mvm_channel_switch_noa_notif, ++ RX_HANDLER_SYNC), + }; + #undef RX_HANDLER + #undef RX_HANDLER_GRP +diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +index b849d27bd741e..d1fc948364c79 100644 +--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c ++++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +@@ -1223,13 +1223,6 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev) + if (skb->len < ETH_HLEN) + goto drop; + +- ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0); +- if (!ctx) +- goto busy; +- +- memset(ctx->buf, 0, BULK_BUF_SIZE); +- buf = ctx->buf->data; +- + tx_control = 0; + + err = orinoco_process_xmit_skb(skb, 
dev, priv, &tx_control, +@@ -1237,6 +1230,13 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev) + if (err) + goto drop; + ++ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0); ++ if (!ctx) ++ goto drop; ++ ++ memset(ctx->buf, 0, BULK_BUF_SIZE); ++ buf = ctx->buf->data; ++ + { + __le16 *tx_cntl = (__le16 *)buf; + *tx_cntl = cpu_to_le16(tx_control); +diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c +index 9ba8a8f64976b..6283df5aaaf8b 100644 +--- a/drivers/net/wireless/marvell/mwifiex/main.c ++++ b/drivers/net/wireless/marvell/mwifiex/main.c +@@ -1471,6 +1471,8 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) + priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); + mwifiex_deauthenticate(priv, NULL); + ++ mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN); ++ + mwifiex_uninit_sw(adapter); + adapter->is_up = false; + +diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c +index 214fc95b8a33f..145e839fea4e5 100644 +--- a/drivers/net/wireless/mediatek/mt76/dma.c ++++ b/drivers/net/wireless/mediatek/mt76/dma.c +@@ -72,9 +72,11 @@ mt76_free_pending_txwi(struct mt76_dev *dev) + { + struct mt76_txwi_cache *t; + ++ local_bh_disable(); + while ((t = __mt76_get_txwi(dev)) != NULL) + dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size, + DMA_TO_DEVICE); ++ local_bh_enable(); + } + + static int +diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c +index 4befe7f937a91..466447a5184f8 100644 +--- a/drivers/net/wireless/mediatek/mt76/mac80211.c ++++ b/drivers/net/wireless/mediatek/mt76/mac80211.c +@@ -305,6 +305,7 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw) + ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); + ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); + ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ++ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); + + if 
(!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) { + ieee80211_hw_set(hw, TX_AMSDU); +diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c +index a5845da3547a9..06fa28f645f28 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c +@@ -57,7 +57,8 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + + return 0; + error: +- ieee80211_free_hw(mt76_hw(dev)); ++ mt76_free_device(&dev->mt76); ++ + return ret; + } + +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +index 8dc645e398fda..3d62fda067e44 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +@@ -1046,15 +1046,17 @@ int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, + if (cmd == SET_KEY) { + if (cipher == MT_CIPHER_TKIP) { + /* Rx/Tx MIC keys are swapped */ ++ memcpy(data, key, 16); + memcpy(data + 16, key + 24, 8); + memcpy(data + 24, key + 16, 8); ++ } else { ++ if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher) ++ memmove(data + 16, data, 16); ++ if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher) ++ memcpy(data, key, keylen); ++ else if (cipher == MT_CIPHER_BIP_CMAC_128) ++ memcpy(data + 16, key, 16); + } +- if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher) +- memmove(data + 16, data, 16); +- if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher) +- memcpy(data, key, keylen); +- else if (cipher == MT_CIPHER_BIP_CMAC_128) +- memcpy(data + 16, key, 16); + } else { + if (wcid->cipher & ~BIT(cipher)) { + if (cipher != MT_CIPHER_BIP_CMAC_128) +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +index 6de492a4cf025..9b191307e140e 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +@@ -240,7 +240,8 @@ int 
mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, + + return 0; + error: +- ieee80211_free_hw(mt76_hw(dev)); ++ mt76_free_device(&dev->mt76); ++ + return ret; + } + +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c +index 2486cda3243bc..69e38f477b1e4 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c +@@ -150,7 +150,7 @@ static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid, + return -EBUSY; + } else { + if (sdio->sched.pse_data_quota < *pse_size + pse_sz || +- sdio->sched.ple_data_quota < *ple_size) ++ sdio->sched.ple_data_quota < *ple_size + 1) + return -EBUSY; + + *ple_size = *ple_size + 1; +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +index dda11c704abaa..b87d8e136cb9a 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +@@ -194,7 +194,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) + return 0; + + error: +- ieee80211_free_hw(mt76_hw(dev)); ++ mt76_free_device(&dev->mt76); ++ + return ret; + } + +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +index 4d50dad29ddff..ecaf85b483ac3 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +@@ -90,7 +90,8 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id) + return 0; + + error: +- ieee80211_free_hw(mt76_hw(dev)); ++ mt76_free_device(&dev->mt76); ++ + return ret; + } + +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +index 1049927faf246..8f2ad32ade180 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c ++++ 
b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +@@ -233,6 +233,7 @@ static const struct file_operations fops_tx_stats = { + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, ++ .owner = THIS_MODULE, + }; + + static int mt7915_read_temperature(struct seq_file *s, void *data) +@@ -460,6 +461,7 @@ static const struct file_operations fops_sta_stats = { + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, ++ .owner = THIS_MODULE, + }; + + void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c +index fe62b4d853e48..3ac5bbb94d294 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c +@@ -140,7 +140,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev, + dev = container_of(mdev, struct mt7915_dev, mt76); + ret = mt7915_alloc_device(pdev, dev); + if (ret) +- return ret; ++ goto error; + + mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); + mdev->rev = (mt7915_l1_rr(dev, MT_HW_CHIPID) << 16) | +@@ -163,7 +163,8 @@ static int mt7915_pci_probe(struct pci_dev *pdev, + + return 0; + error: +- ieee80211_free_hw(mt76_hw(dev)); ++ mt76_free_device(&dev->mt76); ++ + return ret; + } + +diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c +index 5337e67092ca6..0f328ce47fee3 100644 +--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c ++++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c +@@ -299,19 +299,19 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) + sysctl_bar = qtnf_map_bar(pdev, QTN_SYSCTL_BAR); + if (IS_ERR(sysctl_bar)) { + pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR); +- return ret; ++ return PTR_ERR(sysctl_bar); + } + + dmareg_bar = qtnf_map_bar(pdev, QTN_DMA_BAR); + if (IS_ERR(dmareg_bar)) { + pr_err("failed to map 
BAR%u\n", QTN_DMA_BAR); +- return ret; ++ return PTR_ERR(dmareg_bar); + } + + epmem_bar = qtnf_map_bar(pdev, QTN_SHMEM_BAR); + if (IS_ERR(epmem_bar)) { + pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR); +- return ret; ++ return PTR_ERR(epmem_bar); + } + + chipid = qtnf_chip_id_get(sysctl_bar); +diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c +index a62d41c0ccbc0..00b5589847985 100644 +--- a/drivers/net/wireless/rsi/rsi_91x_usb.c ++++ b/drivers/net/wireless/rsi/rsi_91x_usb.c +@@ -741,24 +741,24 @@ static int rsi_reset_card(struct rsi_hw *adapter) + if (ret < 0) + goto fail; + } else { +- if ((rsi_usb_master_reg_write(adapter, +- NWP_WWD_INTERRUPT_TIMER, +- NWP_WWD_INT_TIMER_CLKS, +- RSI_9116_REG_SIZE)) < 0) { ++ ret = rsi_usb_master_reg_write(adapter, ++ NWP_WWD_INTERRUPT_TIMER, ++ NWP_WWD_INT_TIMER_CLKS, ++ RSI_9116_REG_SIZE); ++ if (ret < 0) + goto fail; +- } +- if ((rsi_usb_master_reg_write(adapter, +- NWP_WWD_SYSTEM_RESET_TIMER, +- NWP_WWD_SYS_RESET_TIMER_CLKS, +- RSI_9116_REG_SIZE)) < 0) { ++ ret = rsi_usb_master_reg_write(adapter, ++ NWP_WWD_SYSTEM_RESET_TIMER, ++ NWP_WWD_SYS_RESET_TIMER_CLKS, ++ RSI_9116_REG_SIZE); ++ if (ret < 0) + goto fail; +- } +- if ((rsi_usb_master_reg_write(adapter, +- NWP_WWD_MODE_AND_RSTART, +- NWP_WWD_TIMER_DISABLE, +- RSI_9116_REG_SIZE)) < 0) { ++ ret = rsi_usb_master_reg_write(adapter, ++ NWP_WWD_MODE_AND_RSTART, ++ NWP_WWD_TIMER_DISABLE, ++ RSI_9116_REG_SIZE); ++ if (ret < 0) + goto fail; +- } + } + + rsi_dbg(INFO_ZONE, "Reset card done\n"); +diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c +index f7fe56affbcd2..326b1cc1d2bcb 100644 +--- a/drivers/net/wireless/st/cw1200/main.c ++++ b/drivers/net/wireless/st/cw1200/main.c +@@ -381,6 +381,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, + CW1200_LINK_ID_MAX, + cw1200_skb_dtor, + priv)) { ++ destroy_workqueue(priv->workqueue); + ieee80211_free_hw(hw); + return NULL; + } +@@ 
-392,6 +393,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, + for (; i > 0; i--) + cw1200_queue_deinit(&priv->tx_queue[i - 1]); + cw1200_queue_stats_deinit(&priv->tx_queue_stats); ++ destroy_workqueue(priv->workqueue); + ieee80211_free_hw(hw); + return NULL; + } +diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c +index f1c1624cec8f5..6f10e0998f1ce 100644 +--- a/drivers/net/xen-netback/xenbus.c ++++ b/drivers/net/xen-netback/xenbus.c +@@ -557,12 +557,14 @@ static int xen_register_credit_watch(struct xenbus_device *dev, + return -ENOMEM; + snprintf(node, maxlen, "%s/rate", dev->nodename); + vif->credit_watch.node = node; ++ vif->credit_watch.will_handle = NULL; + vif->credit_watch.callback = xen_net_rate_changed; + err = register_xenbus_watch(&vif->credit_watch); + if (err) { + pr_err("Failed to set watcher %s\n", vif->credit_watch.node); + kfree(node); + vif->credit_watch.node = NULL; ++ vif->credit_watch.will_handle = NULL; + vif->credit_watch.callback = NULL; + } + return err; +@@ -609,6 +611,7 @@ static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev, + snprintf(node, maxlen, "%s/request-multicast-control", + dev->otherend); + vif->mcast_ctrl_watch.node = node; ++ vif->mcast_ctrl_watch.will_handle = NULL; + vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed; + err = register_xenbus_watch(&vif->mcast_ctrl_watch); + if (err) { +@@ -616,6 +619,7 @@ static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev, + vif->mcast_ctrl_watch.node); + kfree(node); + vif->mcast_ctrl_watch.node = NULL; ++ vif->mcast_ctrl_watch.will_handle = NULL; + vif->mcast_ctrl_watch.callback = NULL; + } + return err; +@@ -820,7 +824,7 @@ static void connect(struct backend_info *be) + xenvif_carrier_on(be->vif); + + unregister_hotplug_status_watch(be); +- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, ++ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL, + hotplug_status_changed, + "%s/%s", 
dev->nodename, "hotplug-status"); + if (!err) +diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c +index ec930ee2c847e..64df50827642b 100644 +--- a/drivers/nfc/s3fwrn5/firmware.c ++++ b/drivers/nfc/s3fwrn5/firmware.c +@@ -293,8 +293,10 @@ static int s3fwrn5_fw_request_firmware(struct s3fwrn5_fw_info *fw_info) + if (ret < 0) + return ret; + +- if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE) ++ if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE) { ++ release_firmware(fw->fw); + return -EINVAL; ++ } + + memcpy(fw->date, fw->fw->data + 0x00, 12); + fw->date[12] = '\0'; +diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c +index 47a4828b8b310..9251441fd8a35 100644 +--- a/drivers/nvdimm/label.c ++++ b/drivers/nvdimm/label.c +@@ -980,6 +980,15 @@ static int __blk_label_update(struct nd_region *nd_region, + } + } + ++ /* release slots associated with any invalidated UUIDs */ ++ mutex_lock(&nd_mapping->lock); ++ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) ++ if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) { ++ reap_victim(nd_mapping, label_ent); ++ list_move(&label_ent->list, &list); ++ } ++ mutex_unlock(&nd_mapping->lock); ++ + /* + * Find the resource associated with the first label in the set + * per the v1.2 namespace specification. 
+@@ -999,8 +1008,10 @@ static int __blk_label_update(struct nd_region *nd_region, + if (is_old_resource(res, old_res_list, old_num_resources)) + continue; /* carry-over */ + slot = nd_label_alloc_slot(ndd); +- if (slot == UINT_MAX) ++ if (slot == UINT_MAX) { ++ rc = -ENXIO; + goto abort; ++ } + dev_dbg(ndd->dev, "allocated: %d\n", slot); + + nd_label = to_label(ndd, slot); +diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c +index bea86899bd5df..9c3d2982248d3 100644 +--- a/drivers/pci/controller/pcie-brcmstb.c ++++ b/drivers/pci/controller/pcie-brcmstb.c +@@ -893,6 +893,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) + burst = 0x2; /* 512 bytes */ + + /* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */ ++ tmp = readl(base + PCIE_MISC_MISC_CTRL); + u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK); + u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK); + u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK); +diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c +index 905e938082432..cc5b7823edeb7 100644 +--- a/drivers/pci/controller/pcie-iproc.c ++++ b/drivers/pci/controller/pcie-iproc.c +@@ -192,8 +192,15 @@ static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = { + .imap_window_offset = 0x4, + }, + { +- /* IARR1/IMAP1 (currently unused) */ +- .type = IPROC_PCIE_IB_MAP_INVALID, ++ /* IARR1/IMAP1 */ ++ .type = IPROC_PCIE_IB_MAP_MEM, ++ .size_unit = SZ_1M, ++ .region_sizes = { 8 }, ++ .nr_sizes = 1, ++ .nr_windows = 8, ++ .imap_addr_offset = 0x4, ++ .imap_window_offset = 0x8, ++ + }, + { + /* IARR2/IMAP2 */ +@@ -307,7 +314,7 @@ enum iproc_pcie_reg { + }; + + /* iProc PCIe PAXB BCMA registers */ +-static const u16 iproc_pcie_reg_paxb_bcma[] = { ++static const u16 iproc_pcie_reg_paxb_bcma[IPROC_PCIE_MAX_NUM_REG] = { + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x120, + [IPROC_PCIE_CFG_IND_DATA] = 0x124, +@@ 
-318,7 +325,7 @@ static const u16 iproc_pcie_reg_paxb_bcma[] = { + }; + + /* iProc PCIe PAXB registers */ +-static const u16 iproc_pcie_reg_paxb[] = { ++static const u16 iproc_pcie_reg_paxb[IPROC_PCIE_MAX_NUM_REG] = { + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x120, + [IPROC_PCIE_CFG_IND_DATA] = 0x124, +@@ -334,7 +341,7 @@ static const u16 iproc_pcie_reg_paxb[] = { + }; + + /* iProc PCIe PAXB v2 registers */ +-static const u16 iproc_pcie_reg_paxb_v2[] = { ++static const u16 iproc_pcie_reg_paxb_v2[IPROC_PCIE_MAX_NUM_REG] = { + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x120, + [IPROC_PCIE_CFG_IND_DATA] = 0x124, +@@ -351,6 +358,8 @@ static const u16 iproc_pcie_reg_paxb_v2[] = { + [IPROC_PCIE_OMAP3] = 0xdf8, + [IPROC_PCIE_IARR0] = 0xd00, + [IPROC_PCIE_IMAP0] = 0xc00, ++ [IPROC_PCIE_IARR1] = 0xd08, ++ [IPROC_PCIE_IMAP1] = 0xd70, + [IPROC_PCIE_IARR2] = 0xd10, + [IPROC_PCIE_IMAP2] = 0xcc0, + [IPROC_PCIE_IARR3] = 0xe00, +@@ -363,7 +372,7 @@ static const u16 iproc_pcie_reg_paxb_v2[] = { + }; + + /* iProc PCIe PAXC v1 registers */ +-static const u16 iproc_pcie_reg_paxc[] = { ++static const u16 iproc_pcie_reg_paxc[IPROC_PCIE_MAX_NUM_REG] = { + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, + [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, +@@ -372,7 +381,7 @@ static const u16 iproc_pcie_reg_paxc[] = { + }; + + /* iProc PCIe PAXC v2 registers */ +-static const u16 iproc_pcie_reg_paxc_v2[] = { ++static const u16 iproc_pcie_reg_paxc_v2[IPROC_PCIE_MAX_NUM_REG] = { + [IPROC_PCIE_MSI_GIC_MODE] = 0x050, + [IPROC_PCIE_MSI_BASE_ADDR] = 0x074, + [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078, +diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c +index bf03648c20723..745a4e0c4994f 100644 +--- a/drivers/pci/pci-acpi.c ++++ b/drivers/pci/pci-acpi.c +@@ -1060,7 +1060,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable) + { + while (bus->parent) { + if (acpi_pm_device_can_wakeup(&bus->self->dev)) +- return 
acpi_pm_set_bridge_wakeup(&bus->self->dev, enable); ++ return acpi_pm_set_device_wakeup(&bus->self->dev, enable); + + bus = bus->parent; + } +@@ -1068,7 +1068,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable) + /* We have reached the root bus. */ + if (bus->bridge) { + if (acpi_pm_device_can_wakeup(bus->bridge)) +- return acpi_pm_set_bridge_wakeup(bus->bridge, enable); ++ return acpi_pm_set_device_wakeup(bus->bridge, enable); + } + return 0; + } +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index e578d34095e91..6427cbd0a5be2 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -6202,19 +6202,21 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev, + while (*p) { + count = 0; + if (sscanf(p, "%d%n", &align_order, &count) == 1 && +- p[count] == '@') { ++ p[count] == '@') { + p += count + 1; ++ if (align_order > 63) { ++ pr_err("PCI: Invalid requested alignment (order %d)\n", ++ align_order); ++ align_order = PAGE_SHIFT; ++ } + } else { +- align_order = -1; ++ align_order = PAGE_SHIFT; + } + + ret = pci_dev_str_match(dev, p, &p); + if (ret == 1) { + *resize = true; +- if (align_order == -1) +- align = PAGE_SIZE; +- else +- align = 1 << align_order; ++ align = 1ULL << align_order; + break; + } else if (ret < 0) { + pr_err("PCI: Can't parse resource_alignment parameter: %s\n", +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index f70692ac79c56..fb1dc11e7cc52 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -5567,17 +5567,26 @@ static void pci_fixup_no_d0_pme(struct pci_dev *dev) + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme); + + /* +- * Device [12d8:0x400e] and [12d8:0x400f] ++ * Device 12d8:0x400e [OHCI] and 12d8:0x400f [EHCI] ++ * + * These devices advertise PME# support in all power states but don't + * reliably assert it. 
++ * ++ * These devices also advertise MSI, but documentation (PI7C9X440SL.pdf) ++ * says "The MSI Function is not implemented on this device" in chapters ++ * 7.3.27, 7.3.29-7.3.31. + */ +-static void pci_fixup_no_pme(struct pci_dev *dev) ++static void pci_fixup_no_msi_no_pme(struct pci_dev *dev) + { ++#ifdef CONFIG_PCI_MSI ++ pci_info(dev, "MSI is not implemented on this device, disabling it\n"); ++ dev->no_msi = 1; ++#endif + pci_info(dev, "PME# is unreliable, disabling it\n"); + dev->pme_support = 0; + } +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_pme); +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_pme); ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_msi_no_pme); ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_msi_no_pme); + + static void apex_pci_fixup_class(struct pci_dev *pdev) + { +diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c +index 3861505741e6d..ed2077e7470ae 100644 +--- a/drivers/pci/slot.c ++++ b/drivers/pci/slot.c +@@ -272,6 +272,9 @@ placeholder: + goto err; + } + ++ INIT_LIST_HEAD(&slot->list); ++ list_add(&slot->list, &parent->slots); ++ + err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, + "%s", slot_name); + if (err) { +@@ -279,9 +282,6 @@ placeholder: + goto err; + } + +- INIT_LIST_HEAD(&slot->list); +- list_add(&slot->list, &parent->slots); +- + down_read(&pci_bus_sem); + list_for_each_entry(dev, &parent->devices, bus_list) + if (PCI_SLOT(dev->devfn) == slot_nr) +diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig +index c8126bde9d7cc..43150608d8b62 100644 +--- a/drivers/phy/mediatek/Kconfig ++++ b/drivers/phy/mediatek/Kconfig +@@ -38,7 +38,9 @@ config PHY_MTK_XSPHY + + config PHY_MTK_HDMI + tristate "MediaTek HDMI-PHY Driver" +- depends on ARCH_MEDIATEK && OF ++ depends on ARCH_MEDIATEK || COMPILE_TEST ++ depends on COMMON_CLK ++ depends on OF + select GENERIC_PHY + help + Support HDMI PHY for Mediatek SoCs. 
+diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c +index 47c029d4b270b..206cc34687223 100644 +--- a/drivers/phy/mediatek/phy-mtk-hdmi.c ++++ b/drivers/phy/mediatek/phy-mtk-hdmi.c +@@ -84,8 +84,9 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy) + hdmi_phy->conf->hdmi_phy_disable_tmds) + return &mtk_hdmi_phy_dev_ops; + +- dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n"); +- return NULL; ++ if (hdmi_phy) ++ dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n"); ++ return NULL; + } + + static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy, +diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c +index e34e4475027ca..2cb949f931b69 100644 +--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c ++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c +@@ -656,8 +656,10 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) + */ + pm_runtime_enable(dev); + phy_usb2_ops = of_device_get_match_data(dev); +- if (!phy_usb2_ops) +- return -EINVAL; ++ if (!phy_usb2_ops) { ++ ret = -EINVAL; ++ goto error; ++ } + + mutex_init(&channel->lock); + for (i = 0; i < NUM_OF_PHYS; i++) { +diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c +index ad88d74c18842..181a1be5f4917 100644 +--- a/drivers/phy/tegra/xusb.c ++++ b/drivers/phy/tegra/xusb.c +@@ -688,7 +688,7 @@ static int tegra_xusb_setup_usb_role_switch(struct tegra_xusb_port *port) + * reference to retrieve usb-phy details. 
+ */ + port->usb_phy.dev = &lane->pad->lanes[port->index]->dev; +- port->usb_phy.dev->driver = port->padctl->dev->driver; ++ port->usb_phy.dev->driver = port->dev.driver; + port->usb_phy.otg->usb_phy = &port->usb_phy; + port->usb_phy.otg->set_peripheral = tegra_xusb_set_peripheral; + port->usb_phy.otg->set_host = tegra_xusb_set_host; +diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c +index 3663d87f51a01..9fc4433fece4f 100644 +--- a/drivers/pinctrl/core.c ++++ b/drivers/pinctrl/core.c +@@ -1602,9 +1602,11 @@ static int pinctrl_pins_show(struct seq_file *s, void *what) + struct pinctrl_dev *pctldev = s->private; + const struct pinctrl_ops *ops = pctldev->desc->pctlops; + unsigned i, pin; ++#ifdef CONFIG_GPIOLIB + struct pinctrl_gpio_range *range; + unsigned int gpio_num; + struct gpio_chip *chip; ++#endif + + seq_printf(s, "registered pins: %d\n", pctldev->desc->npins); + +diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c +index 62c02b969327f..7521a924dffb0 100644 +--- a/drivers/pinctrl/pinctrl-falcon.c ++++ b/drivers/pinctrl/pinctrl-falcon.c +@@ -431,24 +431,28 @@ static int pinctrl_falcon_probe(struct platform_device *pdev) + + /* load and remap the pad resources of the different banks */ + for_each_compatible_node(np, NULL, "lantiq,pad-falcon") { +- struct platform_device *ppdev = of_find_device_by_node(np); + const __be32 *bank = of_get_property(np, "lantiq,bank", NULL); + struct resource res; ++ struct platform_device *ppdev; + u32 avail; + int pins; + + if (!of_device_is_available(np)) + continue; + +- if (!ppdev) { +- dev_err(&pdev->dev, "failed to find pad pdev\n"); +- continue; +- } + if (!bank || *bank >= PORTS) + continue; + if (of_address_to_resource(np, 0, &res)) + continue; ++ ++ ppdev = of_find_device_by_node(np); ++ if (!ppdev) { ++ dev_err(&pdev->dev, "failed to find pad pdev\n"); ++ continue; ++ } ++ + falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL); ++ put_device(&ppdev->dev); + if 
(IS_ERR(falcon_info.clk[*bank])) { + dev_err(&ppdev->dev, "failed to get clock\n"); + of_node_put(np); +diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c +index 19cfd1e76ee2c..e69f6da40dc0a 100644 +--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c +@@ -677,7 +677,7 @@ static const struct sunxi_desc_pin a100_pins[] = { + SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 19)), + }; + +-static const unsigned int a100_irq_bank_map[] = { 0, 1, 2, 3, 4, 5, 6}; ++static const unsigned int a100_irq_bank_map[] = { 1, 2, 3, 4, 5, 6, 7}; + + static const struct sunxi_pinctrl_desc a100_pinctrl_data = { + .pins = a100_pins, +diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c +index 8e792f8e2dc9a..e42a3a0005a72 100644 +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c +@@ -1142,20 +1142,22 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc) + if (bank == pctl->desc->irq_banks) + return; + ++ chained_irq_enter(chip, desc); ++ + reg = sunxi_irq_status_reg_from_bank(pctl->desc, bank); + val = readl(pctl->membase + reg); + + if (val) { + int irqoffset; + +- chained_irq_enter(chip, desc); + for_each_set_bit(irqoffset, &val, IRQ_PER_BANK) { + int pin_irq = irq_find_mapping(pctl->domain, + bank * IRQ_PER_BANK + irqoffset); + generic_handle_irq(pin_irq); + } +- chained_irq_exit(chip, desc); + } ++ ++ chained_irq_exit(chip, desc); + } + + static int sunxi_pinctrl_add_function(struct sunxi_pinctrl *pctl, +diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c +index dfa1f816a45f4..f9df218fc2bbe 100644 +--- a/drivers/platform/chrome/cros_ec_spi.c ++++ b/drivers/platform/chrome/cros_ec_spi.c +@@ -742,7 +742,6 @@ static int cros_ec_spi_probe(struct spi_device *spi) + int err; + + spi->bits_per_word = 8; +- spi->mode = SPI_MODE_0; + spi->rt = true; + err = spi_setup(spi); + if (err < 
0) +diff --git a/drivers/platform/x86/dell-smbios-base.c b/drivers/platform/x86/dell-smbios-base.c +index 2e2cd565926aa..3a1dbf1994413 100644 +--- a/drivers/platform/x86/dell-smbios-base.c ++++ b/drivers/platform/x86/dell-smbios-base.c +@@ -594,6 +594,7 @@ static int __init dell_smbios_init(void) + if (wmi && smm) { + pr_err("No SMBIOS backends available (wmi: %d, smm: %d)\n", + wmi, smm); ++ ret = -ENODEV; + goto fail_create_group; + } + +diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c +index 0419c8001fe33..3b49a1f4061bc 100644 +--- a/drivers/platform/x86/intel-vbtn.c ++++ b/drivers/platform/x86/intel-vbtn.c +@@ -15,9 +15,13 @@ + #include <linux/platform_device.h> + #include <linux/suspend.h> + ++/* Returned when NOT in tablet mode on some HP Stream x360 11 models */ ++#define VGBS_TABLET_MODE_FLAG_ALT 0x10 + /* When NOT in tablet mode, VGBS returns with the flag 0x40 */ +-#define TABLET_MODE_FLAG 0x40 +-#define DOCK_MODE_FLAG 0x80 ++#define VGBS_TABLET_MODE_FLAG 0x40 ++#define VGBS_DOCK_MODE_FLAG 0x80 ++ ++#define VGBS_TABLET_MODE_FLAGS (VGBS_TABLET_MODE_FLAG | VGBS_TABLET_MODE_FLAG_ALT) + + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("AceLan Kao"); +@@ -72,9 +76,9 @@ static void detect_tablet_mode(struct platform_device *device) + if (ACPI_FAILURE(status)) + return; + +- m = !(vgbs & TABLET_MODE_FLAG); ++ m = !(vgbs & VGBS_TABLET_MODE_FLAGS); + input_report_switch(priv->input_dev, SW_TABLET_MODE, m); +- m = (vgbs & DOCK_MODE_FLAG) ? 1 : 0; ++ m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 
1 : 0; + input_report_switch(priv->input_dev, SW_DOCK, m); + } + +@@ -212,6 +216,12 @@ static const struct dmi_system_id dmi_switches_allow_list[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"), + }, + }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"), ++ }, ++ }, + {} /* Array terminator */ + }; + +diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c +index 986ad3dda1c10..8bce3da32a42b 100644 +--- a/drivers/platform/x86/mlx-platform.c ++++ b/drivers/platform/x86/mlx-platform.c +@@ -319,15 +319,6 @@ static struct i2c_mux_reg_platform_data mlxplat_extended_mux_data[] = { + }; + + /* Platform hotplug devices */ +-static struct i2c_board_info mlxplat_mlxcpld_psu[] = { +- { +- I2C_BOARD_INFO("24c02", 0x51), +- }, +- { +- I2C_BOARD_INFO("24c02", 0x50), +- }, +-}; +- + static struct i2c_board_info mlxplat_mlxcpld_pwr[] = { + { + I2C_BOARD_INFO("dps460", 0x59), +@@ -383,15 +374,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = { + .label = "psu1", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(0), +- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[0], +- .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR, ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu2", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(1), +- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[1], +- .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR, ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + }; + +@@ -458,7 +447,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = { + .aggr_mask = MLXPLAT_CPLD_AGGR_PSU_MASK_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = MLXPLAT_CPLD_PSU_MASK, +- .count = ARRAY_SIZE(mlxplat_mlxcpld_psu), ++ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_psu_items_data), + .inversed = 1, + .health = false, + }, +@@ -467,7 +456,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = { + .aggr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF, 
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = MLXPLAT_CPLD_PWR_MASK, +- .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr), ++ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_pwr_items_data), + .inversed = 0, + .health = false, + }, +@@ -476,7 +465,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = { + .aggr_mask = MLXPLAT_CPLD_AGGR_FAN_MASK_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = MLXPLAT_CPLD_FAN_MASK, +- .count = ARRAY_SIZE(mlxplat_mlxcpld_fan), ++ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_items_data), + .inversed = 1, + .health = false, + }, +@@ -497,7 +486,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = { + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER, + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = MLXPLAT_CPLD_PSU_MASK, +- .count = ARRAY_SIZE(mlxplat_mlxcpld_psu), ++ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_psu_items_data), + .inversed = 1, + .health = false, + }, +@@ -506,7 +495,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = { + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER, + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = MLXPLAT_CPLD_PWR_MASK, +- .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr), ++ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_pwr_items_data), + .inversed = 0, + .health = false, + }, +@@ -515,7 +504,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = { + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER, + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = MLXPLAT_CPLD_FAN_MASK, +- .count = ARRAY_SIZE(mlxplat_mlxcpld_fan), ++ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_items_data), + .inversed = 1, + .health = false, + }, +@@ -603,15 +592,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_msn274x_psu_items_data[] = { + .label = "psu1", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(0), +- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[0], +- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu2", + .reg = 
MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(1), +- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[1], +- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + }; + +diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c +index 9d981b76c1e72..a4df1ea923864 100644 +--- a/drivers/power/supply/axp288_charger.c ++++ b/drivers/power/supply/axp288_charger.c +@@ -548,14 +548,15 @@ out: + + /* + * The HP Pavilion x2 10 series comes in a number of variants: +- * Bay Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "815D" +- * Cherry Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "813E" +- * Cherry Trail SoC + TI PMIC, DMI_BOARD_NAME: "827C" or "82F4" ++ * Bay Trail SoC + AXP288 PMIC, Micro-USB, DMI_BOARD_NAME: "8021" ++ * Bay Trail SoC + AXP288 PMIC, Type-C, DMI_BOARD_NAME: "815D" ++ * Cherry Trail SoC + AXP288 PMIC, Type-C, DMI_BOARD_NAME: "813E" ++ * Cherry Trail SoC + TI PMIC, Type-C, DMI_BOARD_NAME: "827C" or "82F4" + * +- * The variants with the AXP288 PMIC are all kinds of special: ++ * The variants with the AXP288 + Type-C connector are all kinds of special: + * +- * 1. All variants use a Type-C connector which the AXP288 does not support, so +- * when using a Type-C charger it is not recognized. Unlike most AXP288 devices, ++ * 1. They use a Type-C connector which the AXP288 does not support, so when ++ * using a Type-C charger it is not recognized. Unlike most AXP288 devices, + * this model actually has mostly working ACPI AC / Battery code, the ACPI code + * "solves" this by simply setting the input_current_limit to 3A. + * There are still some issues with the ACPI code, so we use this native driver, +@@ -578,12 +579,17 @@ out: + */ + static const struct dmi_system_id axp288_hp_x2_dmi_ids[] = { + { +- /* +- * Bay Trail model has "Hewlett-Packard" as sys_vendor, Cherry +- * Trail model has "HP", so we only match on product_name. 
+- */ + .matches = { +- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "815D"), ++ }, ++ }, ++ { ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "HP"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "813E"), + }, + }, + {} /* Terminating entry */ +diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c +index d14186525e1e9..845af0f44c022 100644 +--- a/drivers/power/supply/bq24190_charger.c ++++ b/drivers/power/supply/bq24190_charger.c +@@ -448,8 +448,10 @@ static ssize_t bq24190_sysfs_show(struct device *dev, + return -EINVAL; + + ret = pm_runtime_get_sync(bdi->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(bdi->dev); + return ret; ++ } + + ret = bq24190_read_mask(bdi, info->reg, info->mask, info->shift, &v); + if (ret) +@@ -1077,8 +1079,10 @@ static int bq24190_charger_get_property(struct power_supply *psy, + dev_dbg(bdi->dev, "prop: %d\n", psp); + + ret = pm_runtime_get_sync(bdi->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(bdi->dev); + return ret; ++ } + + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_TYPE: +@@ -1149,8 +1153,10 @@ static int bq24190_charger_set_property(struct power_supply *psy, + dev_dbg(bdi->dev, "prop: %d\n", psp); + + ret = pm_runtime_get_sync(bdi->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(bdi->dev); + return ret; ++ } + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: +@@ -1410,8 +1416,10 @@ static int bq24190_battery_get_property(struct power_supply *psy, + dev_dbg(bdi->dev, "prop: %d\n", psp); + + ret = pm_runtime_get_sync(bdi->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(bdi->dev); + return ret; ++ } + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: +@@ -1456,8 +1464,10 @@ static int 
bq24190_battery_set_property(struct power_supply *psy, + dev_dbg(bdi->dev, "prop: %d\n", psp); + + ret = pm_runtime_get_sync(bdi->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(bdi->dev); + return ret; ++ } + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: +diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c +index 34c21c51bac10..945c3257ca931 100644 +--- a/drivers/power/supply/bq25890_charger.c ++++ b/drivers/power/supply/bq25890_charger.c +@@ -299,7 +299,7 @@ static const union { + /* TODO: BQ25896 has max ICHG 3008 mA */ + [TBL_ICHG] = { .rt = {0, 5056000, 64000} }, /* uA */ + [TBL_ITERM] = { .rt = {64000, 1024000, 64000} }, /* uA */ +- [TBL_IILIM] = { .rt = {50000, 3200000, 50000} }, /* uA */ ++ [TBL_IILIM] = { .rt = {100000, 3250000, 50000} }, /* uA */ + [TBL_VREG] = { .rt = {3840000, 4608000, 16000} }, /* uV */ + [TBL_BOOSTV] = { .rt = {4550000, 5510000, 64000} }, /* uV */ + [TBL_SYSVMIN] = { .rt = {3000000, 3700000, 100000} }, /* uV */ +diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c +index f284547913d6f..2e9672fe4df1f 100644 +--- a/drivers/power/supply/max17042_battery.c ++++ b/drivers/power/supply/max17042_battery.c +@@ -85,9 +85,10 @@ static enum power_supply_property max17042_battery_props[] = { + POWER_SUPPLY_PROP_TEMP_MAX, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_SCOPE, ++ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, ++ // these two have to be at the end on the list + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CURRENT_AVG, +- POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, + }; + + static int max17042_get_temperature(struct max17042_chip *chip, int *temp) +diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c +index 333ba83006e48..a12a1ad9b5fe3 100644 +--- a/drivers/ps3/ps3stor_lib.c ++++ b/drivers/ps3/ps3stor_lib.c +@@ -189,7 +189,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler) + dev->bounce_lpar = 
ps3_mm_phys_to_lpar(__pa(dev->bounce_buf)); + dev->bounce_dma = dma_map_single(&dev->sbd.core, dev->bounce_buf, + dev->bounce_size, DMA_BIDIRECTIONAL); +- if (!dev->bounce_dma) { ++ if (dma_mapping_error(&dev->sbd.core, dev->bounce_dma)) { + dev_err(&dev->sbd.core, "%s:%u: map DMA region failed\n", + __func__, __LINE__); + error = -ENODEV; +diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c +index c50d453552bd4..86bcafd23e4f6 100644 +--- a/drivers/pwm/pwm-imx27.c ++++ b/drivers/pwm/pwm-imx27.c +@@ -235,8 +235,9 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm, + + period_cycles /= prescale; + c = clkrate * state->duty_cycle; +- do_div(c, NSEC_PER_SEC * prescale); ++ do_div(c, NSEC_PER_SEC); + duty_cycles = c; ++ duty_cycles /= prescale; + + /* + * according to imx pwm RM, the real period value should be PERIOD +diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c +index 7551253ada32b..bf3f14fb5f244 100644 +--- a/drivers/pwm/pwm-lp3943.c ++++ b/drivers/pwm/pwm-lp3943.c +@@ -275,6 +275,7 @@ static int lp3943_pwm_probe(struct platform_device *pdev) + lp3943_pwm->chip.dev = &pdev->dev; + lp3943_pwm->chip.ops = &lp3943_pwm_ops; + lp3943_pwm->chip.npwm = LP3943_NUM_PWMS; ++ lp3943_pwm->chip.base = -1; + + platform_set_drvdata(pdev, lp3943_pwm); + +diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c +index 38a4c5c1317b2..482d5b9cec1fb 100644 +--- a/drivers/pwm/pwm-sun4i.c ++++ b/drivers/pwm/pwm-sun4i.c +@@ -294,12 +294,8 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + + ctrl |= BIT_CH(PWM_CLK_GATING, pwm->hwpwm); + +- if (state->enabled) { ++ if (state->enabled) + ctrl |= BIT_CH(PWM_EN, pwm->hwpwm); +- } else { +- ctrl &= ~BIT_CH(PWM_EN, pwm->hwpwm); +- ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm); +- } + + sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG); + +diff --git a/drivers/pwm/pwm-zx.c b/drivers/pwm/pwm-zx.c +index e2c21cc34a96a..3763ce5311ac2 100644 +--- 
a/drivers/pwm/pwm-zx.c ++++ b/drivers/pwm/pwm-zx.c +@@ -238,6 +238,7 @@ static int zx_pwm_probe(struct platform_device *pdev) + ret = pwmchip_add(&zpc->chip); + if (ret < 0) { + dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret); ++ clk_disable_unprepare(zpc->pclk); + return ret; + } + +diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c +index cd1224182ad74..90cb8445f7216 100644 +--- a/drivers/regulator/axp20x-regulator.c ++++ b/drivers/regulator/axp20x-regulator.c +@@ -594,7 +594,7 @@ static const struct regulator_desc axp22x_regulators[] = { + AXP22X_DLDO1_V_OUT, AXP22X_DLDO1_V_OUT_MASK, + AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK), + AXP_DESC(AXP22X, DLDO2, "dldo2", "dldoin", 700, 3300, 100, +- AXP22X_DLDO2_V_OUT, AXP22X_PWR_OUT_DLDO2_MASK, ++ AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK, + AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK), + AXP_DESC(AXP22X, DLDO3, "dldo3", "dldoin", 700, 3300, 100, + AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK, +diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h +index 47b4561443a94..f2bcc9d9fda65 100644 +--- a/drivers/remoteproc/mtk_common.h ++++ b/drivers/remoteproc/mtk_common.h +@@ -32,22 +32,22 @@ + #define MT8183_SCP_CACHESIZE_8KB BIT(8) + #define MT8183_SCP_CACHE_CON_WAYEN BIT(10) + +-#define MT8192_L2TCM_SRAM_PD_0 0x210C0 +-#define MT8192_L2TCM_SRAM_PD_1 0x210C4 +-#define MT8192_L2TCM_SRAM_PD_2 0x210C8 +-#define MT8192_L1TCM_SRAM_PDN 0x2102C +-#define MT8192_CPU0_SRAM_PD 0x21080 +- +-#define MT8192_SCP2APMCU_IPC_SET 0x24080 +-#define MT8192_SCP2APMCU_IPC_CLR 0x24084 ++#define MT8192_L2TCM_SRAM_PD_0 0x10C0 ++#define MT8192_L2TCM_SRAM_PD_1 0x10C4 ++#define MT8192_L2TCM_SRAM_PD_2 0x10C8 ++#define MT8192_L1TCM_SRAM_PDN 0x102C ++#define MT8192_CPU0_SRAM_PD 0x1080 ++ ++#define MT8192_SCP2APMCU_IPC_SET 0x4080 ++#define MT8192_SCP2APMCU_IPC_CLR 0x4084 + #define MT8192_SCP_IPC_INT_BIT BIT(0) +-#define MT8192_SCP2SPM_IPC_CLR 0x24094 +-#define 
MT8192_GIPC_IN_SET 0x24098 ++#define MT8192_SCP2SPM_IPC_CLR 0x4094 ++#define MT8192_GIPC_IN_SET 0x4098 + #define MT8192_HOST_IPC_INT_BIT BIT(0) + +-#define MT8192_CORE0_SW_RSTN_CLR 0x30000 +-#define MT8192_CORE0_SW_RSTN_SET 0x30004 +-#define MT8192_CORE0_WDT_CFG 0x30034 ++#define MT8192_CORE0_SW_RSTN_CLR 0x10000 ++#define MT8192_CORE0_SW_RSTN_SET 0x10004 ++#define MT8192_CORE0_WDT_CFG 0x10034 + + #define SCP_FW_VER_LEN 32 + #define SCP_SHARE_BUFFER_SIZE 288 +diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c +index 577cbd5d421ec..52fa01d67c18e 100644 +--- a/drivers/remoteproc/mtk_scp.c ++++ b/drivers/remoteproc/mtk_scp.c +@@ -350,9 +350,10 @@ static int scp_load(struct rproc *rproc, const struct firmware *fw) + + ret = scp->data->scp_before_load(scp); + if (ret < 0) +- return ret; ++ goto leave; + + ret = scp_elf_load_segments(rproc, fw); ++leave: + clk_disable_unprepare(scp->clk); + + return ret; +@@ -772,12 +773,14 @@ static const struct mtk_scp_of_data mt8192_of_data = { + .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT, + }; + ++#if defined(CONFIG_OF) + static const struct of_device_id mtk_scp_of_match[] = { + { .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data }, + { .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data }, + {}, + }; + MODULE_DEVICE_TABLE(of, mtk_scp_of_match); ++#endif + + static struct platform_driver mtk_scp_driver = { + .probe = scp_probe, +diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c +index efb2c1aa80a3c..9eb599701f9b0 100644 +--- a/drivers/remoteproc/qcom_q6v5_adsp.c ++++ b/drivers/remoteproc/qcom_q6v5_adsp.c +@@ -193,8 +193,10 @@ static int adsp_start(struct rproc *rproc) + + dev_pm_genpd_set_performance_state(adsp->dev, INT_MAX); + ret = pm_runtime_get_sync(adsp->dev); +- if (ret) ++ if (ret) { ++ pm_runtime_put_noidle(adsp->dev); + goto disable_xo_clk; ++ } + + ret = clk_bulk_prepare_enable(adsp->num_clks, adsp->clks); + if (ret) { +@@ -362,15 +364,12 @@ 
static int adsp_init_mmio(struct qcom_adsp *adsp, + struct platform_device *pdev) + { + struct device_node *syscon; +- struct resource *res; + int ret; + +- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +- adsp->qdsp6ss_base = devm_ioremap(&pdev->dev, res->start, +- resource_size(res)); +- if (!adsp->qdsp6ss_base) { ++ adsp->qdsp6ss_base = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(adsp->qdsp6ss_base)) { + dev_err(adsp->dev, "failed to map QDSP6SS registers\n"); +- return -ENOMEM; ++ return PTR_ERR(adsp->qdsp6ss_base); + } + + syscon = of_parse_phandle(pdev->dev.of_node, "qcom,halt-regs", 0); +diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c +index eb3457a6c3b73..ba6f7551242de 100644 +--- a/drivers/remoteproc/qcom_q6v5_mss.c ++++ b/drivers/remoteproc/qcom_q6v5_mss.c +@@ -349,8 +349,11 @@ static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds, + for (i = 0; i < pd_count; i++) { + dev_pm_genpd_set_performance_state(pds[i], INT_MAX); + ret = pm_runtime_get_sync(pds[i]); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(pds[i]); ++ dev_pm_genpd_set_performance_state(pds[i], 0); + goto unroll_pd_votes; ++ } + } + + return 0; +diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c +index 3837f23995e05..0678b417707ef 100644 +--- a/drivers/remoteproc/qcom_q6v5_pas.c ++++ b/drivers/remoteproc/qcom_q6v5_pas.c +@@ -90,8 +90,11 @@ static int adsp_pds_enable(struct qcom_adsp *adsp, struct device **pds, + for (i = 0; i < pd_count; i++) { + dev_pm_genpd_set_performance_state(pds[i], INT_MAX); + ret = pm_runtime_get_sync(pds[i]); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(pds[i]); ++ dev_pm_genpd_set_performance_state(pds[i], 0); + goto unroll_pd_votes; ++ } + } + + return 0; +diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c +index 9eb2f6bccea63..b37b111b15b39 100644 +--- a/drivers/remoteproc/qcom_sysmon.c ++++ 
b/drivers/remoteproc/qcom_sysmon.c +@@ -22,6 +22,9 @@ struct qcom_sysmon { + struct rproc_subdev subdev; + struct rproc *rproc; + ++ int state; ++ struct mutex state_lock; ++ + struct list_head node; + + const char *name; +@@ -448,7 +451,10 @@ static int sysmon_prepare(struct rproc_subdev *subdev) + .ssr_event = SSCTL_SSR_EVENT_BEFORE_POWERUP + }; + ++ mutex_lock(&sysmon->state_lock); ++ sysmon->state = SSCTL_SSR_EVENT_BEFORE_POWERUP; + blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event); ++ mutex_unlock(&sysmon->state_lock); + + return 0; + } +@@ -472,20 +478,25 @@ static int sysmon_start(struct rproc_subdev *subdev) + .ssr_event = SSCTL_SSR_EVENT_AFTER_POWERUP + }; + ++ mutex_lock(&sysmon->state_lock); ++ sysmon->state = SSCTL_SSR_EVENT_AFTER_POWERUP; + blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event); ++ mutex_unlock(&sysmon->state_lock); + + mutex_lock(&sysmon_lock); + list_for_each_entry(target, &sysmon_list, node) { +- if (target == sysmon || +- target->rproc->state != RPROC_RUNNING) ++ if (target == sysmon) + continue; + ++ mutex_lock(&target->state_lock); + event.subsys_name = target->name; ++ event.ssr_event = target->state; + + if (sysmon->ssctl_version == 2) + ssctl_send_event(sysmon, &event); + else if (sysmon->ept) + sysmon_send_event(sysmon, &event); ++ mutex_unlock(&target->state_lock); + } + mutex_unlock(&sysmon_lock); + +@@ -500,7 +511,10 @@ static void sysmon_stop(struct rproc_subdev *subdev, bool crashed) + .ssr_event = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN + }; + ++ mutex_lock(&sysmon->state_lock); ++ sysmon->state = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN; + blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event); ++ mutex_unlock(&sysmon->state_lock); + + /* Don't request graceful shutdown if we've crashed */ + if (crashed) +@@ -521,7 +535,10 @@ static void sysmon_unprepare(struct rproc_subdev *subdev) + .ssr_event = SSCTL_SSR_EVENT_AFTER_SHUTDOWN + }; + ++ mutex_lock(&sysmon->state_lock); ++ sysmon->state = 
SSCTL_SSR_EVENT_AFTER_SHUTDOWN; + blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event); ++ mutex_unlock(&sysmon->state_lock); + } + + /** +@@ -534,11 +551,10 @@ static int sysmon_notify(struct notifier_block *nb, unsigned long event, + void *data) + { + struct qcom_sysmon *sysmon = container_of(nb, struct qcom_sysmon, nb); +- struct rproc *rproc = sysmon->rproc; + struct sysmon_event *sysmon_event = data; + + /* Skip non-running rprocs and the originating instance */ +- if (rproc->state != RPROC_RUNNING || ++ if (sysmon->state != SSCTL_SSR_EVENT_AFTER_POWERUP || + !strcmp(sysmon_event->subsys_name, sysmon->name)) { + dev_dbg(sysmon->dev, "not notifying %s\n", sysmon->name); + return NOTIFY_DONE; +@@ -591,6 +607,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc, + init_completion(&sysmon->ind_comp); + init_completion(&sysmon->shutdown_comp); + mutex_init(&sysmon->lock); ++ mutex_init(&sysmon->state_lock); + + sysmon->shutdown_irq = of_irq_get_byname(sysmon->dev->of_node, + "shutdown-ack"); +diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c +index 9011e477290ce..863c0214e0a8e 100644 +--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c ++++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c +@@ -445,10 +445,10 @@ static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev, + + kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start, + resource_size(res)); +- if (IS_ERR(kproc->mem[i].cpu_addr)) { ++ if (!kproc->mem[i].cpu_addr) { + dev_err(dev, "failed to map %s memory\n", + data->mems[i].name); +- return PTR_ERR(kproc->mem[i].cpu_addr); ++ return -ENOMEM; + } + kproc->mem[i].bus_addr = res->start; + kproc->mem[i].dev_addr = data->mems[i].dev_addr; +diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c +index 8ec9ea1ca72e1..6f90b85a58140 100644 +--- a/drivers/rtc/rtc-ep93xx.c ++++ b/drivers/rtc/rtc-ep93xx.c +@@ -33,7 +33,7 @@ struct ep93xx_rtc { + static int 
ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload, + unsigned short *delete) + { +- struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev); ++ struct ep93xx_rtc *ep93xx_rtc = dev_get_drvdata(dev); + unsigned long comp; + + comp = readl(ep93xx_rtc->mmio_base + EP93XX_RTC_SWCOMP); +@@ -51,7 +51,7 @@ static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload, + + static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm) + { +- struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev); ++ struct ep93xx_rtc *ep93xx_rtc = dev_get_drvdata(dev); + unsigned long time; + + time = readl(ep93xx_rtc->mmio_base + EP93XX_RTC_DATA); +@@ -62,7 +62,7 @@ static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm) + + static int ep93xx_rtc_set_time(struct device *dev, struct rtc_time *tm) + { +- struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev); ++ struct ep93xx_rtc *ep93xx_rtc = dev_get_drvdata(dev); + unsigned long secs = rtc_tm_to_time64(tm); + + writel(secs + 1, ep93xx_rtc->mmio_base + EP93XX_RTC_LOAD); +diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c +index 07a5630ec841f..4d9711d51f8f3 100644 +--- a/drivers/rtc/rtc-pcf2127.c ++++ b/drivers/rtc/rtc-pcf2127.c +@@ -243,10 +243,8 @@ static int pcf2127_nvmem_read(void *priv, unsigned int offset, + if (ret) + return ret; + +- ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_RAM_RD_CMD, +- val, bytes); +- +- return ret ?: bytes; ++ return regmap_bulk_read(pcf2127->regmap, PCF2127_REG_RAM_RD_CMD, ++ val, bytes); + } + + static int pcf2127_nvmem_write(void *priv, unsigned int offset, +@@ -261,10 +259,8 @@ static int pcf2127_nvmem_write(void *priv, unsigned int offset, + if (ret) + return ret; + +- ret = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_RAM_WRT_CMD, +- val, bytes); +- +- return ret ?: bytes; ++ return regmap_bulk_write(pcf2127->regmap, PCF2127_REG_RAM_WRT_CMD, ++ val, bytes); + } + + /* watchdog driver */ +diff --git 
a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c +index 99f86612f7751..dc78a523a69f2 100644 +--- a/drivers/s390/block/dasd_alias.c ++++ b/drivers/s390/block/dasd_alias.c +@@ -256,7 +256,6 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) + return; + device->discipline->get_uid(device, &uid); + spin_lock_irqsave(&lcu->lock, flags); +- list_del_init(&device->alias_list); + /* make sure that the workers don't use this device */ + if (device == lcu->suc_data.device) { + spin_unlock_irqrestore(&lcu->lock, flags); +@@ -283,6 +282,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) + + spin_lock_irqsave(&aliastree.lock, flags); + spin_lock(&lcu->lock); ++ list_del_init(&device->alias_list); + if (list_empty(&lcu->grouplist) && + list_empty(&lcu->active_devices) && + list_empty(&lcu->inactive_devices)) { +@@ -462,11 +462,19 @@ static int read_unit_address_configuration(struct dasd_device *device, + spin_unlock_irqrestore(&lcu->lock, flags); + + rc = dasd_sleep_on(cqr); +- if (rc && !suborder_not_supported(cqr)) { ++ if (!rc) ++ goto out; ++ ++ if (suborder_not_supported(cqr)) { ++ /* suborder not supported or device unusable for IO */ ++ rc = -EOPNOTSUPP; ++ } else { ++ /* IO failed but should be retried */ + spin_lock_irqsave(&lcu->lock, flags); + lcu->flags |= NEED_UAC_UPDATE; + spin_unlock_irqrestore(&lcu->lock, flags); + } ++out: + dasd_sfree_request(cqr, cqr->memdev); + return rc; + } +@@ -503,6 +511,14 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu) + return rc; + + spin_lock_irqsave(&lcu->lock, flags); ++ /* ++ * there is another update needed skip the remaining handling ++ * the data might already be outdated ++ * but especially do not add the device to an LCU with pending ++ * update ++ */ ++ if (lcu->flags & NEED_UAC_UPDATE) ++ goto out; + lcu->pav = NO_PAV; + for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) { + switch (lcu->uac->unit[i].ua_type) { +@@ -521,6 +537,7 @@ static 
int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu) + alias_list) { + _add_device_to_lcu(lcu, device, refdev); + } ++out: + spin_unlock_irqrestore(&lcu->lock, flags); + return 0; + } +@@ -625,6 +642,7 @@ int dasd_alias_add_device(struct dasd_device *device) + } + if (lcu->flags & UPDATE_PENDING) { + list_move(&device->alias_list, &lcu->active_devices); ++ private->pavgroup = NULL; + _schedule_lcu_update(lcu, device); + } + spin_unlock_irqrestore(&lcu->lock, flags); +diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c +index b29fe8d50baf2..33280ca181e95 100644 +--- a/drivers/s390/cio/device.c ++++ b/drivers/s390/cio/device.c +@@ -1664,10 +1664,10 @@ void __init ccw_device_destroy_console(struct ccw_device *cdev) + struct io_subchannel_private *io_priv = to_io_private(sch); + + set_io_private(sch, NULL); +- put_device(&sch->dev); +- put_device(&cdev->dev); + dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area), + io_priv->dma_area, io_priv->dma_area_dma); ++ put_device(&sch->dev); ++ put_device(&cdev->dev); + kfree(io_priv); + } + +diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c +index e3e157a749880..1b1da162f5f6b 100644 +--- a/drivers/scsi/aacraid/commctrl.c ++++ b/drivers/scsi/aacraid/commctrl.c +@@ -25,6 +25,7 @@ + #include <linux/completion.h> + #include <linux/dma-mapping.h> + #include <linux/blkdev.h> ++#include <linux/compat.h> + #include <linux/delay.h> /* ssleep prototype */ + #include <linux/kthread.h> + #include <linux/uaccess.h> +@@ -226,6 +227,12 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg) + return status; + } + ++struct compat_fib_ioctl { ++ u32 fibctx; ++ s32 wait; ++ compat_uptr_t fib; ++}; ++ + /** + * next_getadapter_fib - get the next fib + * @dev: adapter to use +@@ -243,8 +250,19 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg) + struct list_head * entry; + unsigned long flags; + +- if(copy_from_user((void *)&f, arg, 
sizeof(struct fib_ioctl))) +- return -EFAULT; ++ if (in_compat_syscall()) { ++ struct compat_fib_ioctl cf; ++ ++ if (copy_from_user(&cf, arg, sizeof(struct compat_fib_ioctl))) ++ return -EFAULT; ++ ++ f.fibctx = cf.fibctx; ++ f.wait = cf.wait; ++ f.fib = compat_ptr(cf.fib); ++ } else { ++ if (copy_from_user(&f, arg, sizeof(struct fib_ioctl))) ++ return -EFAULT; ++ } + /* + * Verify that the HANDLE passed in was a valid AdapterFibContext + * +diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c +index 8f3772480582c..0a82afaf40285 100644 +--- a/drivers/scsi/aacraid/linit.c ++++ b/drivers/scsi/aacraid/linit.c +@@ -1182,63 +1182,6 @@ static long aac_cfg_ioctl(struct file *file, + return aac_do_ioctl(aac, cmd, (void __user *)arg); + } + +-#ifdef CONFIG_COMPAT +-static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg) +-{ +- long ret; +- switch (cmd) { +- case FSACTL_MINIPORT_REV_CHECK: +- case FSACTL_SENDFIB: +- case FSACTL_OPEN_GET_ADAPTER_FIB: +- case FSACTL_CLOSE_GET_ADAPTER_FIB: +- case FSACTL_SEND_RAW_SRB: +- case FSACTL_GET_PCI_INFO: +- case FSACTL_QUERY_DISK: +- case FSACTL_DELETE_DISK: +- case FSACTL_FORCE_DELETE_DISK: +- case FSACTL_GET_CONTAINERS: +- case FSACTL_SEND_LARGE_FIB: +- ret = aac_do_ioctl(dev, cmd, (void __user *)arg); +- break; +- +- case FSACTL_GET_NEXT_ADAPTER_FIB: { +- struct fib_ioctl __user *f; +- +- f = compat_alloc_user_space(sizeof(*f)); +- ret = 0; +- if (clear_user(f, sizeof(*f))) +- ret = -EFAULT; +- if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32))) +- ret = -EFAULT; +- if (!ret) +- ret = aac_do_ioctl(dev, cmd, f); +- break; +- } +- +- default: +- ret = -ENOIOCTLCMD; +- break; +- } +- return ret; +-} +- +-static int aac_compat_ioctl(struct scsi_device *sdev, unsigned int cmd, +- void __user *arg) +-{ +- struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; +- if (!capable(CAP_SYS_RAWIO)) +- return -EPERM; +- return aac_compat_do_ioctl(dev, cmd, 
(unsigned long)arg); +-} +- +-static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg) +-{ +- if (!capable(CAP_SYS_RAWIO)) +- return -EPERM; +- return aac_compat_do_ioctl(file->private_data, cmd, arg); +-} +-#endif +- + static ssize_t aac_show_model(struct device *device, + struct device_attribute *attr, char *buf) + { +@@ -1523,7 +1466,7 @@ static const struct file_operations aac_cfg_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = aac_cfg_ioctl, + #ifdef CONFIG_COMPAT +- .compat_ioctl = aac_compat_cfg_ioctl, ++ .compat_ioctl = aac_cfg_ioctl, + #endif + .open = aac_cfg_open, + .llseek = noop_llseek, +@@ -1536,7 +1479,7 @@ static struct scsi_host_template aac_driver_template = { + .info = aac_info, + .ioctl = aac_ioctl, + #ifdef CONFIG_COMPAT +- .compat_ioctl = aac_compat_ioctl, ++ .compat_ioctl = aac_ioctl, + #endif + .queuecommand = aac_queuecommand, + .bios_param = aac_biosparm, +diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c +index 5f8a7ef8f6a8e..4f7befb43d604 100644 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@ -740,6 +740,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + for (i = 0; i < FNIC_IO_LOCKS; i++) + spin_lock_init(&fnic->io_req_lock[i]); + ++ err = -ENOMEM; + fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); + if (!fnic->io_req_pool) + goto err_out_free_resources; +diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +index 960de375ce699..2cbd8a524edab 100644 +--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c ++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +@@ -2409,8 +2409,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) + DRV_NAME " phy", hisi_hba); + if (rc) { + dev_err(dev, "could not request phy interrupt, rc=%d\n", rc); +- rc = -ENOENT; +- goto free_irq_vectors; ++ return -ENOENT; + } + + rc = devm_request_irq(dev, pci_irq_vector(pdev, 2), +@@ -2418,8 
+2417,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) + DRV_NAME " channel", hisi_hba); + if (rc) { + dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc); +- rc = -ENOENT; +- goto free_irq_vectors; ++ return -ENOENT; + } + + rc = devm_request_irq(dev, pci_irq_vector(pdev, 11), +@@ -2427,8 +2425,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) + DRV_NAME " fatal", hisi_hba); + if (rc) { + dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc); +- rc = -ENOENT; +- goto free_irq_vectors; ++ return -ENOENT; + } + + if (hisi_sas_intr_conv) +@@ -2449,8 +2446,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) + if (rc) { + dev_err(dev, "could not request cq%d interrupt, rc=%d\n", + i, rc); +- rc = -ENOENT; +- goto free_irq_vectors; ++ return -ENOENT; + } + cq->irq_mask = pci_irq_get_affinity(pdev, i + BASE_VECTORS_V3_HW); + if (!cq->irq_mask) { +@@ -2460,10 +2456,6 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) + } + + return 0; +- +-free_irq_vectors: +- pci_free_irq_vectors(pdev); +- return rc; + } + + static int hisi_sas_v3_init(struct hisi_hba *hisi_hba) +@@ -3317,11 +3309,11 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) + + rc = interrupt_preinit_v3_hw(hisi_hba); + if (rc) +- goto err_out_ha; ++ goto err_out_debugfs; + dev_err(dev, "%d hw queues\n", shost->nr_hw_queues); + rc = scsi_add_host(shost, dev); + if (rc) +- goto err_out_ha; ++ goto err_out_free_irq_vectors; + + rc = sas_register_ha(sha); + if (rc) +@@ -3348,8 +3340,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) + + err_out_register_ha: + scsi_remove_host(shost); +-err_out_ha: ++err_out_free_irq_vectors: ++ pci_free_irq_vectors(pdev); ++err_out_debugfs: + hisi_sas_debugfs_exit(hisi_hba); ++err_out_ha: ++ hisi_sas_free(hisi_hba); + scsi_host_put(shost); + err_out_regions: + pci_release_regions(pdev); +diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h +index 
549adfaa97ce5..93e507677bdcb 100644 +--- a/drivers/scsi/lpfc/lpfc.h ++++ b/drivers/scsi/lpfc/lpfc.h +@@ -753,7 +753,7 @@ struct lpfc_hba { + #define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/ + #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ + #define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */ +-#define ELS_XRI_ABORT_EVENT 0x40 ++#define ELS_XRI_ABORT_EVENT 0x40 /* ELS_XRI abort event was queued */ + #define ASYNC_EVENT 0x80 + #define LINK_DISABLED 0x100 /* Link disabled by user */ + #define FCF_TS_INPROG 0x200 /* FCF table scan in progress */ +diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h +index 482e4a888daec..1437e44ade801 100644 +--- a/drivers/scsi/lpfc/lpfc_disc.h ++++ b/drivers/scsi/lpfc/lpfc_disc.h +@@ -41,6 +41,7 @@ enum lpfc_work_type { + LPFC_EVT_DEV_LOSS, + LPFC_EVT_FASTPATH_MGMT_EVT, + LPFC_EVT_RESET_HBA, ++ LPFC_EVT_RECOVER_PORT + }; + + /* structure used to queue event to the discovery tasklet */ +@@ -128,6 +129,7 @@ struct lpfc_nodelist { + struct lpfc_vport *vport; + struct lpfc_work_evt els_retry_evt; + struct lpfc_work_evt dev_loss_evt; ++ struct lpfc_work_evt recovery_evt; + struct kref kref; + atomic_t cmd_pending; + uint32_t cmd_qdepth; +diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c +index bb02fd8bc2ddf..9746d2f4fcfad 100644 +--- a/drivers/scsi/lpfc/lpfc_hbadisc.c ++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c +@@ -552,6 +552,15 @@ lpfc_work_list_done(struct lpfc_hba *phba) + fcf_inuse, + nlp_did); + break; ++ case LPFC_EVT_RECOVER_PORT: ++ ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); ++ lpfc_sli_abts_recover_port(ndlp->vport, ndlp); ++ free_evt = 0; ++ /* decrement the node reference count held for ++ * this queued work ++ */ ++ lpfc_nlp_put(ndlp); ++ break; + case LPFC_EVT_ONLINE: + if (phba->link_state < LPFC_LINK_DOWN) + *(int *) (evtp->evt_arg1) = lpfc_online(phba); +@@ -4515,6 +4524,8 @@ 
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); + INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); + timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0); ++ INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp); ++ + ndlp->nlp_DID = did; + ndlp->vport = vport; + ndlp->phba = vport->phba; +@@ -5011,6 +5022,29 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) + mempool_free(mbox, phba->mbox_mem_pool); + acc_plogi = 1; + } ++ } else { ++ lpfc_printf_vlog(vport, KERN_INFO, ++ LOG_NODE | LOG_DISCOVERY, ++ "1444 Failed to allocate mempool " ++ "unreg_rpi UNREG x%x, " ++ "DID x%x, flag x%x, " ++ "ndlp x%px\n", ++ ndlp->nlp_rpi, ndlp->nlp_DID, ++ ndlp->nlp_flag, ndlp); ++ ++ /* Because mempool_alloc failed, we ++ * will issue a LOGO here and keep the rpi alive if ++ * not unloading. ++ */ ++ if (!(vport->load_flag & FC_UNLOADING)) { ++ ndlp->nlp_flag &= ~NLP_UNREG_INP; ++ lpfc_issue_els_logo(vport, ndlp, 0); ++ ndlp->nlp_prev_state = ndlp->nlp_state; ++ lpfc_nlp_set_state(vport, ndlp, ++ NLP_STE_NPR_NODE); ++ } ++ ++ return 1; + } + lpfc_no_rpi(phba, ndlp); + out: +@@ -5214,6 +5248,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) + + list_del_init(&ndlp->els_retry_evt.evt_listp); + list_del_init(&ndlp->dev_loss_evt.evt_listp); ++ list_del_init(&ndlp->recovery_evt.evt_listp); + lpfc_cleanup_vports_rrqs(vport, ndlp); + if (phba->sli_rev == LPFC_SLI_REV4) + ndlp->nlp_flag |= NLP_RELEASE_RPI; +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c +index ca25e54bb7824..40fe889033d43 100644 +--- a/drivers/scsi/lpfc/lpfc_init.c ++++ b/drivers/scsi/lpfc/lpfc_init.c +@@ -5958,18 +5958,21 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, + void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) + { + struct lpfc_cq_event *cq_event; ++ unsigned long iflags; + + /* First, declare the async event has been handled */ +- 
spin_lock_irq(&phba->hbalock); ++ spin_lock_irqsave(&phba->hbalock, iflags); + phba->hba_flag &= ~ASYNC_EVENT; +- spin_unlock_irq(&phba->hbalock); ++ spin_unlock_irqrestore(&phba->hbalock, iflags); ++ + /* Now, handle all the async events */ ++ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); + while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { +- /* Get the first event from the head of the event queue */ +- spin_lock_irq(&phba->hbalock); + list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, + cq_event, struct lpfc_cq_event, list); +- spin_unlock_irq(&phba->hbalock); ++ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, ++ iflags); ++ + /* Process the asynchronous event */ + switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { + case LPFC_TRAILER_CODE_LINK: +@@ -6001,9 +6004,12 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) + &cq_event->cqe.mcqe_cmpl)); + break; + } ++ + /* Free the completion event processed to the free pool */ + lpfc_sli4_cq_event_release(phba, cq_event); ++ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); + } ++ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); + } + + /** +@@ -6630,6 +6636,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) + /* This abort list used by worker thread */ + spin_lock_init(&phba->sli4_hba.sgl_list_lock); + spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); ++ spin_lock_init(&phba->sli4_hba.asynce_list_lock); ++ spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); + + /* + * Initialize driver internal slow-path work queues +@@ -6641,8 +6649,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) + INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); + /* Asynchronous event CQ Event work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); +- /* Fast-path XRI aborted CQ Event work queue list */ +- INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); + /* Slow-path XRI aborted CQ Event work queue list 
*/ + INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); + /* Receive queue CQ Event work queue list */ +@@ -10174,26 +10180,28 @@ lpfc_sli4_cq_event_release(struct lpfc_hba *phba, + static void + lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) + { +- LIST_HEAD(cqelist); +- struct lpfc_cq_event *cqe; ++ LIST_HEAD(cq_event_list); ++ struct lpfc_cq_event *cq_event; + unsigned long iflags; + + /* Retrieve all the pending WCQEs from pending WCQE lists */ +- spin_lock_irqsave(&phba->hbalock, iflags); +- /* Pending FCP XRI abort events */ +- list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, +- &cqelist); ++ + /* Pending ELS XRI abort events */ ++ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); + list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, +- &cqelist); ++ &cq_event_list); ++ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); ++ + /* Pending asynnc events */ ++ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); + list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, +- &cqelist); +- spin_unlock_irqrestore(&phba->hbalock, iflags); ++ &cq_event_list); ++ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); + +- while (!list_empty(&cqelist)) { +- list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); +- lpfc_sli4_cq_event_release(phba, cqe); ++ while (!list_empty(&cq_event_list)) { ++ list_remove_head(&cq_event_list, cq_event, ++ struct lpfc_cq_event, list); ++ lpfc_sli4_cq_event_release(phba, cq_event); + } + } + +diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c +index 27ff67e9edae7..be54fbf5146f1 100644 +--- a/drivers/scsi/lpfc/lpfc_mem.c ++++ b/drivers/scsi/lpfc/lpfc_mem.c +@@ -46,6 +46,7 @@ + #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ + #define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */ + #define LPFC_RRQ_POOL_SIZE 256 /* max elements in non-DMA pool */ ++#define 
LPFC_MBX_POOL_SIZE 256 /* max elements in MBX non-DMA pool */ + + int + lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) { +@@ -111,8 +112,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) + pool->current_count++; + } + +- phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, +- sizeof(LPFC_MBOXQ_t)); ++ phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MBX_POOL_SIZE, ++ sizeof(LPFC_MBOXQ_t)); + if (!phba->mbox_mem_pool) + goto fail_free_mbuf_pool; + +@@ -588,8 +589,6 @@ lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab) + * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI + * pool along a non-DMA-mapped container for it. + * +- * Notes: Not interrupt-safe. Must be called with no locks held. +- * + * Returns: + * pointer to HBQ on success + * NULL on failure +@@ -599,7 +598,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) + { + struct rqb_dmabuf *dma_buf; + +- dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL); ++ dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL); + if (!dma_buf) + return NULL; + +@@ -722,7 +721,6 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) + drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys); + rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); + if (rc < 0) { +- (rqbp->rqb_free_buffer)(phba, rqb_entry); + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6409 Cannot post to HRQ %d: %x %x %x " + "DRQ %x %x\n", +@@ -732,6 +730,7 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) + rqb_entry->hrq->entry_count, + rqb_entry->drq->host_index, + rqb_entry->drq->hba_index); ++ (rqbp->rqb_free_buffer)(phba, rqb_entry); + } else { + list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); + rqbp->buffer_count++; +diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c +index 0c39ed50998c8..69f1a0457f51e 100644 +--- a/drivers/scsi/lpfc/lpfc_nvme.c ++++ b/drivers/scsi/lpfc/lpfc_nvme.c +@@ -2280,6 
+2280,8 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, + int ret, i, pending = 0; + struct lpfc_sli_ring *pring; + struct lpfc_hba *phba = vport->phba; ++ struct lpfc_sli4_hdw_queue *qp; ++ int abts_scsi, abts_nvme; + + /* Host transport has to clean up and confirm requiring an indefinite + * wait. Print a message if a 10 second wait expires and renew the +@@ -2290,17 +2292,23 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, + ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); + if (unlikely(!ret)) { + pending = 0; ++ abts_scsi = 0; ++ abts_nvme = 0; + for (i = 0; i < phba->cfg_hdw_queue; i++) { +- pring = phba->sli4_hba.hdwq[i].io_wq->pring; ++ qp = &phba->sli4_hba.hdwq[i]; ++ pring = qp->io_wq->pring; + if (!pring) + continue; +- if (pring->txcmplq_cnt) +- pending += pring->txcmplq_cnt; ++ pending += pring->txcmplq_cnt; ++ abts_scsi += qp->abts_scsi_io_bufs; ++ abts_nvme += qp->abts_nvme_io_bufs; + } + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6176 Lport x%px Localport x%px wait " +- "timed out. Pending %d. Renewing.\n", +- lport, vport->localport, pending); ++ "timed out. Pending %d [%d:%d]. 
" ++ "Renewing.\n", ++ lport, vport->localport, pending, ++ abts_scsi, abts_nvme); + continue; + } + break; +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c +index e158cd77d387f..fcaafa564dfcd 100644 +--- a/drivers/scsi/lpfc/lpfc_sli.c ++++ b/drivers/scsi/lpfc/lpfc_sli.c +@@ -7248,12 +7248,16 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, + struct rqb_dmabuf *rqb_buffer; + LIST_HEAD(rqb_buf_list); + +- spin_lock_irqsave(&phba->hbalock, flags); + rqbp = hrq->rqbp; + for (i = 0; i < count; i++) { ++ spin_lock_irqsave(&phba->hbalock, flags); + /* IF RQ is already full, don't bother */ +- if (rqbp->buffer_count + i >= rqbp->entry_count - 1) ++ if (rqbp->buffer_count + i >= rqbp->entry_count - 1) { ++ spin_unlock_irqrestore(&phba->hbalock, flags); + break; ++ } ++ spin_unlock_irqrestore(&phba->hbalock, flags); ++ + rqb_buffer = rqbp->rqb_alloc_buffer(phba); + if (!rqb_buffer) + break; +@@ -7262,6 +7266,8 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, + rqb_buffer->idx = idx; + list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); + } ++ ++ spin_lock_irqsave(&phba->hbalock, flags); + while (!list_empty(&rqb_buf_list)) { + list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, + hbuf.list); +@@ -10364,6 +10370,32 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba) + return 0; + } + ++static void ++lpfc_sli_post_recovery_event(struct lpfc_hba *phba, ++ struct lpfc_nodelist *ndlp) ++{ ++ unsigned long iflags; ++ struct lpfc_work_evt *evtp = &ndlp->recovery_evt; ++ ++ spin_lock_irqsave(&phba->hbalock, iflags); ++ if (!list_empty(&evtp->evt_listp)) { ++ spin_unlock_irqrestore(&phba->hbalock, iflags); ++ return; ++ } ++ ++ /* Incrementing the reference count until the queued work is done. 
*/ ++ evtp->evt_arg1 = lpfc_nlp_get(ndlp); ++ if (!evtp->evt_arg1) { ++ spin_unlock_irqrestore(&phba->hbalock, iflags); ++ return; ++ } ++ evtp->evt = LPFC_EVT_RECOVER_PORT; ++ list_add_tail(&evtp->evt_listp, &phba->work_list); ++ spin_unlock_irqrestore(&phba->hbalock, iflags); ++ ++ lpfc_worker_wake_up(phba); ++} ++ + /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. + * @phba: Pointer to HBA context object. + * @iocbq: Pointer to iocb object. +@@ -10454,7 +10486,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, + ext_status = axri->parameter & IOERR_PARAM_MASK; + if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && + ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) +- lpfc_sli_abts_recover_port(vport, ndlp); ++ lpfc_sli_post_recovery_event(phba, ndlp); + } + + /** +@@ -13062,23 +13094,30 @@ lpfc_sli_intr_handler(int irq, void *dev_id) + void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) + { + struct lpfc_cq_event *cq_event; ++ unsigned long iflags; + + /* First, declare the els xri abort event has been handled */ +- spin_lock_irq(&phba->hbalock); ++ spin_lock_irqsave(&phba->hbalock, iflags); + phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; +- spin_unlock_irq(&phba->hbalock); ++ spin_unlock_irqrestore(&phba->hbalock, iflags); ++ + /* Now, handle all the els xri abort events */ ++ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); + while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { + /* Get the first event from the head of the event queue */ +- spin_lock_irq(&phba->hbalock); + list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, + cq_event, struct lpfc_cq_event, list); +- spin_unlock_irq(&phba->hbalock); ++ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, ++ iflags); + /* Notify aborted XRI for ELS work queue */ + lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); ++ + /* Free the event processed back to the free pool */ + 
lpfc_sli4_cq_event_release(phba, cq_event); ++ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, ++ iflags); + } ++ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); + } + + /** +@@ -13289,9 +13328,13 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) + cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe)); + if (!cq_event) + return false; +- spin_lock_irqsave(&phba->hbalock, iflags); ++ ++ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); + list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); ++ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); ++ + /* Set the async event flag */ ++ spin_lock_irqsave(&phba->hbalock, iflags); + phba->hba_flag |= ASYNC_EVENT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + +@@ -13566,17 +13609,20 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, + break; + case LPFC_NVME_LS: /* NVME LS uses ELS resources */ + case LPFC_ELS: +- cq_event = lpfc_cq_event_setup( +- phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); +- if (!cq_event) +- return false; ++ cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe)); ++ if (!cq_event) { ++ workposted = false; ++ break; ++ } + cq_event->hdwq = cq->hdwq; +- spin_lock_irqsave(&phba->hbalock, iflags); ++ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, ++ iflags); + list_add_tail(&cq_event->list, + &phba->sli4_hba.sp_els_xri_aborted_work_queue); + /* Set the els xri abort event flag */ + phba->hba_flag |= ELS_XRI_ABORT_EVENT; +- spin_unlock_irqrestore(&phba->hbalock, iflags); ++ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, ++ iflags); + workposted = true; + break; + default: +diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h +index a966cdeb52ee7..100cb1a94811b 100644 +--- a/drivers/scsi/lpfc/lpfc_sli4.h ++++ b/drivers/scsi/lpfc/lpfc_sli4.h +@@ -920,8 +920,9 @@ struct lpfc_sli4_hba { + struct list_head 
sp_queue_event; + struct list_head sp_cqe_event_pool; + struct list_head sp_asynce_work_queue; +- struct list_head sp_fcp_xri_aborted_work_queue; ++ spinlock_t asynce_list_lock; /* protect sp_asynce_work_queue list */ + struct list_head sp_els_xri_aborted_work_queue; ++ spinlock_t els_xri_abrt_list_lock; /* protect els_xri_aborted list */ + struct list_head sp_unsol_work_queue; + struct lpfc_sli4_link link_state; + struct lpfc_sli4_lnk_info lnk_info; +@@ -1103,8 +1104,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *); + void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *); + int lpfc_sli4_resume_rpi(struct lpfc_nodelist *, + void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *); +-void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); +-void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); ++void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba); + void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri, + struct lpfc_io_buf *lpfc_ncmd); +diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c +index 3cf3e58b69799..2025361b36e96 100644 +--- a/drivers/scsi/pm8001/pm8001_init.c ++++ b/drivers/scsi/pm8001/pm8001_init.c +@@ -1131,7 +1131,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev, + + pm8001_init_sas_add(pm8001_ha); + /* phy setting support for motherboard controller */ +- if (pm8001_configure_phy_settings(pm8001_ha)) ++ rc = pm8001_configure_phy_settings(pm8001_ha); ++ if (rc) + goto err_out_shost; + + pm8001_post_sas_ha_init(shost, chip); +diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c +index 7593f248afb2c..155382ce84698 100644 +--- a/drivers/scsi/pm8001/pm80xx_hwi.c ++++ b/drivers/scsi/pm8001/pm80xx_hwi.c +@@ -3363,7 +3363,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + if 
(pm8001_ha->flags == PM8001F_RUN_TIME) +- msleep(200);/*delay a moment to wait disk to spinup*/ ++ mdelay(200); /* delay a moment to wait for disk to spin up */ + pm8001_bytes_dmaed(pm8001_ha, phy_id); + } + +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c +index 61fab01d2d527..f5fc7f518f8af 100644 +--- a/drivers/scsi/qedi/qedi_main.c ++++ b/drivers/scsi/qedi/qedi_main.c +@@ -2766,7 +2766,7 @@ retry_probe: + QEDI_ERR(&qedi->dbg_ctx, + "Unable to start offload thread!\n"); + rc = -ENODEV; +- goto free_cid_que; ++ goto free_tmf_thread; + } + + INIT_DELAYED_WORK(&qedi->recovery_work, qedi_recovery_handler); +@@ -2790,6 +2790,8 @@ retry_probe: + + return 0; + ++free_tmf_thread: ++ destroy_workqueue(qedi->tmf_thread); + free_cid_que: + qedi_release_cid_que(qedi); + free_uio: +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c +index 898c70b8ebbf6..52e8b555bd1dc 100644 +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -1268,9 +1268,10 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) + lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI; + + ql_dbg(ql_dbg_disc, vha, 0x211b, +- "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n", ++ "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n", + fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24, +- fcport->login_retry, NVME_TARGET(vha->hw, fcport) ? "nvme" : "fc"); ++ fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority, ++ NVME_TARGET(vha->hw, fcport) ? 
"nvme" : "fcp"); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { +@@ -1932,26 +1933,58 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) + break; + } + +- /* +- * Retry PRLI with other FC-4 type if failure occurred on dual +- * FCP/NVMe port +- */ +- if (NVME_FCP_TARGET(ea->fcport)) { +- ql_dbg(ql_dbg_disc, vha, 0x2118, +- "%s %d %8phC post %s prli\n", +- __func__, __LINE__, ea->fcport->port_name, +- (ea->fcport->fc4_type & FS_FC4TYPE_NVME) ? +- "NVMe" : "FCP"); +- if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME) ++ ql_dbg(ql_dbg_disc, vha, 0x2118, ++ "%s %d %8phC priority %s, fc4type %x\n", ++ __func__, __LINE__, ea->fcport->port_name, ++ vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ? ++ "FCP" : "NVMe", ea->fcport->fc4_type); ++ ++ if (N2N_TOPO(vha->hw)) { ++ if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME) { + ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME; +- else ++ ea->fcport->fc4_type |= FS_FC4TYPE_FCP; ++ } else { + ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP; +- } ++ ea->fcport->fc4_type |= FS_FC4TYPE_NVME; ++ } + +- ea->fcport->flags &= ~FCF_ASYNC_SENT; +- ea->fcport->keep_nport_handle = 0; +- ea->fcport->logout_on_delete = 1; +- qlt_schedule_sess_for_deletion(ea->fcport); ++ if (ea->fcport->n2n_link_reset_cnt < 3) { ++ ea->fcport->n2n_link_reset_cnt++; ++ vha->relogin_jif = jiffies + 2 * HZ; ++ /* ++ * PRLI failed. Reset link to kick start ++ * state machine ++ */ ++ set_bit(N2N_LINK_RESET, &vha->dpc_flags); ++ } else { ++ ql_log(ql_log_warn, vha, 0x2119, ++ "%s %d %8phC Unable to reconnect\n", ++ __func__, __LINE__, ++ ea->fcport->port_name); ++ } ++ } else { ++ /* ++ * switch connect. login failed. Take connection down ++ * and allow relogin to retrigger ++ */ ++ if (NVME_FCP_TARGET(ea->fcport)) { ++ ql_dbg(ql_dbg_disc, vha, 0x2118, ++ "%s %d %8phC post %s prli\n", ++ __func__, __LINE__, ++ ea->fcport->port_name, ++ (ea->fcport->fc4_type & FS_FC4TYPE_NVME) ++ ? 
"NVMe" : "FCP"); ++ if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME) ++ ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME; ++ else ++ ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP; ++ } ++ ++ ea->fcport->flags &= ~FCF_ASYNC_SENT; ++ ea->fcport->keep_nport_handle = 0; ++ ea->fcport->logout_on_delete = 1; ++ qlt_schedule_sess_for_deletion(ea->fcport); ++ } + break; + } + } +diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c +index 07afd0d8a8f3e..d6325fb2ef73b 100644 +--- a/drivers/scsi/qla2xxx/qla_mbx.c ++++ b/drivers/scsi/qla2xxx/qla_mbx.c +@@ -1129,7 +1129,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) + if (ha->flags.scm_supported_a && + (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) { + ha->flags.scm_supported_f = 1; +- ha->sf_init_cb->flags |= BIT_13; ++ ha->sf_init_cb->flags |= cpu_to_le16(BIT_13); + } + ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n", + (ha->flags.scm_supported_f) ? "Supported" : +@@ -1137,9 +1137,9 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) + + if (vha->flags.nvme2_enabled) { + /* set BIT_15 of special feature control block for SLER */ +- ha->sf_init_cb->flags |= BIT_15; ++ ha->sf_init_cb->flags |= cpu_to_le16(BIT_15); + /* set BIT_14 of special feature control block for PI CTRL*/ +- ha->sf_init_cb->flags |= BIT_14; ++ ha->sf_init_cb->flags |= cpu_to_le16(BIT_14); + } + } + +@@ -3998,9 +3998,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, + fcport->scan_state = QLA_FCPORT_FOUND; + fcport->n2n_flag = 1; + fcport->keep_nport_handle = 1; +- fcport->fc4_type = FS_FC4TYPE_FCP; +- if (vha->flags.nvme_enabled) +- fcport->fc4_type |= FS_FC4TYPE_NVME; + + if (wwn_to_u64(vha->port_name) > + wwn_to_u64(fcport->port_name)) { +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c +index bd8623ee156a6..26c13a953b975 100644 +--- a/drivers/scsi/qla2xxx/qla_tmpl.c ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c +@@ -928,7 +928,8 @@ qla27xx_template_checksum(void *p, ulong size) + static inline int + 
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp) + { +- return qla27xx_template_checksum(tmp, tmp->template_size) == 0; ++ return qla27xx_template_checksum(tmp, ++ le32_to_cpu(tmp->template_size)) == 0; + } + + static inline int +@@ -944,7 +945,7 @@ qla27xx_execute_fwdt_template(struct scsi_qla_host *vha, + ulong len = 0; + + if (qla27xx_fwdt_template_valid(tmp)) { +- len = tmp->template_size; ++ len = le32_to_cpu(tmp->template_size); + tmp = memcpy(buf, tmp, len); + ql27xx_edit_template(vha, tmp); + qla27xx_walk_template(vha, tmp, buf, &len); +@@ -960,7 +961,7 @@ qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p) + ulong len = 0; + + if (qla27xx_fwdt_template_valid(tmp)) { +- len = tmp->template_size; ++ len = le32_to_cpu(tmp->template_size); + qla27xx_walk_template(vha, tmp, NULL, &len); + } + +@@ -972,7 +973,7 @@ qla27xx_fwdt_template_size(void *p) + { + struct qla27xx_fwdt_template *tmp = p; + +- return tmp->template_size; ++ return le32_to_cpu(tmp->template_size); + } + + int +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h +index c47184db50813..6e0987edfcebc 100644 +--- a/drivers/scsi/qla2xxx/qla_tmpl.h ++++ b/drivers/scsi/qla2xxx/qla_tmpl.h +@@ -12,7 +12,7 @@ + struct __packed qla27xx_fwdt_template { + __le32 template_type; + __le32 entry_offset; +- uint32_t template_size; ++ __le32 template_size; + uint32_t count; /* borrow field for running/residual count */ + + __le32 entry_count; +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c +index 03c6d0620bfd0..2d17137f8ff3b 100644 +--- a/drivers/scsi/scsi_lib.c ++++ b/drivers/scsi/scsi_lib.c +@@ -2948,6 +2948,78 @@ void sdev_enable_disk_events(struct scsi_device *sdev) + } + EXPORT_SYMBOL(sdev_enable_disk_events); + ++static unsigned char designator_prio(const unsigned char *d) ++{ ++ if (d[1] & 0x30) ++ /* not associated with LUN */ ++ return 0; ++ ++ if (d[3] == 0) ++ /* invalid length */ ++ return 0; ++ ++ /* ++ * Order of 
preference for lun descriptor: ++ * - SCSI name string ++ * - NAA IEEE Registered Extended ++ * - EUI-64 based 16-byte ++ * - EUI-64 based 12-byte ++ * - NAA IEEE Registered ++ * - NAA IEEE Extended ++ * - EUI-64 based 8-byte ++ * - SCSI name string (truncated) ++ * - T10 Vendor ID ++ * as longer descriptors reduce the likelyhood ++ * of identification clashes. ++ */ ++ ++ switch (d[1] & 0xf) { ++ case 8: ++ /* SCSI name string, variable-length UTF-8 */ ++ return 9; ++ case 3: ++ switch (d[4] >> 4) { ++ case 6: ++ /* NAA registered extended */ ++ return 8; ++ case 5: ++ /* NAA registered */ ++ return 5; ++ case 4: ++ /* NAA extended */ ++ return 4; ++ case 3: ++ /* NAA locally assigned */ ++ return 1; ++ default: ++ break; ++ } ++ break; ++ case 2: ++ switch (d[3]) { ++ case 16: ++ /* EUI64-based, 16 byte */ ++ return 7; ++ case 12: ++ /* EUI64-based, 12 byte */ ++ return 6; ++ case 8: ++ /* EUI64-based, 8 byte */ ++ return 3; ++ default: ++ break; ++ } ++ break; ++ case 1: ++ /* T10 vendor ID */ ++ return 1; ++ default: ++ break; ++ } ++ ++ return 0; ++} ++ + /** + * scsi_vpd_lun_id - return a unique device identification + * @sdev: SCSI device +@@ -2964,7 +3036,7 @@ EXPORT_SYMBOL(sdev_enable_disk_events); + */ + int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) + { +- u8 cur_id_type = 0xff; ++ u8 cur_id_prio = 0; + u8 cur_id_size = 0; + const unsigned char *d, *cur_id_str; + const struct scsi_vpd *vpd_pg83; +@@ -2977,20 +3049,6 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) + return -ENXIO; + } + +- /* +- * Look for the correct descriptor. +- * Order of preference for lun descriptor: +- * - SCSI name string +- * - NAA IEEE Registered Extended +- * - EUI-64 based 16-byte +- * - EUI-64 based 12-byte +- * - NAA IEEE Registered +- * - NAA IEEE Extended +- * - T10 Vendor ID +- * as longer descriptors reduce the likelyhood +- * of identification clashes. 
+- */ +- + /* The id string must be at least 20 bytes + terminating NULL byte */ + if (id_len < 21) { + rcu_read_unlock(); +@@ -3000,8 +3058,9 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) + memset(id, 0, id_len); + d = vpd_pg83->data + 4; + while (d < vpd_pg83->data + vpd_pg83->len) { +- /* Skip designators not referring to the LUN */ +- if ((d[1] & 0x30) != 0x00) ++ u8 prio = designator_prio(d); ++ ++ if (prio == 0 || cur_id_prio > prio) + goto next_desig; + + switch (d[1] & 0xf) { +@@ -3009,28 +3068,19 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) + /* T10 Vendor ID */ + if (cur_id_size > d[3]) + break; +- /* Prefer anything */ +- if (cur_id_type > 0x01 && cur_id_type != 0xff) +- break; ++ cur_id_prio = prio; + cur_id_size = d[3]; + if (cur_id_size + 4 > id_len) + cur_id_size = id_len - 4; + cur_id_str = d + 4; +- cur_id_type = d[1] & 0xf; + id_size = snprintf(id, id_len, "t10.%*pE", + cur_id_size, cur_id_str); + break; + case 0x2: + /* EUI-64 */ +- if (cur_id_size > d[3]) +- break; +- /* Prefer NAA IEEE Registered Extended */ +- if (cur_id_type == 0x3 && +- cur_id_size == d[3]) +- break; ++ cur_id_prio = prio; + cur_id_size = d[3]; + cur_id_str = d + 4; +- cur_id_type = d[1] & 0xf; + switch (cur_id_size) { + case 8: + id_size = snprintf(id, id_len, +@@ -3048,17 +3098,14 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) + cur_id_str); + break; + default: +- cur_id_size = 0; + break; + } + break; + case 0x3: + /* NAA */ +- if (cur_id_size > d[3]) +- break; ++ cur_id_prio = prio; + cur_id_size = d[3]; + cur_id_str = d + 4; +- cur_id_type = d[1] & 0xf; + switch (cur_id_size) { + case 8: + id_size = snprintf(id, id_len, +@@ -3071,26 +3118,25 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) + cur_id_str); + break; + default: +- cur_id_size = 0; + break; + } + break; + case 0x8: + /* SCSI name string */ +- if (cur_id_size + 4 > d[3]) ++ if (cur_id_size > d[3]) + 
break; + /* Prefer others for truncated descriptor */ +- if (cur_id_size && d[3] > id_len) +- break; ++ if (d[3] > id_len) { ++ prio = 2; ++ if (cur_id_prio > prio) ++ break; ++ } ++ cur_id_prio = prio; + cur_id_size = id_size = d[3]; + cur_id_str = d + 4; +- cur_id_type = d[1] & 0xf; + if (cur_id_size >= id_len) + cur_id_size = id_len - 1; + memcpy(id, cur_id_str, cur_id_size); +- /* Decrease priority for truncated descriptor */ +- if (cur_id_size != id_size) +- cur_id_size = 6; + break; + default: + break; +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c +index 2eb3e4f9375a5..2e68c0a876986 100644 +--- a/drivers/scsi/scsi_transport_iscsi.c ++++ b/drivers/scsi/scsi_transport_iscsi.c +@@ -2313,7 +2313,9 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) + return conn; + + release_conn_ref: +- put_device(&conn->dev); ++ device_unregister(&conn->dev); ++ put_device(&session->dev); ++ return NULL; + release_parent_ref: + put_device(&session->dev); + free_conn: +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index 0c148fcd24deb..911aba3e7675c 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -1751,8 +1751,9 @@ static void __ufshcd_release(struct ufs_hba *hba) + + if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || + hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || +- ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks || +- hba->active_uic_cmd || hba->uic_async_done) ++ hba->outstanding_tasks || ++ hba->active_uic_cmd || hba->uic_async_done || ++ hba->clk_gating.state == CLKS_OFF) + return; + + hba->clk_gating.state = REQ_CLKS_OFF; +diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c +index 4aad2566f52d2..f04b961b96cd4 100644 +--- a/drivers/slimbus/qcom-ctrl.c ++++ b/drivers/slimbus/qcom-ctrl.c +@@ -472,15 +472,10 @@ static void qcom_slim_rxwq(struct work_struct *work) + static void qcom_slim_prg_slew(struct 
platform_device *pdev, + struct qcom_slim_ctrl *ctrl) + { +- struct resource *slew_mem; +- + if (!ctrl->slew_reg) { + /* SLEW RATE register for this SLIMbus */ +- slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, +- "slew"); +- ctrl->slew_reg = devm_ioremap(&pdev->dev, slew_mem->start, +- resource_size(slew_mem)); +- if (!ctrl->slew_reg) ++ ctrl->slew_reg = devm_platform_ioremap_resource_byname(pdev, "slew"); ++ if (IS_ERR(ctrl->slew_reg)) + return; + } + +diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c +index 218aefc3531cd..50cfd67c2871e 100644 +--- a/drivers/slimbus/qcom-ngd-ctrl.c ++++ b/drivers/slimbus/qcom-ngd-ctrl.c +@@ -1205,6 +1205,9 @@ static int qcom_slim_ngd_runtime_resume(struct device *dev) + struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); + int ret = 0; + ++ if (!ctrl->qmi.handle) ++ return 0; ++ + if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP) + ret = qcom_slim_ngd_power_up(ctrl); + if (ret) { +@@ -1503,6 +1506,9 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev) + struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); + int ret = 0; + ++ if (!ctrl->qmi.handle) ++ return 0; ++ + ret = qcom_slim_qmi_power_request(ctrl, false); + if (ret && ret != -EBUSY) + dev_info(ctrl->dev, "slim resource not idle:%d\n", ret); +diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c +index c655f5f92b124..d0329ad170d13 100644 +--- a/drivers/soc/amlogic/meson-canvas.c ++++ b/drivers/soc/amlogic/meson-canvas.c +@@ -72,8 +72,10 @@ struct meson_canvas *meson_canvas_get(struct device *dev) + * current state, this driver probe cannot return -EPROBE_DEFER + */ + canvas = dev_get_drvdata(&canvas_pdev->dev); +- if (!canvas) ++ if (!canvas) { ++ put_device(&canvas_pdev->dev); + return ERR_PTR(-EINVAL); ++ } + + return canvas; + } +diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c +index f669d3754627d..ca75b14931ec9 100644 +--- 
a/drivers/soc/mediatek/mtk-scpsys.c ++++ b/drivers/soc/mediatek/mtk-scpsys.c +@@ -524,6 +524,7 @@ static void mtk_register_power_domains(struct platform_device *pdev, + for (i = 0; i < num; i++) { + struct scp_domain *scpd = &scp->domains[i]; + struct generic_pm_domain *genpd = &scpd->genpd; ++ bool on; + + /* + * Initially turn on all domains to make the domains usable +@@ -531,9 +532,9 @@ static void mtk_register_power_domains(struct platform_device *pdev, + * software. The unused domains will be switched off during + * late_init time. + */ +- genpd->power_on(genpd); ++ on = !WARN_ON(genpd->power_on(genpd) < 0); + +- pm_genpd_init(genpd, NULL, false); ++ pm_genpd_init(genpd, NULL, !on); + } + + /* +diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c +index 088dc99f77f3f..f63135c09667f 100644 +--- a/drivers/soc/qcom/pdr_interface.c ++++ b/drivers/soc/qcom/pdr_interface.c +@@ -569,7 +569,7 @@ EXPORT_SYMBOL(pdr_add_lookup); + int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds) + { + struct servreg_restart_pd_resp resp; +- struct servreg_restart_pd_req req; ++ struct servreg_restart_pd_req req = { 0 }; + struct sockaddr_qrtr addr; + struct pdr_service *tmp; + struct qmi_txn txn; +diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c +index d0e4f520cff8c..751a49f6534f4 100644 +--- a/drivers/soc/qcom/qcom-geni-se.c ++++ b/drivers/soc/qcom/qcom-geni-se.c +@@ -289,10 +289,23 @@ static void geni_se_select_fifo_mode(struct geni_se *se) + + static void geni_se_select_dma_mode(struct geni_se *se) + { ++ u32 proto = geni_se_read_proto(se); + u32 val; + + geni_se_irq_clear(se); + ++ val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN); ++ if (proto != GENI_SE_UART) { ++ val &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN); ++ val &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN); ++ } ++ writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN); ++ ++ val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN); ++ if (proto != 
GENI_SE_UART) ++ val &= ~S_CMD_DONE_EN; ++ writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN); ++ + val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN); + val |= GENI_DMA_MODE_EN; + writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN); +@@ -651,7 +664,7 @@ int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len, + writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_TX_PTR_L); + writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_TX_PTR_H); + writel_relaxed(GENI_SE_DMA_EOT_BUF, se->base + SE_DMA_TX_ATTR); +- writel_relaxed(len, se->base + SE_DMA_TX_LEN); ++ writel(len, se->base + SE_DMA_TX_LEN); + return 0; + } + EXPORT_SYMBOL(geni_se_tx_dma_prep); +@@ -688,7 +701,7 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len, + writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_RX_PTR_H); + /* RX does not have EOT buffer type bit. So just reset RX_ATTR */ + writel_relaxed(0, se->base + SE_DMA_RX_ATTR); +- writel_relaxed(len, se->base + SE_DMA_RX_LEN); ++ writel(len, se->base + SE_DMA_RX_LEN); + return 0; + } + EXPORT_SYMBOL(geni_se_rx_dma_prep); +diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c +index 07183d731d747..a9709aae54abb 100644 +--- a/drivers/soc/qcom/smp2p.c ++++ b/drivers/soc/qcom/smp2p.c +@@ -318,15 +318,16 @@ static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p, + static int smp2p_update_bits(void *data, u32 mask, u32 value) + { + struct smp2p_entry *entry = data; ++ unsigned long flags; + u32 orig; + u32 val; + +- spin_lock(&entry->lock); ++ spin_lock_irqsave(&entry->lock, flags); + val = orig = readl(entry->value); + val &= ~mask; + val |= value; + writel(val, entry->value); +- spin_unlock(&entry->lock); ++ spin_unlock_irqrestore(&entry->lock, flags); + + if (val != orig) + qcom_smp2p_kick(entry->smp2p); +diff --git a/drivers/soc/renesas/rmobile-sysc.c b/drivers/soc/renesas/rmobile-sysc.c +index 54b616ad4a62a..beb1c7211c3d6 100644 +--- a/drivers/soc/renesas/rmobile-sysc.c ++++ 
b/drivers/soc/renesas/rmobile-sysc.c +@@ -327,6 +327,7 @@ static int __init rmobile_init_pm_domains(void) + + pmd = of_get_child_by_name(np, "pm-domains"); + if (!pmd) { ++ iounmap(base); + pr_warn("%pOF lacks pm-domains node\n", np); + continue; + } +diff --git a/drivers/soc/rockchip/io-domain.c b/drivers/soc/rockchip/io-domain.c +index eece97f97ef8f..b29e829e815e5 100644 +--- a/drivers/soc/rockchip/io-domain.c ++++ b/drivers/soc/rockchip/io-domain.c +@@ -547,6 +547,7 @@ static int rockchip_iodomain_probe(struct platform_device *pdev) + if (uV < 0) { + dev_err(iod->dev, "Can't determine voltage: %s\n", + supply_name); ++ ret = uV; + goto unreg_notify; + } + +diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c +index 8c863ecb1c605..56597f6ea666a 100644 +--- a/drivers/soc/ti/knav_dma.c ++++ b/drivers/soc/ti/knav_dma.c +@@ -749,8 +749,9 @@ static int knav_dma_probe(struct platform_device *pdev) + pm_runtime_enable(kdev->dev); + ret = pm_runtime_get_sync(kdev->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(kdev->dev); + dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret); +- return ret; ++ goto err_pm_disable; + } + + /* Initialise all packet dmas */ +@@ -764,7 +765,8 @@ static int knav_dma_probe(struct platform_device *pdev) + + if (list_empty(&kdev->list)) { + dev_err(dev, "no valid dma instance\n"); +- return -ENODEV; ++ ret = -ENODEV; ++ goto err_put_sync; + } + + debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL, +@@ -772,6 +774,13 @@ static int knav_dma_probe(struct platform_device *pdev) + + device_ready = true; + return ret; ++ ++err_put_sync: ++ pm_runtime_put_sync(kdev->dev); ++err_pm_disable: ++ pm_runtime_disable(kdev->dev); ++ ++ return ret; + } + + static int knav_dma_remove(struct platform_device *pdev) +diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c +index a460f201bf8e7..53e36d4328d1e 100644 +--- a/drivers/soc/ti/knav_qmss_queue.c ++++ b/drivers/soc/ti/knav_qmss_queue.c +@@ -1784,6 
+1784,7 @@ static int knav_queue_probe(struct platform_device *pdev) + pm_runtime_enable(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(&pdev->dev); + dev_err(dev, "Failed to enable QMSS\n"); + return ret; + } +@@ -1851,9 +1852,10 @@ static int knav_queue_probe(struct platform_device *pdev) + if (ret) + goto err; + +- regions = of_get_child_by_name(node, "descriptor-regions"); ++ regions = of_get_child_by_name(node, "descriptor-regions"); + if (!regions) { + dev_err(dev, "descriptor-regions not specified\n"); ++ ret = -ENODEV; + goto err; + } + ret = knav_queue_setup_regions(kdev, regions); +diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c +index 980b04c38fd94..4d41dc3cdce1f 100644 +--- a/drivers/soc/ti/omap_prm.c ++++ b/drivers/soc/ti/omap_prm.c +@@ -484,6 +484,10 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev, + struct ti_prm_platform_data *pdata = dev_get_platdata(reset->dev); + int ret = 0; + ++ /* Nothing to do if the reset is already deasserted */ ++ if (!omap_reset_status(rcdev, id)) ++ return 0; ++ + has_rstst = reset->prm->data->rstst || + (reset->prm->data->flags & OMAP_PRM_HAS_RSTST); + +diff --git a/drivers/soundwire/master.c b/drivers/soundwire/master.c +index 3488bb824e845..9b05c9e25ebe4 100644 +--- a/drivers/soundwire/master.c ++++ b/drivers/soundwire/master.c +@@ -8,6 +8,15 @@ + #include <linux/soundwire/sdw_type.h> + #include "bus.h" + ++/* ++ * The 3s value for autosuspend will only be used if there are no ++ * devices physically attached on a bus segment. In practice enabling ++ * the bus operation will result in children devices become active and ++ * the master device will only suspend when all its children are no ++ * longer active. 
++ */ ++#define SDW_MASTER_SUSPEND_DELAY_MS 3000 ++ + /* + * The sysfs for properties reflects the MIPI description as given + * in the MIPI DisCo spec +@@ -154,7 +163,12 @@ int sdw_master_device_add(struct sdw_bus *bus, struct device *parent, + bus->dev = &md->dev; + bus->md = md; + ++ pm_runtime_set_autosuspend_delay(&bus->md->dev, SDW_MASTER_SUSPEND_DELAY_MS); ++ pm_runtime_use_autosuspend(&bus->md->dev); ++ pm_runtime_mark_last_busy(&bus->md->dev); ++ pm_runtime_set_active(&bus->md->dev); + pm_runtime_enable(&bus->md->dev); ++ pm_runtime_idle(&bus->md->dev); + device_register_err: + return ret; + } +diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c +index fbca4ebf63e92..6d22df01f3547 100644 +--- a/drivers/soundwire/qcom.c ++++ b/drivers/soundwire/qcom.c +@@ -799,7 +799,7 @@ static int qcom_swrm_probe(struct platform_device *pdev) + data = of_device_get_match_data(dev); + ctrl->rows_index = sdw_find_row_index(data->default_rows); + ctrl->cols_index = sdw_find_col_index(data->default_cols); +-#if IS_ENABLED(CONFIG_SLIMBUS) ++#if IS_REACHABLE(CONFIG_SLIMBUS) + if (dev->parent->bus == &slimbus_bus) { + #else + if (false) { +diff --git a/drivers/soundwire/sysfs_slave_dpn.c b/drivers/soundwire/sysfs_slave_dpn.c +index 05a721ea9830a..c4b6543c09fd6 100644 +--- a/drivers/soundwire/sysfs_slave_dpn.c ++++ b/drivers/soundwire/sysfs_slave_dpn.c +@@ -37,6 +37,7 @@ static int field##_attribute_alloc(struct device *dev, \ + return -ENOMEM; \ + dpn_attr->N = N; \ + dpn_attr->dir = dir; \ ++ sysfs_attr_init(&dpn_attr->dev_attr.attr); \ + dpn_attr->format_string = format_string; \ + dpn_attr->dev_attr.attr.name = __stringify(field); \ + dpn_attr->dev_attr.attr.mode = 0444; \ +diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig +index 5cff60de8e834..3fd16b7f61507 100644 +--- a/drivers/spi/Kconfig ++++ b/drivers/spi/Kconfig +@@ -255,6 +255,7 @@ config SPI_DW_MMIO + config SPI_DW_BT1 + tristate "Baikal-T1 SPI driver for DW SPI core" + depends on MIPS_BAIKAL_T1 || 
COMPILE_TEST ++ select MULTIPLEXER + help + Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI + controllers. Two of them are pretty much normal: with IRQ, DMA, +diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c +index 8c009c175f2c4..1e63fd4821f96 100644 +--- a/drivers/spi/atmel-quadspi.c ++++ b/drivers/spi/atmel-quadspi.c +@@ -365,10 +365,14 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq, + if (dummy_cycles) + ifr |= QSPI_IFR_NBDUM(dummy_cycles); + +- /* Set data enable */ +- if (op->data.nbytes) ++ /* Set data enable and data transfer type. */ ++ if (op->data.nbytes) { + ifr |= QSPI_IFR_DATAEN; + ++ if (op->addr.nbytes) ++ ifr |= QSPI_IFR_TFRTYP_MEM; ++ } ++ + /* + * If the QSPI controller is set in regular SPI mode, set it in + * Serial Memory Mode (SMM). +@@ -393,7 +397,7 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq, + atmel_qspi_write(icr, aq, QSPI_WICR); + atmel_qspi_write(ifr, aq, QSPI_IFR); + } else { +- if (op->data.dir == SPI_MEM_DATA_OUT) ++ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) + ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR; + + /* Set QSPI Instruction Frame registers */ +@@ -535,7 +539,7 @@ static int atmel_qspi_probe(struct platform_device *pdev) + struct resource *res; + int irq, err = 0; + +- ctrl = spi_alloc_master(&pdev->dev, sizeof(*aq)); ++ ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(*aq)); + if (!ctrl) + return -ENOMEM; + +@@ -557,8 +561,7 @@ static int atmel_qspi_probe(struct platform_device *pdev) + aq->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(aq->regs)) { + dev_err(&pdev->dev, "missing registers\n"); +- err = PTR_ERR(aq->regs); +- goto exit; ++ return PTR_ERR(aq->regs); + } + + /* Map the AHB memory */ +@@ -566,8 +569,7 @@ static int atmel_qspi_probe(struct platform_device *pdev) + aq->mem = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(aq->mem)) { + dev_err(&pdev->dev, "missing AHB memory\n"); +- err = PTR_ERR(aq->mem); +- goto exit; ++ return 
PTR_ERR(aq->mem); + } + + aq->mmap_size = resource_size(res); +@@ -579,22 +581,21 @@ static int atmel_qspi_probe(struct platform_device *pdev) + + if (IS_ERR(aq->pclk)) { + dev_err(&pdev->dev, "missing peripheral clock\n"); +- err = PTR_ERR(aq->pclk); +- goto exit; ++ return PTR_ERR(aq->pclk); + } + + /* Enable the peripheral clock */ + err = clk_prepare_enable(aq->pclk); + if (err) { + dev_err(&pdev->dev, "failed to enable the peripheral clock\n"); +- goto exit; ++ return err; + } + + aq->caps = of_device_get_match_data(&pdev->dev); + if (!aq->caps) { + dev_err(&pdev->dev, "Could not retrieve QSPI caps\n"); + err = -EINVAL; +- goto exit; ++ goto disable_pclk; + } + + if (aq->caps->has_qspick) { +@@ -638,8 +639,6 @@ disable_qspick: + clk_disable_unprepare(aq->qspick); + disable_pclk: + clk_disable_unprepare(aq->pclk); +-exit: +- spi_controller_put(ctrl); + + return err; + } +diff --git a/drivers/spi/spi-ar934x.c b/drivers/spi/spi-ar934x.c +index d08dec09d423d..def32e0aaefe3 100644 +--- a/drivers/spi/spi-ar934x.c ++++ b/drivers/spi/spi-ar934x.c +@@ -176,10 +176,11 @@ static int ar934x_spi_probe(struct platform_device *pdev) + if (ret) + return ret; + +- ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp)); ++ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*sp)); + if (!ctlr) { + dev_info(&pdev->dev, "failed to allocate spi controller\n"); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto err_clk_disable; + } + + /* disable flash mapping and expose spi controller registers */ +@@ -202,7 +203,13 @@ static int ar934x_spi_probe(struct platform_device *pdev) + sp->clk_freq = clk_get_rate(clk); + sp->ctlr = ctlr; + +- return devm_spi_register_controller(&pdev->dev, ctlr); ++ ret = spi_register_controller(ctlr); ++ if (!ret) ++ return 0; ++ ++err_clk_disable: ++ clk_disable_unprepare(clk); ++ return ret; + } + + static int ar934x_spi_remove(struct platform_device *pdev) +@@ -213,6 +220,7 @@ static int ar934x_spi_remove(struct platform_device *pdev) + ctlr = 
dev_get_drvdata(&pdev->dev); + sp = spi_controller_get_devdata(ctlr); + ++ spi_unregister_controller(ctlr); + clk_disable_unprepare(sp->clk); + + return 0; +diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c +index 9909b18f3c5a5..1f08d7553f079 100644 +--- a/drivers/spi/spi-bcm63xx-hsspi.c ++++ b/drivers/spi/spi-bcm63xx-hsspi.c +@@ -494,8 +494,10 @@ static int bcm63xx_hsspi_resume(struct device *dev) + + if (bs->pll_clk) { + ret = clk_prepare_enable(bs->pll_clk); +- if (ret) ++ if (ret) { ++ clk_disable_unprepare(bs->clk); + return ret; ++ } + } + + spi_master_resume(master); +diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c +index 818f2b22875d2..7453a1dbbc061 100644 +--- a/drivers/spi/spi-davinci.c ++++ b/drivers/spi/spi-davinci.c +@@ -1040,13 +1040,13 @@ static int davinci_spi_remove(struct platform_device *pdev) + spi_bitbang_stop(&dspi->bitbang); + + clk_disable_unprepare(dspi->clk); +- spi_master_put(master); + + if (dspi->dma_rx) { + dma_release_channel(dspi->dma_rx); + dma_release_channel(dspi->dma_tx); + } + ++ spi_master_put(master); + return 0; + } + +diff --git a/drivers/spi/spi-dw-bt1.c b/drivers/spi/spi-dw-bt1.c +index f382dfad78421..c279b7891e3ac 100644 +--- a/drivers/spi/spi-dw-bt1.c ++++ b/drivers/spi/spi-dw-bt1.c +@@ -280,8 +280,10 @@ static int dw_spi_bt1_probe(struct platform_device *pdev) + dws->bus_num = pdev->id; + dws->reg_io_width = 4; + dws->max_freq = clk_get_rate(dwsbt1->clk); +- if (!dws->max_freq) ++ if (!dws->max_freq) { ++ ret = -EINVAL; + goto err_disable_clk; ++ } + + init_func = device_get_match_data(&pdev->dev); + ret = init_func(pdev, dwsbt1); +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c +index 1a08c1d584abe..0287366874882 100644 +--- a/drivers/spi/spi-fsl-dspi.c ++++ b/drivers/spi/spi-fsl-dspi.c +@@ -1165,7 +1165,7 @@ static int dspi_init(struct fsl_dspi *dspi) + unsigned int mcr; + + /* Set idle states for all chip select signals to high */ +- mcr = 
SPI_MCR_PCSIS(GENMASK(dspi->ctlr->num_chipselect - 1, 0)); ++ mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->max_native_cs - 1, 0)); + + if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) + mcr |= SPI_MCR_XSPI; +@@ -1250,7 +1250,7 @@ static int dspi_probe(struct platform_device *pdev) + + pdata = dev_get_platdata(&pdev->dev); + if (pdata) { +- ctlr->num_chipselect = pdata->cs_num; ++ ctlr->num_chipselect = ctlr->max_native_cs = pdata->cs_num; + ctlr->bus_num = pdata->bus_num; + + /* Only Coldfire uses platform data */ +@@ -1263,7 +1263,7 @@ static int dspi_probe(struct platform_device *pdev) + dev_err(&pdev->dev, "can't get spi-num-chipselects\n"); + goto out_ctlr_put; + } +- ctlr->num_chipselect = cs_num; ++ ctlr->num_chipselect = ctlr->max_native_cs = cs_num; + + of_property_read_u32(np, "bus-num", &bus_num); + ctlr->bus_num = bus_num; +diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c +index 299e9870cf58d..9494257e1c33f 100644 +--- a/drivers/spi/spi-fsl-spi.c ++++ b/drivers/spi/spi-fsl-spi.c +@@ -716,10 +716,11 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) + type = fsl_spi_get_type(&ofdev->dev); + if (type == TYPE_FSL) { + struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); ++ bool spisel_boot = false; + #if IS_ENABLED(CONFIG_FSL_SOC) + struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); +- bool spisel_boot = of_property_read_bool(np, "fsl,spisel_boot"); + ++ spisel_boot = of_property_read_bool(np, "fsl,spisel_boot"); + if (spisel_boot) { + pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4); + if (!pinfo->immr_spi_cs) +@@ -734,10 +735,14 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) + * supported on the GRLIB variant. 
+ */ + ret = gpiod_count(dev, "cs"); +- if (ret <= 0) ++ if (ret < 0) ++ ret = 0; ++ if (ret == 0 && !spisel_boot) { + pdata->max_chipselect = 1; +- else ++ } else { ++ pdata->max_chipselect = ret + spisel_boot; + pdata->cs_control = fsl_spi_cs_control; ++ } + } + + ret = of_address_to_resource(np, 0, &mem); +diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c +index 25810a7eef101..0e3d8e6c08f42 100644 +--- a/drivers/spi/spi-geni-qcom.c ++++ b/drivers/spi/spi-geni-qcom.c +@@ -603,7 +603,7 @@ static int spi_geni_probe(struct platform_device *pdev) + if (IS_ERR(clk)) + return PTR_ERR(clk); + +- spi = spi_alloc_master(dev, sizeof(*mas)); ++ spi = devm_spi_alloc_master(dev, sizeof(*mas)); + if (!spi) + return -ENOMEM; + +@@ -673,7 +673,6 @@ spi_geni_probe_free_irq: + free_irq(mas->irq, spi); + spi_geni_probe_runtime_disable: + pm_runtime_disable(dev); +- spi_master_put(spi); + dev_pm_opp_of_remove_table(&pdev->dev); + put_clkname: + dev_pm_opp_put_clkname(mas->se.opp_table); +diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c +index 7ceb0ba27b755..0584f4d2fde29 100644 +--- a/drivers/spi/spi-gpio.c ++++ b/drivers/spi/spi-gpio.c +@@ -350,11 +350,6 @@ static int spi_gpio_probe_pdata(struct platform_device *pdev, + return 0; + } + +-static void spi_gpio_put(void *data) +-{ +- spi_master_put(data); +-} +- + static int spi_gpio_probe(struct platform_device *pdev) + { + int status; +@@ -363,16 +358,10 @@ static int spi_gpio_probe(struct platform_device *pdev) + struct device *dev = &pdev->dev; + struct spi_bitbang *bb; + +- master = spi_alloc_master(dev, sizeof(*spi_gpio)); ++ master = devm_spi_alloc_master(dev, sizeof(*spi_gpio)); + if (!master) + return -ENOMEM; + +- status = devm_add_action_or_reset(&pdev->dev, spi_gpio_put, master); +- if (status) { +- spi_master_put(master); +- return status; +- } +- + if (pdev->dev.of_node) + status = spi_gpio_probe_dt(pdev, master); + else +@@ -432,7 +421,7 @@ static int spi_gpio_probe(struct 
platform_device *pdev) + if (status) + return status; + +- return devm_spi_register_master(&pdev->dev, spi_master_get(master)); ++ return devm_spi_register_master(&pdev->dev, master); + } + + MODULE_ALIAS("platform:" DRIVER_NAME); +diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c +index b068537375d60..5f05d519fbbd0 100644 +--- a/drivers/spi/spi-img-spfi.c ++++ b/drivers/spi/spi-img-spfi.c +@@ -731,8 +731,10 @@ static int img_spfi_resume(struct device *dev) + int ret; + + ret = pm_runtime_get_sync(dev); +- if (ret) ++ if (ret) { ++ pm_runtime_put_noidle(dev); + return ret; ++ } + spfi_reset(spfi); + pm_runtime_put(dev); + +diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c +index 0b597905ee72c..8df5e973404f0 100644 +--- a/drivers/spi/spi-imx.c ++++ b/drivers/spi/spi-imx.c +@@ -1538,6 +1538,7 @@ spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg) + + ret = pm_runtime_get_sync(spi_imx->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(spi_imx->dev); + dev_err(spi_imx->dev, "failed to enable clock\n"); + return ret; + } +@@ -1748,6 +1749,7 @@ static int spi_imx_remove(struct platform_device *pdev) + + ret = pm_runtime_get_sync(spi_imx->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(spi_imx->dev); + dev_err(spi_imx->dev, "failed to enable clock\n"); + return ret; + } +diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c +index ef53290b7d24d..4682f49dc7330 100644 +--- a/drivers/spi/spi-mem.c ++++ b/drivers/spi/spi-mem.c +@@ -243,6 +243,7 @@ static int spi_mem_access_start(struct spi_mem *mem) + + ret = pm_runtime_get_sync(ctlr->dev.parent); + if (ret < 0) { ++ pm_runtime_put_noidle(ctlr->dev.parent); + dev_err(&ctlr->dev, "Failed to power device: %d\n", + ret); + return ret; +diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c +index 2c3b7a2a1ec77..b4b9b7309b5e9 100644 +--- a/drivers/spi/spi-mt7621.c ++++ b/drivers/spi/spi-mt7621.c +@@ -350,9 +350,10 @@ static int mt7621_spi_probe(struct 
platform_device *pdev) + if (status) + return status; + +- master = spi_alloc_master(&pdev->dev, sizeof(*rs)); ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(*rs)); + if (!master) { + dev_info(&pdev->dev, "master allocation failed\n"); ++ clk_disable_unprepare(clk); + return -ENOMEM; + } + +@@ -377,10 +378,15 @@ static int mt7621_spi_probe(struct platform_device *pdev) + ret = device_reset(&pdev->dev); + if (ret) { + dev_err(&pdev->dev, "SPI reset failed!\n"); ++ clk_disable_unprepare(clk); + return ret; + } + +- return devm_spi_register_controller(&pdev->dev, master); ++ ret = spi_register_controller(master); ++ if (ret) ++ clk_disable_unprepare(clk); ++ ++ return ret; + } + + static int mt7621_spi_remove(struct platform_device *pdev) +@@ -391,6 +397,7 @@ static int mt7621_spi_remove(struct platform_device *pdev) + master = dev_get_drvdata(&pdev->dev); + rs = spi_controller_get_devdata(master); + ++ spi_unregister_controller(master); + clk_disable_unprepare(rs->clk); + + return 0; +diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c +index b97f26a60cbef..288f6c2bbd573 100644 +--- a/drivers/spi/spi-mtk-nor.c ++++ b/drivers/spi/spi-mtk-nor.c +@@ -768,7 +768,7 @@ static int mtk_nor_probe(struct platform_device *pdev) + return -EINVAL; + } + +- ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp)); ++ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*sp)); + if (!ctlr) { + dev_err(&pdev->dev, "failed to allocate spi controller\n"); + return -ENOMEM; +diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c +index 8c630acb0110b..96b418293bf2a 100644 +--- a/drivers/spi/spi-mxic.c ++++ b/drivers/spi/spi-mxic.c +@@ -529,7 +529,7 @@ static int mxic_spi_probe(struct platform_device *pdev) + struct mxic_spi *mxic; + int ret; + +- master = spi_alloc_master(&pdev->dev, sizeof(struct mxic_spi)); ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct mxic_spi)); + if (!master) + return -ENOMEM; + +@@ -574,15 +574,9 @@ static int mxic_spi_probe(struct 
platform_device *pdev) + ret = spi_register_master(master); + if (ret) { + dev_err(&pdev->dev, "spi_register_master failed\n"); +- goto err_put_master; ++ pm_runtime_disable(&pdev->dev); + } + +- return 0; +- +-err_put_master: +- spi_master_put(master); +- pm_runtime_disable(&pdev->dev); +- + return ret; + } + +diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c +index 918918a9e0491..435309b09227e 100644 +--- a/drivers/spi/spi-mxs.c ++++ b/drivers/spi/spi-mxs.c +@@ -607,6 +607,7 @@ static int mxs_spi_probe(struct platform_device *pdev) + + ret = pm_runtime_get_sync(ssp->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(ssp->dev); + dev_err(ssp->dev, "runtime_get_sync failed\n"); + goto out_pm_runtime_disable; + } +diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c +index 1cb9329de945e..b62471ab6d7f2 100644 +--- a/drivers/spi/spi-npcm-fiu.c ++++ b/drivers/spi/spi-npcm-fiu.c +@@ -677,7 +677,7 @@ static int npcm_fiu_probe(struct platform_device *pdev) + struct npcm_fiu_spi *fiu; + void __iomem *regbase; + struct resource *res; +- int id; ++ int id, ret; + + ctrl = devm_spi_alloc_master(dev, sizeof(*fiu)); + if (!ctrl) +@@ -735,7 +735,11 @@ static int npcm_fiu_probe(struct platform_device *pdev) + ctrl->num_chipselect = fiu->info->max_cs; + ctrl->dev.of_node = dev->of_node; + +- return devm_spi_register_master(dev, ctrl); ++ ret = devm_spi_register_master(dev, ctrl); ++ if (ret) ++ clk_disable_unprepare(fiu->clk); ++ ++ return ret; + } + + static int npcm_fiu_remove(struct platform_device *pdev) +diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c +index 156961b4ca86f..104bde153efd2 100644 +--- a/drivers/spi/spi-pic32.c ++++ b/drivers/spi/spi-pic32.c +@@ -839,6 +839,7 @@ static int pic32_spi_probe(struct platform_device *pdev) + return 0; + + err_bailout: ++ pic32_spi_dma_unprep(pic32s); + clk_disable_unprepare(pic32s->clk); + err_master: + spi_master_put(master); +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c +index 
814268405ab0b..d6b534d38e5da 100644 +--- a/drivers/spi/spi-pxa2xx.c ++++ b/drivers/spi/spi-pxa2xx.c +@@ -1686,9 +1686,9 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) + } + + if (platform_info->is_slave) +- controller = spi_alloc_slave(dev, sizeof(struct driver_data)); ++ controller = devm_spi_alloc_slave(dev, sizeof(*drv_data)); + else +- controller = spi_alloc_master(dev, sizeof(struct driver_data)); ++ controller = devm_spi_alloc_master(dev, sizeof(*drv_data)); + + if (!controller) { + dev_err(&pdev->dev, "cannot alloc spi_controller\n"); +@@ -1911,7 +1911,6 @@ out_error_dma_irq_alloc: + free_irq(ssp->irq, drv_data); + + out_error_controller_alloc: +- spi_controller_put(controller); + pxa_ssp_free(ssp); + return status; + } +diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c +index 5eed88af6899b..8863be3708845 100644 +--- a/drivers/spi/spi-qcom-qspi.c ++++ b/drivers/spi/spi-qcom-qspi.c +@@ -462,7 +462,7 @@ static int qcom_qspi_probe(struct platform_device *pdev) + + dev = &pdev->dev; + +- master = spi_alloc_master(dev, sizeof(*ctrl)); ++ master = devm_spi_alloc_master(dev, sizeof(*ctrl)); + if (!master) + return -ENOMEM; + +@@ -473,54 +473,49 @@ static int qcom_qspi_probe(struct platform_device *pdev) + spin_lock_init(&ctrl->lock); + ctrl->dev = dev; + ctrl->base = devm_platform_ioremap_resource(pdev, 0); +- if (IS_ERR(ctrl->base)) { +- ret = PTR_ERR(ctrl->base); +- goto exit_probe_master_put; +- } ++ if (IS_ERR(ctrl->base)) ++ return PTR_ERR(ctrl->base); + + ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS, + sizeof(*ctrl->clks), GFP_KERNEL); +- if (!ctrl->clks) { +- ret = -ENOMEM; +- goto exit_probe_master_put; +- } ++ if (!ctrl->clks) ++ return -ENOMEM; + + ctrl->clks[QSPI_CLK_CORE].id = "core"; + ctrl->clks[QSPI_CLK_IFACE].id = "iface"; + ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks); + if (ret) +- goto exit_probe_master_put; ++ return ret; + + ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config"); +- if 
(IS_ERR(ctrl->icc_path_cpu_to_qspi)) { +- ret = dev_err_probe(dev, PTR_ERR(ctrl->icc_path_cpu_to_qspi), +- "Failed to get cpu path\n"); +- goto exit_probe_master_put; +- } ++ if (IS_ERR(ctrl->icc_path_cpu_to_qspi)) ++ return dev_err_probe(dev, PTR_ERR(ctrl->icc_path_cpu_to_qspi), ++ "Failed to get cpu path\n"); ++ + /* Set BW vote for register access */ + ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, Bps_to_icc(1000), + Bps_to_icc(1000)); + if (ret) { + dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n", + __func__, ret); +- goto exit_probe_master_put; ++ return ret; + } + + ret = icc_disable(ctrl->icc_path_cpu_to_qspi); + if (ret) { + dev_err(ctrl->dev, "%s: ICC disable failed for cpu: %d\n", + __func__, ret); +- goto exit_probe_master_put; ++ return ret; + } + + ret = platform_get_irq(pdev, 0); + if (ret < 0) +- goto exit_probe_master_put; ++ return ret; + ret = devm_request_irq(dev, ret, qcom_qspi_irq, + IRQF_TRIGGER_HIGH, dev_name(dev), ctrl); + if (ret) { + dev_err(dev, "Failed to request irq %d\n", ret); +- goto exit_probe_master_put; ++ return ret; + } + + master->max_speed_hz = 300000000; +@@ -537,10 +532,8 @@ static int qcom_qspi_probe(struct platform_device *pdev) + master->auto_runtime_pm = true; + + ctrl->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "core"); +- if (IS_ERR(ctrl->opp_table)) { +- ret = PTR_ERR(ctrl->opp_table); +- goto exit_probe_master_put; +- } ++ if (IS_ERR(ctrl->opp_table)) ++ return PTR_ERR(ctrl->opp_table); + /* OPP table is optional */ + ret = dev_pm_opp_of_add_table(&pdev->dev); + if (ret && ret != -ENODEV) { +@@ -562,9 +555,6 @@ static int qcom_qspi_probe(struct platform_device *pdev) + exit_probe_put_clkname: + dev_pm_opp_put_clkname(ctrl->opp_table); + +-exit_probe_master_put: +- spi_master_put(master); +- + return ret; + } + +diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c +index 8aa51beb4ff3e..9f97d18a05c10 100644 +--- a/drivers/spi/spi-rb4xx.c ++++ b/drivers/spi/spi-rb4xx.c +@@ -143,7 +143,7 @@ 
static int rb4xx_spi_probe(struct platform_device *pdev) + if (IS_ERR(spi_base)) + return PTR_ERR(spi_base); + +- master = spi_alloc_master(&pdev->dev, sizeof(*rbspi)); ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(*rbspi)); + if (!master) + return -ENOMEM; + +diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c +index ed3e548227f47..3579675485a5e 100644 +--- a/drivers/spi/spi-rpc-if.c ++++ b/drivers/spi/spi-rpc-if.c +@@ -134,7 +134,7 @@ static int rpcif_spi_probe(struct platform_device *pdev) + struct rpcif *rpc; + int error; + +- ctlr = spi_alloc_master(&pdev->dev, sizeof(*rpc)); ++ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*rpc)); + if (!ctlr) + return -ENOMEM; + +@@ -159,13 +159,8 @@ static int rpcif_spi_probe(struct platform_device *pdev) + error = spi_register_controller(ctlr); + if (error) { + dev_err(&pdev->dev, "spi_register_controller failed\n"); +- goto err_put_ctlr; ++ rpcif_disable_rpm(rpc); + } +- return 0; +- +-err_put_ctlr: +- rpcif_disable_rpm(rpc); +- spi_controller_put(ctlr); + + return error; + } +diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c +index ee0f3edf49cdb..297c512069a57 100644 +--- a/drivers/spi/spi-sc18is602.c ++++ b/drivers/spi/spi-sc18is602.c +@@ -238,13 +238,12 @@ static int sc18is602_probe(struct i2c_client *client, + struct sc18is602_platform_data *pdata = dev_get_platdata(dev); + struct sc18is602 *hw; + struct spi_master *master; +- int error; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C | + I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) + return -EINVAL; + +- master = spi_alloc_master(dev, sizeof(struct sc18is602)); ++ master = devm_spi_alloc_master(dev, sizeof(struct sc18is602)); + if (!master) + return -ENOMEM; + +@@ -298,15 +297,7 @@ static int sc18is602_probe(struct i2c_client *client, + master->min_speed_hz = hw->freq / 128; + master->max_speed_hz = hw->freq / 4; + +- error = devm_spi_register_master(dev, master); +- if (error) +- goto error_reg; +- +- return 0; +- 
+-error_reg: +- spi_master_put(master); +- return error; ++ return devm_spi_register_master(dev, master); + } + + static const struct i2c_device_id sc18is602_id[] = { +diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c +index 20bdae5fdf3b8..15123a8f41e1e 100644 +--- a/drivers/spi/spi-sh.c ++++ b/drivers/spi/spi-sh.c +@@ -440,7 +440,7 @@ static int spi_sh_probe(struct platform_device *pdev) + if (irq < 0) + return irq; + +- master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data)); ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data)); + if (master == NULL) { + dev_err(&pdev->dev, "spi_alloc_master error.\n"); + return -ENOMEM; +@@ -458,16 +458,14 @@ static int spi_sh_probe(struct platform_device *pdev) + break; + default: + dev_err(&pdev->dev, "No support width\n"); +- ret = -ENODEV; +- goto error1; ++ return -ENODEV; + } + ss->irq = irq; + ss->master = master; + ss->addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (ss->addr == NULL) { + dev_err(&pdev->dev, "ioremap error.\n"); +- ret = -ENOMEM; +- goto error1; ++ return -ENOMEM; + } + INIT_LIST_HEAD(&ss->queue); + spin_lock_init(&ss->lock); +@@ -477,7 +475,7 @@ static int spi_sh_probe(struct platform_device *pdev) + ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss); + if (ret < 0) { + dev_err(&pdev->dev, "request_irq error\n"); +- goto error1; ++ return ret; + } + + master->num_chipselect = 2; +@@ -496,9 +494,6 @@ static int spi_sh_probe(struct platform_device *pdev) + + error3: + free_irq(irq, ss); +- error1: +- spi_master_put(master); +- + return ret; + } + +diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c +index 635738f54c731..b41a75749b498 100644 +--- a/drivers/spi/spi-sprd.c ++++ b/drivers/spi/spi-sprd.c +@@ -1010,6 +1010,7 @@ static int sprd_spi_remove(struct platform_device *pdev) + + ret = pm_runtime_get_sync(ss->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(ss->dev); + dev_err(ss->dev, "failed to resume SPI controller\n"); + return 
ret; + } +diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c +index 77d26d64541a5..6c44dda9ee8c5 100644 +--- a/drivers/spi/spi-st-ssc4.c ++++ b/drivers/spi/spi-st-ssc4.c +@@ -375,13 +375,14 @@ static int spi_st_probe(struct platform_device *pdev) + ret = devm_spi_register_master(&pdev->dev, master); + if (ret) { + dev_err(&pdev->dev, "Failed to register master\n"); +- goto clk_disable; ++ goto rpm_disable; + } + + return 0; + +-clk_disable: ++rpm_disable: + pm_runtime_disable(&pdev->dev); ++clk_disable: + clk_disable_unprepare(spi_st->clk); + put_master: + spi_master_put(master); +diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c +index a900962b4336e..947e6b9dc9f4d 100644 +--- a/drivers/spi/spi-stm32-qspi.c ++++ b/drivers/spi/spi-stm32-qspi.c +@@ -434,8 +434,10 @@ static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) + int ret; + + ret = pm_runtime_get_sync(qspi->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(qspi->dev); + return ret; ++ } + + mutex_lock(&qspi->lock); + ret = stm32_qspi_send(mem, op); +@@ -462,8 +464,10 @@ static int stm32_qspi_setup(struct spi_device *spi) + return -EINVAL; + + ret = pm_runtime_get_sync(qspi->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(qspi->dev); + return ret; ++ } + + presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1; + +diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c +index 2cc850eb8922d..471dedf3d3392 100644 +--- a/drivers/spi/spi-stm32.c ++++ b/drivers/spi/spi-stm32.c +@@ -2062,6 +2062,7 @@ static int stm32_spi_resume(struct device *dev) + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { ++ pm_runtime_put_noidle(dev); + dev_err(dev, "Unable to power device:%d\n", ret); + return ret; + } +diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c +index 42e82dbe3d410..8cdca6ab80989 100644 +--- a/drivers/spi/spi-synquacer.c ++++ b/drivers/spi/spi-synquacer.c +@@ -657,7 +657,8 @@ static 
int synquacer_spi_probe(struct platform_device *pdev) + + if (!master->max_speed_hz) { + dev_err(&pdev->dev, "missing clock source\n"); +- return -EINVAL; ++ ret = -EINVAL; ++ goto disable_clk; + } + master->min_speed_hz = master->max_speed_hz / 254; + +@@ -670,7 +671,7 @@ static int synquacer_spi_probe(struct platform_device *pdev) + rx_irq = platform_get_irq(pdev, 0); + if (rx_irq <= 0) { + ret = rx_irq; +- goto put_spi; ++ goto disable_clk; + } + snprintf(sspi->rx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-rx", + dev_name(&pdev->dev)); +@@ -678,13 +679,13 @@ static int synquacer_spi_probe(struct platform_device *pdev) + 0, sspi->rx_irq_name, sspi); + if (ret) { + dev_err(&pdev->dev, "request rx_irq failed (%d)\n", ret); +- goto put_spi; ++ goto disable_clk; + } + + tx_irq = platform_get_irq(pdev, 1); + if (tx_irq <= 0) { + ret = tx_irq; +- goto put_spi; ++ goto disable_clk; + } + snprintf(sspi->tx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-tx", + dev_name(&pdev->dev)); +@@ -692,7 +693,7 @@ static int synquacer_spi_probe(struct platform_device *pdev) + 0, sspi->tx_irq_name, sspi); + if (ret) { + dev_err(&pdev->dev, "request tx_irq failed (%d)\n", ret); +- goto put_spi; ++ goto disable_clk; + } + + master->dev.of_node = np; +@@ -710,7 +711,7 @@ static int synquacer_spi_probe(struct platform_device *pdev) + + ret = synquacer_spi_enable(master); + if (ret) +- goto fail_enable; ++ goto disable_clk; + + pm_runtime_set_active(sspi->dev); + pm_runtime_enable(sspi->dev); +@@ -723,7 +724,7 @@ static int synquacer_spi_probe(struct platform_device *pdev) + + disable_pm: + pm_runtime_disable(sspi->dev); +-fail_enable: ++disable_clk: + clk_disable_unprepare(sspi->clk); + put_spi: + spi_master_put(master); +diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c +index ca6886aaa5197..a2e5907276e7f 100644 +--- a/drivers/spi/spi-tegra114.c ++++ b/drivers/spi/spi-tegra114.c +@@ -966,6 +966,7 @@ static int tegra_spi_setup(struct spi_device *spi) + + ret = 
pm_runtime_get_sync(tspi->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(tspi->dev); + dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret); + if (cdata) + tegra_spi_cleanup(spi); +@@ -1474,6 +1475,7 @@ static int tegra_spi_resume(struct device *dev) + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { ++ pm_runtime_put_noidle(dev); + dev_err(dev, "pm runtime failed, e = %d\n", ret); + return ret; + } +diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c +index b59015c7c8a80..cfb7de7379376 100644 +--- a/drivers/spi/spi-tegra20-sflash.c ++++ b/drivers/spi/spi-tegra20-sflash.c +@@ -552,6 +552,7 @@ static int tegra_sflash_resume(struct device *dev) + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { ++ pm_runtime_put_noidle(dev); + dev_err(dev, "pm runtime failed, e = %d\n", ret); + return ret; + } +diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c +index a0810765d4e52..f7c832fd40036 100644 +--- a/drivers/spi/spi-tegra20-slink.c ++++ b/drivers/spi/spi-tegra20-slink.c +@@ -751,6 +751,7 @@ static int tegra_slink_setup(struct spi_device *spi) + + ret = pm_runtime_get_sync(tspi->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(tspi->dev); + dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret); + return ret; + } +@@ -1188,6 +1189,7 @@ static int tegra_slink_resume(struct device *dev) + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { ++ pm_runtime_put_noidle(dev); + dev_err(dev, "pm runtime failed, e = %d\n", ret); + return ret; + } +diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c +index 3c41649698a5b..9417385c09217 100644 +--- a/drivers/spi/spi-ti-qspi.c ++++ b/drivers/spi/spi-ti-qspi.c +@@ -174,6 +174,7 @@ static int ti_qspi_setup(struct spi_device *spi) + + ret = pm_runtime_get_sync(qspi->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(qspi->dev); + dev_err(qspi->dev, "pm_runtime_get_sync() failed\n"); + return ret; + } +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c +index 
fc9a59788d2ea..2eaa7dbb70108 100644 +--- a/drivers/spi/spi.c ++++ b/drivers/spi/spi.c +@@ -405,9 +405,11 @@ static int spi_drv_probe(struct device *dev) + if (ret) + return ret; + +- ret = sdrv->probe(spi); +- if (ret) +- dev_pm_domain_detach(dev, true); ++ if (sdrv->probe) { ++ ret = sdrv->probe(spi); ++ if (ret) ++ dev_pm_domain_detach(dev, true); ++ } + + return ret; + } +@@ -415,9 +417,10 @@ static int spi_drv_probe(struct device *dev) + static int spi_drv_remove(struct device *dev) + { + const struct spi_driver *sdrv = to_spi_driver(dev->driver); +- int ret; ++ int ret = 0; + +- ret = sdrv->remove(to_spi_device(dev)); ++ if (sdrv->remove) ++ ret = sdrv->remove(to_spi_device(dev)); + dev_pm_domain_detach(dev, true); + + return ret; +@@ -442,10 +445,8 @@ int __spi_register_driver(struct module *owner, struct spi_driver *sdrv) + { + sdrv->driver.owner = owner; + sdrv->driver.bus = &spi_bus_type; +- if (sdrv->probe) +- sdrv->driver.probe = spi_drv_probe; +- if (sdrv->remove) +- sdrv->driver.remove = spi_drv_remove; ++ sdrv->driver.probe = spi_drv_probe; ++ sdrv->driver.remove = spi_drv_remove; + if (sdrv->shutdown) + sdrv->driver.shutdown = spi_drv_shutdown; + return driver_register(&sdrv->driver); +diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c +index ea430237efa7f..9da8dd748078d 100644 +--- a/drivers/staging/comedi/drivers/mf6x4.c ++++ b/drivers/staging/comedi/drivers/mf6x4.c +@@ -112,8 +112,9 @@ static int mf6x4_ai_eoc(struct comedi_device *dev, + struct mf6x4_private *devpriv = dev->private; + unsigned int status; + ++ /* EOLC goes low at end of conversion. 
*/ + status = ioread32(devpriv->gpioc_reg); +- if (status & MF6X4_GPIOC_EOLC) ++ if ((status & MF6X4_GPIOC_EOLC) == 0) + return 0; + return -EBUSY; + } +diff --git a/drivers/staging/gasket/gasket_interrupt.c b/drivers/staging/gasket/gasket_interrupt.c +index 2d6195f7300e9..864342acfd86e 100644 +--- a/drivers/staging/gasket/gasket_interrupt.c ++++ b/drivers/staging/gasket/gasket_interrupt.c +@@ -487,14 +487,16 @@ int gasket_interrupt_system_status(struct gasket_dev *gasket_dev) + int gasket_interrupt_set_eventfd(struct gasket_interrupt_data *interrupt_data, + int interrupt, int event_fd) + { +- struct eventfd_ctx *ctx = eventfd_ctx_fdget(event_fd); +- +- if (IS_ERR(ctx)) +- return PTR_ERR(ctx); ++ struct eventfd_ctx *ctx; + + if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts) + return -EINVAL; + ++ ctx = eventfd_ctx_fdget(event_fd); ++ ++ if (IS_ERR(ctx)) ++ return PTR_ERR(ctx); ++ + interrupt_data->eventfd_ctxs[interrupt] = ctx; + return 0; + } +@@ -505,6 +507,9 @@ int gasket_interrupt_clear_eventfd(struct gasket_interrupt_data *interrupt_data, + if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts) + return -EINVAL; + +- interrupt_data->eventfd_ctxs[interrupt] = NULL; ++ if (interrupt_data->eventfd_ctxs[interrupt]) { ++ eventfd_ctx_put(interrupt_data->eventfd_ctxs[interrupt]); ++ interrupt_data->eventfd_ctxs[interrupt] = NULL; ++ } + return 0; + } +diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c +index 494aa823e9984..42ce6c88ea753 100644 +--- a/drivers/staging/greybus/audio_codec.c ++++ b/drivers/staging/greybus/audio_codec.c +@@ -490,6 +490,7 @@ static int gbcodec_hw_params(struct snd_pcm_substream *substream, + if (ret) { + dev_err_ratelimited(dai->dev, "%d: Error during set_config\n", + ret); ++ gb_pm_runtime_put_noidle(bundle); + mutex_unlock(&codec->lock); + return ret; + } +@@ -566,6 +567,7 @@ static int gbcodec_prepare(struct snd_pcm_substream *substream, + break; + } + if (ret) { ++ 
gb_pm_runtime_put_noidle(bundle); + mutex_unlock(&codec->lock); + dev_err_ratelimited(dai->dev, "set_data_size failed:%d\n", + ret); +diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c +index 237531ba60f30..3011b8abce389 100644 +--- a/drivers/staging/greybus/audio_helper.c ++++ b/drivers/staging/greybus/audio_helper.c +@@ -135,7 +135,8 @@ int gbaudio_dapm_free_controls(struct snd_soc_dapm_context *dapm, + if (!w) { + dev_err(dapm->dev, "%s: widget not found\n", + widget->name); +- return -EINVAL; ++ widget++; ++ continue; + } + widget++; + #ifdef CONFIG_DEBUG_FS +diff --git a/drivers/staging/hikey9xx/hi6421-spmi-pmic.c b/drivers/staging/hikey9xx/hi6421-spmi-pmic.c +index 64b30d263c8d0..4f34a52829700 100644 +--- a/drivers/staging/hikey9xx/hi6421-spmi-pmic.c ++++ b/drivers/staging/hikey9xx/hi6421-spmi-pmic.c +@@ -262,8 +262,10 @@ static int hi6421_spmi_pmic_probe(struct spmi_device *pdev) + hi6421_spmi_pmic_irq_prc(pmic); + + pmic->irqs = devm_kzalloc(dev, HISI_IRQ_NUM * sizeof(int), GFP_KERNEL); +- if (!pmic->irqs) ++ if (!pmic->irqs) { ++ ret = -ENOMEM; + goto irq_malloc; ++ } + + pmic->domain = irq_domain_add_simple(np, HISI_IRQ_NUM, 0, + &hi6421_spmi_domain_ops, pmic); +diff --git a/drivers/staging/media/rkisp1/rkisp1-capture.c b/drivers/staging/media/rkisp1/rkisp1-capture.c +index b6f497ce3e95c..0c934ca5adaa3 100644 +--- a/drivers/staging/media/rkisp1/rkisp1-capture.c ++++ b/drivers/staging/media/rkisp1/rkisp1-capture.c +@@ -992,6 +992,7 @@ rkisp1_vb2_start_streaming(struct vb2_queue *queue, unsigned int count) + + ret = pm_runtime_get_sync(cap->rkisp1->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(cap->rkisp1->dev); + dev_err(cap->rkisp1->dev, "power up failed %d\n", ret); + goto err_destroy_dummy; + } +diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c +index 667b86dde1ee8..911f607d9b092 100644 +--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c 
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c +@@ -479,8 +479,10 @@ static int cedrus_start_streaming(struct vb2_queue *vq, unsigned int count) + + if (V4L2_TYPE_IS_OUTPUT(vq->type)) { + ret = pm_runtime_get_sync(dev->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(dev->dev); + goto err_cleanup; ++ } + + if (dev->dec_ops[ctx->current_codec]->start) { + ret = dev->dec_ops[ctx->current_codec]->start(ctx); +diff --git a/drivers/staging/vc04_services/vchiq-mmal/Kconfig b/drivers/staging/vc04_services/vchiq-mmal/Kconfig +index 500c0d12e4ff2..c99525a0bb452 100644 +--- a/drivers/staging/vc04_services/vchiq-mmal/Kconfig ++++ b/drivers/staging/vc04_services/vchiq-mmal/Kconfig +@@ -1,6 +1,6 @@ + config BCM2835_VCHIQ_MMAL + tristate "BCM2835 MMAL VCHIQ service" +- depends on (ARCH_BCM2835 || COMPILE_TEST) ++ depends on BCM2835_VCHIQ + help + Enables the MMAL API over VCHIQ interface as used for the + majority of the multimedia services on VideoCore. +diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c +index cc2959f22f01a..612f063c1cfcd 100644 +--- a/drivers/thermal/cpufreq_cooling.c ++++ b/drivers/thermal/cpufreq_cooling.c +@@ -438,13 +438,11 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, + if (cpufreq_cdev->cpufreq_state == state) + return 0; + +- cpufreq_cdev->cpufreq_state = state; +- + frequency = get_state_freq(cpufreq_cdev, state); + + ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency); +- + if (ret > 0) { ++ cpufreq_cdev->cpufreq_state = state; + cpus = cpufreq_cdev->policy->cpus; + max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus)); + capacity = frequency * max_capacity; +diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c +index fa876e2c13e5d..f7d3023f860f0 100644 +--- a/drivers/tty/serial/8250/8250_mtk.c ++++ b/drivers/tty/serial/8250/8250_mtk.c +@@ -572,15 +572,22 @@ static int mtk8250_probe(struct platform_device *pdev) + 
pm_runtime_enable(&pdev->dev); + err = mtk8250_runtime_resume(&pdev->dev); + if (err) +- return err; ++ goto err_pm_disable; + + data->line = serial8250_register_8250_port(&uart); +- if (data->line < 0) +- return data->line; ++ if (data->line < 0) { ++ err = data->line; ++ goto err_pm_disable; ++ } + + data->rx_wakeup_irq = platform_get_irq_optional(pdev, 1); + + return 0; ++ ++err_pm_disable: ++ pm_runtime_disable(&pdev->dev); ++ ++ return err; + } + + static int mtk8250_remove(struct platform_device *pdev) +diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c +index 063484b22523a..d6aef8a1f0a48 100644 +--- a/drivers/tty/serial/pmac_zilog.c ++++ b/drivers/tty/serial/pmac_zilog.c +@@ -1693,22 +1693,26 @@ static int __init pmz_probe(void) + + #else + ++/* On PCI PowerMacs, pmz_probe() does an explicit search of the OpenFirmware ++ * tree to obtain the device_nodes needed to start the console before the ++ * macio driver. On Macs without OpenFirmware, global platform_devices take ++ * the place of those device_nodes. 
++ */ + extern struct platform_device scc_a_pdev, scc_b_pdev; + + static int __init pmz_init_port(struct uart_pmac_port *uap) + { +- struct resource *r_ports; +- int irq; ++ struct resource *r_ports, *r_irq; + + r_ports = platform_get_resource(uap->pdev, IORESOURCE_MEM, 0); +- irq = platform_get_irq(uap->pdev, 0); +- if (!r_ports || irq <= 0) ++ r_irq = platform_get_resource(uap->pdev, IORESOURCE_IRQ, 0); ++ if (!r_ports || !r_irq) + return -ENODEV; + + uap->port.mapbase = r_ports->start; + uap->port.membase = (unsigned char __iomem *) r_ports->start; + uap->port.iotype = UPIO_MEM; +- uap->port.irq = irq; ++ uap->port.irq = r_irq->start; + uap->port.uartclk = ZS_CLOCK; + uap->port.fifosize = 1; + uap->port.ops = &pmz_pops; +diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c +index 8771a2ed69268..7f4a03e8647af 100644 +--- a/drivers/usb/host/ehci-omap.c ++++ b/drivers/usb/host/ehci-omap.c +@@ -220,6 +220,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) + + err_pm_runtime: + pm_runtime_put_sync(dev); ++ pm_runtime_disable(dev); + + err_phy: + for (i = 0; i < omap->nports; i++) { +diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c +index 0894f6caccb2c..ebb8180b52ab1 100644 +--- a/drivers/usb/host/max3421-hcd.c ++++ b/drivers/usb/host/max3421-hcd.c +@@ -1847,7 +1847,7 @@ max3421_probe(struct spi_device *spi) + struct max3421_hcd *max3421_hcd; + struct usb_hcd *hcd = NULL; + struct max3421_hcd_platform_data *pdata = NULL; +- int retval = -ENOMEM; ++ int retval; + + if (spi_setup(spi) < 0) { + dev_err(&spi->dev, "Unable to setup SPI bus"); +@@ -1889,6 +1889,7 @@ max3421_probe(struct spi_device *spi) + goto error; + } + ++ retval = -ENOMEM; + hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev, + dev_name(&spi->dev)); + if (!hcd) { +diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c +index 27dbbe1b28b12..e832909a924fa 100644 +--- a/drivers/usb/host/oxu210hp-hcd.c ++++ 
b/drivers/usb/host/oxu210hp-hcd.c +@@ -4151,8 +4151,10 @@ static struct usb_hcd *oxu_create(struct platform_device *pdev, + oxu->is_otg = otg; + + ret = usb_add_hcd(hcd, irq, IRQF_SHARED); +- if (ret < 0) ++ if (ret < 0) { ++ usb_put_hcd(hcd); + return ERR_PTR(ret); ++ } + + device_wakeup_enable(hcd->self.controller); + return hcd; +diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c +index 91055a191995f..0d606fa9fdca1 100644 +--- a/drivers/usb/serial/digi_acceleport.c ++++ b/drivers/usb/serial/digi_acceleport.c +@@ -19,7 +19,6 @@ + #include <linux/tty_flip.h> + #include <linux/module.h> + #include <linux/spinlock.h> +-#include <linux/workqueue.h> + #include <linux/uaccess.h> + #include <linux/usb.h> + #include <linux/wait.h> +@@ -198,14 +197,12 @@ struct digi_port { + int dp_throttle_restart; + wait_queue_head_t dp_flush_wait; + wait_queue_head_t dp_close_wait; /* wait queue for close */ +- struct work_struct dp_wakeup_work; + struct usb_serial_port *dp_port; + }; + + + /* Local Function Declarations */ + +-static void digi_wakeup_write_lock(struct work_struct *work); + static int digi_write_oob_command(struct usb_serial_port *port, + unsigned char *buf, int count, int interruptible); + static int digi_write_inb_command(struct usb_serial_port *port, +@@ -356,26 +353,6 @@ __releases(lock) + return timeout; + } + +- +-/* +- * Digi Wakeup Write +- * +- * Wake up port, line discipline, and tty processes sleeping +- * on writes. 
+- */ +- +-static void digi_wakeup_write_lock(struct work_struct *work) +-{ +- struct digi_port *priv = +- container_of(work, struct digi_port, dp_wakeup_work); +- struct usb_serial_port *port = priv->dp_port; +- unsigned long flags; +- +- spin_lock_irqsave(&priv->dp_port_lock, flags); +- tty_port_tty_wakeup(&port->port); +- spin_unlock_irqrestore(&priv->dp_port_lock, flags); +-} +- + /* + * Digi Write OOB Command + * +@@ -986,6 +963,7 @@ static void digi_write_bulk_callback(struct urb *urb) + unsigned long flags; + int ret = 0; + int status = urb->status; ++ bool wakeup; + + /* port and serial sanity check */ + if (port == NULL || (priv = usb_get_serial_port_data(port)) == NULL) { +@@ -1012,6 +990,7 @@ static void digi_write_bulk_callback(struct urb *urb) + } + + /* try to send any buffered data on this port */ ++ wakeup = true; + spin_lock_irqsave(&priv->dp_port_lock, flags); + priv->dp_write_urb_in_use = 0; + if (priv->dp_out_buf_len > 0) { +@@ -1027,19 +1006,18 @@ static void digi_write_bulk_callback(struct urb *urb) + if (ret == 0) { + priv->dp_write_urb_in_use = 1; + priv->dp_out_buf_len = 0; ++ wakeup = false; + } + } +- /* wake up processes sleeping on writes immediately */ +- tty_port_tty_wakeup(&port->port); +- /* also queue up a wakeup at scheduler time, in case we */ +- /* lost the race in write_chan(). 
*/ +- schedule_work(&priv->dp_wakeup_work); +- + spin_unlock_irqrestore(&priv->dp_port_lock, flags); ++ + if (ret && ret != -EPERM) + dev_err_console(port, + "%s: usb_submit_urb failed, ret=%d, port=%d\n", + __func__, ret, priv->dp_port_num); ++ ++ if (wakeup) ++ tty_port_tty_wakeup(&port->port); + } + + static int digi_write_room(struct tty_struct *tty) +@@ -1239,7 +1217,6 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num) + init_waitqueue_head(&priv->dp_transmit_idle_wait); + init_waitqueue_head(&priv->dp_flush_wait); + init_waitqueue_head(&priv->dp_close_wait); +- INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock); + priv->dp_port = port; + + init_waitqueue_head(&port->write_wait); +@@ -1508,13 +1485,14 @@ static int digi_read_oob_callback(struct urb *urb) + rts = C_CRTSCTS(tty); + + if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) { ++ bool wakeup = false; ++ + spin_lock_irqsave(&priv->dp_port_lock, flags); + /* convert from digi flags to termiox flags */ + if (val & DIGI_READ_INPUT_SIGNALS_CTS) { + priv->dp_modem_signals |= TIOCM_CTS; +- /* port must be open to use tty struct */ + if (rts) +- tty_port_tty_wakeup(&port->port); ++ wakeup = true; + } else { + priv->dp_modem_signals &= ~TIOCM_CTS; + /* port must be open to use tty struct */ +@@ -1533,6 +1511,9 @@ static int digi_read_oob_callback(struct urb *urb) + priv->dp_modem_signals &= ~TIOCM_CD; + + spin_unlock_irqrestore(&priv->dp_port_lock, flags); ++ ++ if (wakeup) ++ tty_port_tty_wakeup(&port->port); + } else if (opcode == DIGI_CMD_TRANSMIT_IDLE) { + spin_lock_irqsave(&priv->dp_port_lock, flags); + priv->dp_transmit_idle = 1; +diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c +index c1333919716b6..39ed3ad323651 100644 +--- a/drivers/usb/serial/keyspan_pda.c ++++ b/drivers/usb/serial/keyspan_pda.c +@@ -40,11 +40,12 @@ + #define DRIVER_AUTHOR "Brian Warner <warner@lothar.com>" + #define DRIVER_DESC "USB Keyspan PDA Converter driver" + 
++#define KEYSPAN_TX_THRESHOLD 16 ++ + struct keyspan_pda_private { + int tx_room; + int tx_throttled; +- struct work_struct wakeup_work; +- struct work_struct unthrottle_work; ++ struct work_struct unthrottle_work; + struct usb_serial *serial; + struct usb_serial_port *port; + }; +@@ -97,15 +98,6 @@ static const struct usb_device_id id_table_fake_xircom[] = { + }; + #endif + +-static void keyspan_pda_wakeup_write(struct work_struct *work) +-{ +- struct keyspan_pda_private *priv = +- container_of(work, struct keyspan_pda_private, wakeup_work); +- struct usb_serial_port *port = priv->port; +- +- tty_port_tty_wakeup(&port->port); +-} +- + static void keyspan_pda_request_unthrottle(struct work_struct *work) + { + struct keyspan_pda_private *priv = +@@ -120,7 +112,7 @@ static void keyspan_pda_request_unthrottle(struct work_struct *work) + 7, /* request_unthrottle */ + USB_TYPE_VENDOR | USB_RECIP_INTERFACE + | USB_DIR_OUT, +- 16, /* value: threshold */ ++ KEYSPAN_TX_THRESHOLD, + 0, /* index */ + NULL, + 0, +@@ -139,6 +131,8 @@ static void keyspan_pda_rx_interrupt(struct urb *urb) + int retval; + int status = urb->status; + struct keyspan_pda_private *priv; ++ unsigned long flags; ++ + priv = usb_get_serial_port_data(port); + + switch (status) { +@@ -172,18 +166,21 @@ static void keyspan_pda_rx_interrupt(struct urb *urb) + break; + case 1: + /* status interrupt */ +- if (len < 3) { ++ if (len < 2) { + dev_warn(&port->dev, "short interrupt message received\n"); + break; + } +- dev_dbg(&port->dev, "rx int, d1=%d, d2=%d\n", data[1], data[2]); ++ dev_dbg(&port->dev, "rx int, d1=%d\n", data[1]); + switch (data[1]) { + case 1: /* modemline change */ + break; + case 2: /* tx unthrottle interrupt */ ++ spin_lock_irqsave(&port->lock, flags); + priv->tx_throttled = 0; ++ priv->tx_room = max(priv->tx_room, KEYSPAN_TX_THRESHOLD); ++ spin_unlock_irqrestore(&port->lock, flags); + /* queue up a wakeup at scheduler time */ +- schedule_work(&priv->wakeup_work); ++ 
usb_serial_port_softint(port); + break; + default: + break; +@@ -443,6 +440,7 @@ static int keyspan_pda_write(struct tty_struct *tty, + int request_unthrottle = 0; + int rc = 0; + struct keyspan_pda_private *priv; ++ unsigned long flags; + + priv = usb_get_serial_port_data(port); + /* guess how much room is left in the device's ring buffer, and if we +@@ -462,13 +460,13 @@ static int keyspan_pda_write(struct tty_struct *tty, + the TX urb is in-flight (wait until it completes) + the device is full (wait until it says there is room) + */ +- spin_lock_bh(&port->lock); ++ spin_lock_irqsave(&port->lock, flags); + if (!test_bit(0, &port->write_urbs_free) || priv->tx_throttled) { +- spin_unlock_bh(&port->lock); ++ spin_unlock_irqrestore(&port->lock, flags); + return 0; + } + clear_bit(0, &port->write_urbs_free); +- spin_unlock_bh(&port->lock); ++ spin_unlock_irqrestore(&port->lock, flags); + + /* At this point the URB is in our control, nobody else can submit it + again (the only sudden transition was the one from EINPROGRESS to +@@ -514,7 +512,8 @@ static int keyspan_pda_write(struct tty_struct *tty, + goto exit; + } + } +- if (count > priv->tx_room) { ++ ++ if (count >= priv->tx_room) { + /* we're about to completely fill the Tx buffer, so + we'll be throttled afterwards. */ + count = priv->tx_room; +@@ -547,7 +546,7 @@ static int keyspan_pda_write(struct tty_struct *tty, + + rc = count; + exit: +- if (rc < 0) ++ if (rc <= 0) + set_bit(0, &port->write_urbs_free); + return rc; + } +@@ -562,21 +561,24 @@ static void keyspan_pda_write_bulk_callback(struct urb *urb) + priv = usb_get_serial_port_data(port); + + /* queue up a wakeup at scheduler time */ +- schedule_work(&priv->wakeup_work); ++ usb_serial_port_softint(port); + } + + + static int keyspan_pda_write_room(struct tty_struct *tty) + { + struct usb_serial_port *port = tty->driver_data; +- struct keyspan_pda_private *priv; +- priv = usb_get_serial_port_data(port); +- /* used by n_tty.c for processing of tabs and such. 
Giving it our +- conservative guess is probably good enough, but needs testing by +- running a console through the device. */ +- return priv->tx_room; +-} ++ struct keyspan_pda_private *priv = usb_get_serial_port_data(port); ++ unsigned long flags; ++ int room = 0; + ++ spin_lock_irqsave(&port->lock, flags); ++ if (test_bit(0, &port->write_urbs_free) && !priv->tx_throttled) ++ room = priv->tx_room; ++ spin_unlock_irqrestore(&port->lock, flags); ++ ++ return room; ++} + + static int keyspan_pda_chars_in_buffer(struct tty_struct *tty) + { +@@ -656,8 +658,12 @@ error: + } + static void keyspan_pda_close(struct usb_serial_port *port) + { ++ struct keyspan_pda_private *priv = usb_get_serial_port_data(port); ++ + usb_kill_urb(port->write_urb); + usb_kill_urb(port->interrupt_in_urb); ++ ++ cancel_work_sync(&priv->unthrottle_work); + } + + +@@ -714,7 +720,6 @@ static int keyspan_pda_port_probe(struct usb_serial_port *port) + if (!priv) + return -ENOMEM; + +- INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write); + INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle); + priv->serial = port->serial; + priv->port = port; +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c +index 5eed1078fac87..5a5d2a95070ed 100644 +--- a/drivers/usb/serial/mos7720.c ++++ b/drivers/usb/serial/mos7720.c +@@ -639,6 +639,8 @@ static void parport_mos7715_restore_state(struct parport *pp, + spin_unlock(&release_lock); + return; + } ++ mos_parport->shadowDCR = s->u.pc.ctr; ++ mos_parport->shadowECR = s->u.pc.ecr; + write_parport_reg_nonblock(mos_parport, MOS7720_DCR, + mos_parport->shadowDCR); + write_parport_reg_nonblock(mos_parport, MOS7720_ECR, +diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c +index 1fa6fcac82992..81b932f72e103 100644 +--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c ++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c +@@ -464,6 +464,11 @@ static int mlx5_vdpa_poll_one(struct mlx5_vdpa_cq *vcq) + static void 
mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num) + { + mlx5_cq_set_ci(&mvq->cq.mcq); ++ ++ /* make sure CQ cosumer update is visible to the hardware before updating ++ * RX doorbell record. ++ */ ++ dma_wmb(); + rx_post(&mvq->vqqp, num); + if (mvq->event_cb.callback) + mvq->event_cb.callback(mvq->event_cb.private); +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c +index e6190173482c7..706de3ef94bbf 100644 +--- a/drivers/vfio/pci/vfio_pci.c ++++ b/drivers/vfio/pci/vfio_pci.c +@@ -161,8 +161,6 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev) + int i; + struct vfio_pci_dummy_resource *dummy_res; + +- INIT_LIST_HEAD(&vdev->dummy_resources_list); +- + for (i = 0; i < PCI_STD_NUM_BARS; i++) { + int bar = i + PCI_STD_RESOURCES; + +@@ -1635,8 +1633,8 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf) + + mutex_unlock(&vdev->vma_lock); + +- if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, +- vma->vm_end - vma->vm_start, vma->vm_page_prot)) ++ if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, ++ vma->vm_end - vma->vm_start, vma->vm_page_prot)) + ret = VM_FAULT_SIGBUS; + + up_out: +@@ -1966,6 +1964,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + mutex_init(&vdev->igate); + spin_lock_init(&vdev->irqlock); + mutex_init(&vdev->ioeventfds_lock); ++ INIT_LIST_HEAD(&vdev->dummy_resources_list); + INIT_LIST_HEAD(&vdev->ioeventfds_list); + mutex_init(&vdev->vma_lock); + INIT_LIST_HEAD(&vdev->vma_list); +diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c +index 65c61710c0e9a..9adcf6a8f8885 100644 +--- a/drivers/vfio/pci/vfio_pci_nvlink2.c ++++ b/drivers/vfio/pci/vfio_pci_nvlink2.c +@@ -231,7 +231,7 @@ int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev) + return -EINVAL; + + if (of_property_read_u32(npu_node, "memory-region", &mem_phandle)) +- return -EINVAL; ++ return -ENODEV; + + mem_node = 
of_find_node_by_phandle(mem_phandle); + if (!mem_node) +@@ -393,7 +393,7 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev) + int ret; + struct vfio_pci_npu2_data *data; + struct device_node *nvlink_dn; +- u32 nvlink_index = 0; ++ u32 nvlink_index = 0, mem_phandle = 0; + struct pci_dev *npdev = vdev->pdev; + struct device_node *npu_node = pci_device_to_OF_node(npdev); + struct pci_controller *hose = pci_bus_to_host(npdev->bus); +@@ -408,6 +408,9 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev) + if (!pnv_pci_get_gpu_dev(vdev->pdev)) + return -ENODEV; + ++ if (of_property_read_u32(npu_node, "memory-region", &mem_phandle)) ++ return -ENODEV; ++ + /* + * NPU2 normally has 8 ATSD registers (for concurrency) and 6 links + * so we can allocate one register per link, using nvlink index as +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c +index 6ff8a50966915..4ce9f00ae10e8 100644 +--- a/drivers/vhost/scsi.c ++++ b/drivers/vhost/scsi.c +@@ -1643,7 +1643,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, + if (!vhost_vq_is_setup(vq)) + continue; + +- if (vhost_scsi_setup_vq_cmds(vq, vq->num)) ++ ret = vhost_scsi_setup_vq_cmds(vq, vq->num); ++ if (ret) + goto destroy_vq_cmds; + } + +diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c +index 8c1d47e52b1a6..355b6120dc4f0 100644 +--- a/drivers/video/fbdev/atmel_lcdfb.c ++++ b/drivers/video/fbdev/atmel_lcdfb.c +@@ -987,8 +987,8 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) + } + + INIT_LIST_HEAD(&pdata->pwr_gpios); +- ret = -ENOMEM; + for (i = 0; i < gpiod_count(dev, "atmel,power-control"); i++) { ++ ret = -ENOMEM; + gpiod = devm_gpiod_get_index(dev, "atmel,power-control", + i, GPIOD_ASIS); + if (IS_ERR(gpiod)) +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c +index becc776979602..71e16b53e9c18 100644 +--- a/drivers/virtio/virtio_ring.c ++++ b/drivers/virtio/virtio_ring.c +@@ -1608,7 +1608,6 @@ static struct virtqueue 
*vring_create_virtqueue_packed( + vq->num_added = 0; + vq->packed_ring = true; + vq->use_dma_api = vring_use_dma_api(vdev); +- list_add_tail(&vq->vq.list, &vdev->vqs); + #ifdef DEBUG + vq->in_use = false; + vq->last_add_time_valid = false; +@@ -1669,6 +1668,7 @@ static struct virtqueue *vring_create_virtqueue_packed( + cpu_to_le16(vq->packed.event_flags_shadow); + } + ++ list_add_tail(&vq->vq.list, &vdev->vqs); + return &vq->vq; + + err_desc_extra: +@@ -1676,9 +1676,9 @@ err_desc_extra: + err_desc_state: + kfree(vq); + err_vq: +- vring_free_queue(vdev, event_size_in_bytes, device, ring_dma_addr); ++ vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr); + err_device: +- vring_free_queue(vdev, event_size_in_bytes, driver, ring_dma_addr); ++ vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr); + err_driver: + vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr); + err_ring: +@@ -2085,7 +2085,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, + vq->last_used_idx = 0; + vq->num_added = 0; + vq->use_dma_api = vring_use_dma_api(vdev); +- list_add_tail(&vq->vq.list, &vdev->vqs); + #ifdef DEBUG + vq->in_use = false; + vq->last_add_time_valid = false; +@@ -2127,6 +2126,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, + memset(vq->split.desc_state, 0, vring.num * + sizeof(struct vring_desc_state_split)); + ++ list_add_tail(&vq->vq.list, &vdev->vqs); + return &vq->vq; + } + EXPORT_SYMBOL_GPL(__vring_new_virtqueue); +diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig +index fd7968635e6df..db935d6b10c27 100644 +--- a/drivers/watchdog/Kconfig ++++ b/drivers/watchdog/Kconfig +@@ -386,6 +386,7 @@ config ARM_SBSA_WATCHDOG + config ARMADA_37XX_WATCHDOG + tristate "Armada 37xx watchdog" + depends on ARCH_MVEBU || COMPILE_TEST ++ depends on HAS_IOMEM + select MFD_SYSCON + select WATCHDOG_CORE + help +@@ -631,7 +632,7 @@ config SUNXI_WATCHDOG + + config COH901327_WATCHDOG + bool 
"ST-Ericsson COH 901 327 watchdog" +- depends on ARCH_U300 || (ARM && COMPILE_TEST) ++ depends on ARCH_U300 || (ARM && COMMON_CLK && COMPILE_TEST) + default y if MACH_U300 + select WATCHDOG_CORE + help +@@ -789,6 +790,7 @@ config MOXART_WDT + + config SIRFSOC_WATCHDOG + tristate "SiRFSOC watchdog" ++ depends on HAS_IOMEM + depends on ARCH_SIRF || COMPILE_TEST + select WATCHDOG_CORE + default y +diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c +index ab7465d186fda..cdf754233e53d 100644 +--- a/drivers/watchdog/qcom-wdt.c ++++ b/drivers/watchdog/qcom-wdt.c +@@ -148,7 +148,7 @@ static int qcom_wdt_restart(struct watchdog_device *wdd, unsigned long action, + */ + wmb(); + +- msleep(150); ++ mdelay(150); + return 0; + } + +diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c +index 65cb55f3916fc..b9b1daa9e2a4c 100644 +--- a/drivers/watchdog/sprd_wdt.c ++++ b/drivers/watchdog/sprd_wdt.c +@@ -108,18 +108,6 @@ static int sprd_wdt_load_value(struct sprd_wdt *wdt, u32 timeout, + u32 tmr_step = timeout * SPRD_WDT_CNT_STEP; + u32 prtmr_step = pretimeout * SPRD_WDT_CNT_STEP; + +- sprd_wdt_unlock(wdt->base); +- writel_relaxed((tmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) & +- SPRD_WDT_LOW_VALUE_MASK, wdt->base + SPRD_WDT_LOAD_HIGH); +- writel_relaxed((tmr_step & SPRD_WDT_LOW_VALUE_MASK), +- wdt->base + SPRD_WDT_LOAD_LOW); +- writel_relaxed((prtmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) & +- SPRD_WDT_LOW_VALUE_MASK, +- wdt->base + SPRD_WDT_IRQ_LOAD_HIGH); +- writel_relaxed(prtmr_step & SPRD_WDT_LOW_VALUE_MASK, +- wdt->base + SPRD_WDT_IRQ_LOAD_LOW); +- sprd_wdt_lock(wdt->base); +- + /* + * Waiting the load value operation done, + * it needs two or three RTC clock cycles. 
+@@ -134,6 +122,19 @@ static int sprd_wdt_load_value(struct sprd_wdt *wdt, u32 timeout, + + if (delay_cnt >= SPRD_WDT_LOAD_TIMEOUT) + return -EBUSY; ++ ++ sprd_wdt_unlock(wdt->base); ++ writel_relaxed((tmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) & ++ SPRD_WDT_LOW_VALUE_MASK, wdt->base + SPRD_WDT_LOAD_HIGH); ++ writel_relaxed((tmr_step & SPRD_WDT_LOW_VALUE_MASK), ++ wdt->base + SPRD_WDT_LOAD_LOW); ++ writel_relaxed((prtmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) & ++ SPRD_WDT_LOW_VALUE_MASK, ++ wdt->base + SPRD_WDT_IRQ_LOAD_HIGH); ++ writel_relaxed(prtmr_step & SPRD_WDT_LOW_VALUE_MASK, ++ wdt->base + SPRD_WDT_IRQ_LOAD_LOW); ++ sprd_wdt_lock(wdt->base); ++ + return 0; + } + +@@ -345,15 +346,10 @@ static int __maybe_unused sprd_wdt_pm_resume(struct device *dev) + if (ret) + return ret; + +- if (watchdog_active(&wdt->wdd)) { ++ if (watchdog_active(&wdt->wdd)) + ret = sprd_wdt_start(&wdt->wdd); +- if (ret) { +- sprd_wdt_disable(wdt); +- return ret; +- } +- } + +- return 0; ++ return ret; + } + + static const struct dev_pm_ops sprd_wdt_pm_ops = { +diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c +index 4238447578128..0e9a99559609c 100644 +--- a/drivers/watchdog/watchdog_core.c ++++ b/drivers/watchdog/watchdog_core.c +@@ -267,15 +267,19 @@ static int __watchdog_register_device(struct watchdog_device *wdd) + } + + if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) { +- wdd->reboot_nb.notifier_call = watchdog_reboot_notifier; +- +- ret = register_reboot_notifier(&wdd->reboot_nb); +- if (ret) { +- pr_err("watchdog%d: Cannot register reboot notifier (%d)\n", +- wdd->id, ret); +- watchdog_dev_unregister(wdd); +- ida_simple_remove(&watchdog_ida, id); +- return ret; ++ if (!wdd->ops->stop) ++ pr_warn("watchdog%d: stop_on_reboot not supported\n", wdd->id); ++ else { ++ wdd->reboot_nb.notifier_call = watchdog_reboot_notifier; ++ ++ ret = register_reboot_notifier(&wdd->reboot_nb); ++ if (ret) { ++ pr_err("watchdog%d: Cannot register reboot notifier (%d)\n", ++ 
wdd->id, ret); ++ watchdog_dev_unregister(wdd); ++ ida_simple_remove(&watchdog_ida, id); ++ return ret; ++ } + } + } + +diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c +index 4b99ec3dec58a..e7c692cfb2cf8 100644 +--- a/drivers/xen/xen-pciback/xenbus.c ++++ b/drivers/xen/xen-pciback/xenbus.c +@@ -689,7 +689,7 @@ static int xen_pcibk_xenbus_probe(struct xenbus_device *dev, + + /* watch the backend node for backend configuration information */ + err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch, +- xen_pcibk_be_watch); ++ NULL, xen_pcibk_be_watch); + if (err) + goto out; + +diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h +index 5f5b8a7d5b80b..2a93b7c9c1599 100644 +--- a/drivers/xen/xenbus/xenbus.h ++++ b/drivers/xen/xenbus/xenbus.h +@@ -44,6 +44,8 @@ struct xen_bus_type { + int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); + int (*probe)(struct xen_bus_type *bus, const char *type, + const char *dir); ++ bool (*otherend_will_handle)(struct xenbus_watch *watch, ++ const char *path, const char *token); + void (*otherend_changed)(struct xenbus_watch *watch, const char *path, + const char *token); + struct bus_type bus; +diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c +index fd80e318b99cc..0cd728961fce9 100644 +--- a/drivers/xen/xenbus/xenbus_client.c ++++ b/drivers/xen/xenbus/xenbus_client.c +@@ -127,18 +127,22 @@ EXPORT_SYMBOL_GPL(xenbus_strstate); + */ + int xenbus_watch_path(struct xenbus_device *dev, const char *path, + struct xenbus_watch *watch, ++ bool (*will_handle)(struct xenbus_watch *, ++ const char *, const char *), + void (*callback)(struct xenbus_watch *, + const char *, const char *)) + { + int err; + + watch->node = path; ++ watch->will_handle = will_handle; + watch->callback = callback; + + err = register_xenbus_watch(watch); + + if (err) { + watch->node = NULL; ++ watch->will_handle = NULL; + watch->callback = NULL; + 
xenbus_dev_fatal(dev, err, "adding watch on %s", path); + } +@@ -165,6 +169,8 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path); + */ + int xenbus_watch_pathfmt(struct xenbus_device *dev, + struct xenbus_watch *watch, ++ bool (*will_handle)(struct xenbus_watch *, ++ const char *, const char *), + void (*callback)(struct xenbus_watch *, + const char *, const char *), + const char *pathfmt, ...) +@@ -181,7 +187,7 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev, + xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); + return -ENOMEM; + } +- err = xenbus_watch_path(dev, path, watch, callback); ++ err = xenbus_watch_path(dev, path, watch, will_handle, callback); + + if (err) + kfree(path); +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c +index 38725d97d9093..44634d970a5ca 100644 +--- a/drivers/xen/xenbus/xenbus_probe.c ++++ b/drivers/xen/xenbus/xenbus_probe.c +@@ -136,6 +136,7 @@ static int watch_otherend(struct xenbus_device *dev) + container_of(dev->dev.bus, struct xen_bus_type, bus); + + return xenbus_watch_pathfmt(dev, &dev->otherend_watch, ++ bus->otherend_will_handle, + bus->otherend_changed, + "%s/%s", dev->otherend, "state"); + } +diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c +index 2ba699897e6dd..5abded97e1a7e 100644 +--- a/drivers/xen/xenbus/xenbus_probe_backend.c ++++ b/drivers/xen/xenbus/xenbus_probe_backend.c +@@ -180,6 +180,12 @@ static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type, + return err; + } + ++static bool frontend_will_handle(struct xenbus_watch *watch, ++ const char *path, const char *token) ++{ ++ return watch->nr_pending == 0; ++} ++ + static void frontend_changed(struct xenbus_watch *watch, + const char *path, const char *token) + { +@@ -191,6 +197,7 @@ static struct xen_bus_type xenbus_backend = { + .levels = 3, /* backend/type/<frontend>/<id> */ + .get_bus_id = backend_bus_id, + .probe = xenbus_probe_backend, ++ 
.otherend_will_handle = frontend_will_handle, + .otherend_changed = frontend_changed, + .bus = { + .name = "xen-backend", +diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c +index 3a06eb699f333..12e02eb01f599 100644 +--- a/drivers/xen/xenbus/xenbus_xs.c ++++ b/drivers/xen/xenbus/xenbus_xs.c +@@ -705,9 +705,13 @@ int xs_watch_msg(struct xs_watch_event *event) + + spin_lock(&watches_lock); + event->handle = find_watch(event->token); +- if (event->handle != NULL) { ++ if (event->handle != NULL && ++ (!event->handle->will_handle || ++ event->handle->will_handle(event->handle, ++ event->path, event->token))) { + spin_lock(&watch_events_lock); + list_add_tail(&event->list, &watch_events); ++ event->handle->nr_pending++; + wake_up(&watch_events_waitq); + spin_unlock(&watch_events_lock); + } else +@@ -765,6 +769,8 @@ int register_xenbus_watch(struct xenbus_watch *watch) + + sprintf(token, "%lX", (long)watch); + ++ watch->nr_pending = 0; ++ + down_read(&xs_watch_rwsem); + + spin_lock(&watches_lock); +@@ -814,11 +820,14 @@ void unregister_xenbus_watch(struct xenbus_watch *watch) + + /* Cancel pending watch events. 
*/ + spin_lock(&watch_events_lock); +- list_for_each_entry_safe(event, tmp, &watch_events, list) { +- if (event->handle != watch) +- continue; +- list_del(&event->list); +- kfree(event); ++ if (watch->nr_pending) { ++ list_for_each_entry_safe(event, tmp, &watch_events, list) { ++ if (event->handle != watch) ++ continue; ++ list_del(&event->list); ++ kfree(event); ++ } ++ watch->nr_pending = 0; + } + spin_unlock(&watch_events_lock); + +@@ -865,7 +874,6 @@ void xs_suspend_cancel(void) + + static int xenwatch_thread(void *unused) + { +- struct list_head *ent; + struct xs_watch_event *event; + + xenwatch_pid = current->pid; +@@ -880,13 +888,15 @@ static int xenwatch_thread(void *unused) + mutex_lock(&xenwatch_mutex); + + spin_lock(&watch_events_lock); +- ent = watch_events.next; +- if (ent != &watch_events) +- list_del(ent); ++ event = list_first_entry_or_null(&watch_events, ++ struct xs_watch_event, list); ++ if (event) { ++ list_del(&event->list); ++ event->handle->nr_pending--; ++ } + spin_unlock(&watch_events_lock); + +- if (ent != &watch_events) { +- event = list_entry(ent, struct xs_watch_event, list); ++ if (event) { + event->handle->callback(event->handle, event->path, + event->token); + kfree(event); +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 0b29bdb251050..62461239600fc 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -2593,7 +2593,6 @@ int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, + u64 start, u64 len, int delalloc); + int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start, + u64 len); +-void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info); + int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans); + int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, + struct btrfs_ref *generic_ref); +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index 5fd60b13f4f83..4209dbd6286e4 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -2730,31 +2730,6 @@ 
btrfs_inc_block_group_reservations(struct btrfs_block_group *bg) + atomic_inc(&bg->reservations); + } + +-void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info) +-{ +- struct btrfs_caching_control *next; +- struct btrfs_caching_control *caching_ctl; +- struct btrfs_block_group *cache; +- +- down_write(&fs_info->commit_root_sem); +- +- list_for_each_entry_safe(caching_ctl, next, +- &fs_info->caching_block_groups, list) { +- cache = caching_ctl->block_group; +- if (btrfs_block_group_done(cache)) { +- cache->last_byte_to_unpin = (u64)-1; +- list_del_init(&caching_ctl->list); +- btrfs_put_caching_control(caching_ctl); +- } else { +- cache->last_byte_to_unpin = caching_ctl->progress; +- } +- } +- +- up_write(&fs_info->commit_root_sem); +- +- btrfs_update_global_block_rsv(fs_info); +-} +- + /* + * Returns the free cluster for the given space info and sets empty_cluster to + * what it should be based on the mount options. +@@ -2816,10 +2791,10 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info, + len = cache->start + cache->length - start; + len = min(len, end + 1 - start); + +- if (start < cache->last_byte_to_unpin) { +- len = min(len, cache->last_byte_to_unpin - start); +- if (return_free_space) +- btrfs_add_free_space(cache, start, len); ++ if (start < cache->last_byte_to_unpin && return_free_space) { ++ u64 add_len = min(len, cache->last_byte_to_unpin - start); ++ ++ btrfs_add_free_space(cache, start, add_len); + } + + start += len; +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index 69a384145dc6f..e8ca229a216be 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -1275,6 +1275,7 @@ static int cluster_pages_for_defrag(struct inode *inode, + u64 page_end; + u64 page_cnt; + u64 start = (u64)start_index << PAGE_SHIFT; ++ u64 search_start; + int ret; + int i; + int i_done; +@@ -1371,6 +1372,40 @@ again: + + lock_extent_bits(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, &cached_state); ++ ++ /* ++ * When defragmenting we skip ranges 
that have holes or inline extents, ++ * (check should_defrag_range()), to avoid unnecessary IO and wasting ++ * space. At btrfs_defrag_file(), we check if a range should be defragged ++ * before locking the inode and then, if it should, we trigger a sync ++ * page cache readahead - we lock the inode only after that to avoid ++ * blocking for too long other tasks that possibly want to operate on ++ * other file ranges. But before we were able to get the inode lock, ++ * some other task may have punched a hole in the range, or we may have ++ * now an inline extent, in which case we should not defrag. So check ++ * for that here, where we have the inode and the range locked, and bail ++ * out if that happened. ++ */ ++ search_start = page_start; ++ while (search_start < page_end) { ++ struct extent_map *em; ++ ++ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, search_start, ++ page_end - search_start); ++ if (IS_ERR(em)) { ++ ret = PTR_ERR(em); ++ goto out_unlock_range; ++ } ++ if (em->block_start >= EXTENT_MAP_LAST_BYTE) { ++ free_extent_map(em); ++ /* Ok, 0 means we did not defrag anything */ ++ ret = 0; ++ goto out_unlock_range; ++ } ++ search_start = extent_map_end(em); ++ free_extent_map(em); ++ } ++ + clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, + page_end - 1, EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | + EXTENT_DEFRAG, 0, 0, &cached_state); +@@ -1401,6 +1436,10 @@ again: + btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT); + extent_changeset_free(data_reserved); + return i_done; ++ ++out_unlock_range: ++ unlock_extent_cached(&BTRFS_I(inode)->io_tree, ++ page_start, page_end - 1, &cached_state); + out: + for (i = 0; i < i_done; i++) { + unlock_page(pages[i]); +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c +index 52ada47aff50d..96dbfc011f45d 100644 +--- a/fs/btrfs/transaction.c ++++ b/fs/btrfs/transaction.c +@@ -155,6 +155,7 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans) + struct 
btrfs_transaction *cur_trans = trans->transaction; + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_root *root, *tmp; ++ struct btrfs_caching_control *caching_ctl, *next; + + down_write(&fs_info->commit_root_sem); + list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits, +@@ -180,6 +181,45 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans) + spin_lock(&cur_trans->dropped_roots_lock); + } + spin_unlock(&cur_trans->dropped_roots_lock); ++ ++ /* ++ * We have to update the last_byte_to_unpin under the commit_root_sem, ++ * at the same time we swap out the commit roots. ++ * ++ * This is because we must have a real view of the last spot the caching ++ * kthreads were while caching. Consider the following views of the ++ * extent tree for a block group ++ * ++ * commit root ++ * +----+----+----+----+----+----+----+ ++ * |\\\\| |\\\\|\\\\| |\\\\|\\\\| ++ * +----+----+----+----+----+----+----+ ++ * 0 1 2 3 4 5 6 7 ++ * ++ * new commit root ++ * +----+----+----+----+----+----+----+ ++ * | | | |\\\\| | |\\\\| ++ * +----+----+----+----+----+----+----+ ++ * 0 1 2 3 4 5 6 7 ++ * ++ * If the cache_ctl->progress was at 3, then we are only allowed to ++ * unpin [0,1) and [2,3], because the caching thread has already ++ * processed those extents. We are not allowed to unpin [5,6), because ++ * the caching thread will re-start it's search from 3, and thus find ++ * the hole from [4,6) to add to the free space cache. 
++ */ ++ list_for_each_entry_safe(caching_ctl, next, ++ &fs_info->caching_block_groups, list) { ++ struct btrfs_block_group *cache = caching_ctl->block_group; ++ ++ if (btrfs_block_group_done(cache)) { ++ cache->last_byte_to_unpin = (u64)-1; ++ list_del_init(&caching_ctl->list); ++ btrfs_put_caching_control(caching_ctl); ++ } else { ++ cache->last_byte_to_unpin = caching_ctl->progress; ++ } ++ } + up_write(&fs_info->commit_root_sem); + } + +@@ -2293,8 +2333,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) + goto unlock_tree_log; + } + +- btrfs_prepare_extent_commit(fs_info); +- + cur_trans = fs_info->running_transaction; + + btrfs_set_root_node(&fs_info->tree_root->root_item, +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c +index ded4229c314a0..2b200b5a44c3a 100644 +--- a/fs/ceph/caps.c ++++ b/fs/ceph/caps.c +@@ -1140,12 +1140,19 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) + { + struct ceph_mds_session *session = cap->session; + struct ceph_inode_info *ci = cap->ci; +- struct ceph_mds_client *mdsc = +- ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; ++ struct ceph_mds_client *mdsc; + int removed = 0; + ++ /* 'ci' being NULL means the remove have already occurred */ ++ if (!ci) { ++ dout("%s: cap inode is NULL\n", __func__); ++ return; ++ } ++ + dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); + ++ mdsc = ceph_inode_to_client(&ci->vfs_inode)->mdsc; ++ + /* remove from inode's cap rbtree, and clear auth cap */ + rb_erase(&cap->ci_node, &ci->i_caps); + if (ci->i_auth_cap == cap) { +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c +index d88e2683626e7..2da6b41cb5526 100644 +--- a/fs/cifs/smb2misc.c ++++ b/fs/cifs/smb2misc.c +@@ -94,6 +94,8 @@ static const __le16 smb2_rsp_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = { + /* SMB2_OPLOCK_BREAK */ cpu_to_le16(24) + }; + ++#define SMB311_NEGPROT_BASE_SIZE (sizeof(struct smb2_sync_hdr) + sizeof(struct smb2_negotiate_rsp)) ++ + static __u32 get_neg_ctxt_len(struct smb2_sync_hdr 
*hdr, __u32 len, + __u32 non_ctxlen) + { +@@ -109,11 +111,17 @@ static __u32 get_neg_ctxt_len(struct smb2_sync_hdr *hdr, __u32 len, + + /* Make sure that negotiate contexts start after gss security blob */ + nc_offset = le32_to_cpu(pneg_rsp->NegotiateContextOffset); +- if (nc_offset < non_ctxlen) { +- pr_warn_once("Invalid negotiate context offset\n"); ++ if (nc_offset + 1 < non_ctxlen) { ++ pr_warn_once("Invalid negotiate context offset %d\n", nc_offset); + return 0; +- } +- size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen; ++ } else if (nc_offset + 1 == non_ctxlen) { ++ cifs_dbg(FYI, "no SPNEGO security blob in negprot rsp\n"); ++ size_of_pad_before_neg_ctxts = 0; ++ } else if (non_ctxlen == SMB311_NEGPROT_BASE_SIZE) ++ /* has padding, but no SPNEGO blob */ ++ size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen + 1; ++ else ++ size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen; + + /* Verify that at least minimal negotiate contexts fit within frame */ + if (len < nc_offset + (neg_count * sizeof(struct smb2_neg_context))) { +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index 3d914d7d0d110..22f1d8dc12b00 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -477,7 +477,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, + goto out; + } + +- if (bytes_left || p->Next) ++ /* Azure rounds the buffer size up 8, to a 16 byte boundary */ ++ if ((bytes_left > 8) || p->Next) + cifs_dbg(VFS, "%s: incomplete interface info\n", __func__); + + +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index acb72705062dd..fc06c762fbbf6 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -427,8 +427,8 @@ build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt) + pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES; + pneg_ctxt->DataLength = cpu_to_le16(38); + pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1); +- pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE); +- get_random_bytes(pneg_ctxt->Salt, 
SMB311_SALT_SIZE); ++ pneg_ctxt->SaltLength = cpu_to_le16(SMB311_LINUX_CLIENT_SALT_SIZE); ++ get_random_bytes(pneg_ctxt->Salt, SMB311_LINUX_CLIENT_SALT_SIZE); + pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512; + } + +@@ -566,6 +566,9 @@ static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt) + if (len < MIN_PREAUTH_CTXT_DATA_LEN) { + pr_warn_once("server sent bad preauth context\n"); + return; ++ } else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) { ++ pr_warn_once("server sent invalid SaltLength\n"); ++ return; + } + if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1) + pr_warn_once("Invalid SMB3 hash algorithm count\n"); +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h +index fa57b03ca98c4..204a622b89ed3 100644 +--- a/fs/cifs/smb2pdu.h ++++ b/fs/cifs/smb2pdu.h +@@ -333,12 +333,20 @@ struct smb2_neg_context { + /* Followed by array of data */ + } __packed; + +-#define SMB311_SALT_SIZE 32 ++#define SMB311_LINUX_CLIENT_SALT_SIZE 32 + /* Hash Algorithm Types */ + #define SMB2_PREAUTH_INTEGRITY_SHA512 cpu_to_le16(0x0001) + #define SMB2_PREAUTH_HASH_SIZE 64 + +-#define MIN_PREAUTH_CTXT_DATA_LEN (SMB311_SALT_SIZE + 6) ++/* ++ * SaltLength that the server send can be zero, so the only three required ++ * fields (all __le16) end up six bytes total, so the minimum context data len ++ * in the response is six bytes which accounts for ++ * ++ * HashAlgorithmCount, SaltLength, and 1 HashAlgorithm. 
++ */ ++#define MIN_PREAUTH_CTXT_DATA_LEN 6 ++ + struct smb2_preauth_neg_context { + __le16 ContextType; /* 1 */ + __le16 DataLength; +@@ -346,7 +354,7 @@ struct smb2_preauth_neg_context { + __le16 HashAlgorithmCount; /* 1 */ + __le16 SaltLength; + __le16 HashAlgorithms; /* HashAlgorithms[0] since only one defined */ +- __u8 Salt[SMB311_SALT_SIZE]; ++ __u8 Salt[SMB311_LINUX_CLIENT_SALT_SIZE]; + } __packed; + + /* Encryption Algorithms Ciphers */ +diff --git a/fs/erofs/data.c b/fs/erofs/data.c +index 347be146884c3..ea4f693bee224 100644 +--- a/fs/erofs/data.c ++++ b/fs/erofs/data.c +@@ -312,27 +312,12 @@ static void erofs_raw_access_readahead(struct readahead_control *rac) + submit_bio(bio); + } + +-static int erofs_get_block(struct inode *inode, sector_t iblock, +- struct buffer_head *bh, int create) +-{ +- struct erofs_map_blocks map = { +- .m_la = iblock << 9, +- }; +- int err; +- +- err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); +- if (err) +- return err; +- +- if (map.m_flags & EROFS_MAP_MAPPED) +- bh->b_blocknr = erofs_blknr(map.m_pa); +- +- return err; +-} +- + static sector_t erofs_bmap(struct address_space *mapping, sector_t block) + { + struct inode *inode = mapping->host; ++ struct erofs_map_blocks map = { ++ .m_la = blknr_to_addr(block), ++ }; + + if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) { + erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE; +@@ -341,7 +326,10 @@ static sector_t erofs_bmap(struct address_space *mapping, sector_t block) + return 0; + } + +- return generic_block_bmap(mapping, block, erofs_get_block); ++ if (!erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW)) ++ return erofs_blknr(map.m_pa); ++ ++ return 0; + } + + /* for uncompressed (aligned) files and raw access for other files */ +diff --git a/fs/eventpoll.c b/fs/eventpoll.c +index 4df61129566d4..117b1c395ae4a 100644 +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -1902,23 +1902,30 @@ fetch_events: + } + write_unlock_irq(&ep->lock); + +- if (eavail 
|| res) +- break; ++ if (!eavail && !res) ++ timed_out = !schedule_hrtimeout_range(to, slack, ++ HRTIMER_MODE_ABS); + +- if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) { +- timed_out = 1; +- break; +- } +- +- /* We were woken up, thus go and try to harvest some events */ ++ /* ++ * We were woken up, thus go and try to harvest some events. ++ * If timed out and still on the wait queue, recheck eavail ++ * carefully under lock, below. ++ */ + eavail = 1; +- + } while (0); + + __set_current_state(TASK_RUNNING); + + if (!list_empty_careful(&wait.entry)) { + write_lock_irq(&ep->lock); ++ /* ++ * If the thread timed out and is not on the wait queue, it ++ * means that the thread was woken up after its timeout expired ++ * before it could reacquire the lock. Thus, when wait.entry is ++ * empty, it needs to harvest events. ++ */ ++ if (timed_out) ++ eavail = list_empty(&wait.entry); + __remove_wait_queue(&ep->wq, &wait); + write_unlock_irq(&ep->lock); + } +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 17d7096b3212d..12eac88373032 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -5815,8 +5815,8 @@ int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start, + int ret; + + path = ext4_find_extent(inode, start, NULL, 0); +- if (!path) +- return -EINVAL; ++ if (IS_ERR(path)) ++ return PTR_ERR(path); + ex = path[path->p_depth].p_ext; + if (!ex) { + ret = -EFSCORRUPTED; +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 0d8385aea8981..0afab6d5c65bd 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -175,6 +175,7 @@ void ext4_evict_inode(struct inode *inode) + */ + int extra_credits = 6; + struct ext4_xattr_inode_array *ea_inode_array = NULL; ++ bool freeze_protected = false; + + trace_ext4_evict_inode(inode); + +@@ -232,9 +233,14 @@ void ext4_evict_inode(struct inode *inode) + + /* + * Protect us against freezing - iput() caller didn't have to have any +- * protection against it ++ * protection against it. 
When we are in a running transaction though, ++ * we are already protected against freezing and we cannot grab further ++ * protection due to lock ordering constraints. + */ +- sb_start_intwrite(inode->i_sb); ++ if (!ext4_journal_current_handle()) { ++ sb_start_intwrite(inode->i_sb); ++ freeze_protected = true; ++ } + + if (!IS_NOQUOTA(inode)) + extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb); +@@ -253,7 +259,8 @@ void ext4_evict_inode(struct inode *inode) + * cleaned up. + */ + ext4_orphan_del(NULL, inode); +- sb_end_intwrite(inode->i_sb); ++ if (freeze_protected) ++ sb_end_intwrite(inode->i_sb); + goto no_delete; + } + +@@ -294,7 +301,8 @@ void ext4_evict_inode(struct inode *inode) + stop_handle: + ext4_journal_stop(handle); + ext4_orphan_del(NULL, inode); +- sb_end_intwrite(inode->i_sb); ++ if (freeze_protected) ++ sb_end_intwrite(inode->i_sb); + ext4_xattr_inode_array_free(ea_inode_array); + goto no_delete; + } +@@ -323,7 +331,8 @@ stop_handle: + else + ext4_free_inode(handle, inode); + ext4_journal_stop(handle); +- sb_end_intwrite(inode->i_sb); ++ if (freeze_protected) ++ sb_end_intwrite(inode->i_sb); + ext4_xattr_inode_array_free(ea_inode_array); + return; + no_delete: +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 24af9ed5c3e52..37a619bf1ac7c 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -5126,6 +5126,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, + ext4_group_first_block_no(sb, group) + + EXT4_C2B(sbi, cluster), + "Block already on to-be-freed list"); ++ kmem_cache_free(ext4_free_data_cachep, new_entry); + return 0; + } + } +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 94472044f4c1d..2b08b162075c3 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -666,19 +666,17 @@ static bool system_going_down(void) + + static void ext4_handle_error(struct super_block *sb) + { ++ journal_t *journal = EXT4_SB(sb)->s_journal; ++ + if (test_opt(sb, WARN_ON_ERROR)) + WARN_ON_ONCE(1); + +- if 
(sb_rdonly(sb)) ++ if (sb_rdonly(sb) || test_opt(sb, ERRORS_CONT)) + return; + +- if (!test_opt(sb, ERRORS_CONT)) { +- journal_t *journal = EXT4_SB(sb)->s_journal; +- +- ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED); +- if (journal) +- jbd2_journal_abort(journal, -EIO); +- } ++ ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED); ++ if (journal) ++ jbd2_journal_abort(journal, -EIO); + /* + * We force ERRORS_RO behavior when system is rebooting. Otherwise we + * could panic during 'reboot -f' as the underlying device got already +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c +index d5d8ce077f295..42394de6c7eb1 100644 +--- a/fs/f2fs/node.c ++++ b/fs/f2fs/node.c +@@ -109,7 +109,7 @@ static void clear_node_page_dirty(struct page *page) + + static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) + { +- return f2fs_get_meta_page(sbi, current_nat_addr(sbi, nid)); ++ return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid)); + } + + static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c +index 00eff2f518079..fef22e476c526 100644 +--- a/fs/f2fs/super.c ++++ b/fs/f2fs/super.c +@@ -3918,6 +3918,7 @@ free_bio_info: + + #ifdef CONFIG_UNICODE + utf8_unload(sb->s_encoding); ++ sb->s_encoding = NULL; + #endif + free_options: + #ifdef CONFIG_QUOTA +diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c +index 21a9e534417c0..d2c0e58c6416f 100644 +--- a/fs/fuse/virtio_fs.c ++++ b/fs/fuse/virtio_fs.c +@@ -1464,6 +1464,8 @@ static int virtio_fs_get_tree(struct fs_context *fsc) + if (!sb->s_root) { + err = virtio_fs_fill_super(sb, fsc); + if (err) { ++ fuse_mount_put(fm); ++ sb->s_fs_info = NULL; + deactivate_locked_super(sb); + return err; + } +diff --git a/fs/inode.c b/fs/inode.c +index 9d78c37b00b81..5eea9912a0b9d 100644 +--- a/fs/inode.c ++++ b/fs/inode.c +@@ -1627,7 +1627,9 @@ static void iput_final(struct inode *inode) + else + drop = generic_drop_inode(inode); + +- if (!drop && (sb->s_flags & 
SB_ACTIVE)) { ++ if (!drop && ++ !(inode->i_state & I_DONTCACHE) && ++ (sb->s_flags & SB_ACTIVE)) { + inode_add_lru(inode); + spin_unlock(&inode->i_lock); + return; +diff --git a/fs/io-wq.h b/fs/io-wq.h +index cba36f03c3555..aaa363f358916 100644 +--- a/fs/io-wq.h ++++ b/fs/io-wq.h +@@ -59,6 +59,7 @@ static inline void wq_list_add_tail(struct io_wq_work_node *node, + list->last->next = node; + list->last = node; + } ++ node->next = NULL; + } + + static inline void wq_list_cut(struct io_wq_work_list *list, +diff --git a/fs/io_uring.c b/fs/io_uring.c +index 86dac2b2e2763..0fcd065baa760 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -1641,10 +1641,6 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, + + spin_lock_irqsave(&ctx->completion_lock, flags); + +- /* if force is set, the ring is going away. always drop after that */ +- if (force) +- ctx->cq_overflow_flushed = 1; +- + cqe = NULL; + list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) { + if (tsk && req->task != tsk) +@@ -2246,7 +2242,7 @@ static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush) + * we wake up the task, and the next invocation will flush the + * entries. We cannot safely to it from here. 
+ */ +- if (noflush && !list_empty(&ctx->cq_overflow_list)) ++ if (noflush) + return -1U; + + io_cqring_overflow_flush(ctx, false, NULL, NULL); +@@ -3052,9 +3048,7 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, + iov[0].iov_len = kbuf->len; + return 0; + } +- if (!req->rw.len) +- return 0; +- else if (req->rw.len > 1) ++ if (req->rw.len != 1) + return -EINVAL; + + #ifdef CONFIG_COMPAT +@@ -3948,11 +3942,17 @@ static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock, + head = idr_find(&ctx->io_buffer_idr, p->bgid); + if (head) + ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs); +- +- io_ring_submit_lock(ctx, !force_nonblock); + if (ret < 0) + req_set_fail_links(req); +- __io_req_complete(req, ret, 0, cs); ++ ++ /* need to hold the lock to complete IOPOLL requests */ ++ if (ctx->flags & IORING_SETUP_IOPOLL) { ++ __io_req_complete(req, ret, 0, cs); ++ io_ring_submit_unlock(ctx, !force_nonblock); ++ } else { ++ io_ring_submit_unlock(ctx, !force_nonblock); ++ __io_req_complete(req, ret, 0, cs); ++ } + return 0; + } + +@@ -4037,10 +4037,17 @@ static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock, + } + } + out: +- io_ring_submit_unlock(ctx, !force_nonblock); + if (ret < 0) + req_set_fail_links(req); +- __io_req_complete(req, ret, 0, cs); ++ ++ /* need to hold the lock to complete IOPOLL requests */ ++ if (ctx->flags & IORING_SETUP_IOPOLL) { ++ __io_req_complete(req, ret, 0, cs); ++ io_ring_submit_unlock(ctx, !force_nonblock); ++ } else { ++ io_ring_submit_unlock(ctx, !force_nonblock); ++ __io_req_complete(req, ret, 0, cs); ++ } + return 0; + } + +@@ -6074,8 +6081,28 @@ static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work) + } + + if (ret) { ++ struct io_ring_ctx *lock_ctx = NULL; ++ ++ if (req->ctx->flags & IORING_SETUP_IOPOLL) ++ lock_ctx = req->ctx; ++ ++ /* ++ * io_iopoll_complete() does not hold completion_lock to ++ * complete polled io, so here for polled io, we can not call ++ * 
io_req_complete() directly, otherwise there maybe concurrent ++ * access to cqring, defer_list, etc, which is not safe. Given ++ * that io_iopoll_complete() is always called under uring_lock, ++ * so here for polled io, we also get uring_lock to complete ++ * it. ++ */ ++ if (lock_ctx) ++ mutex_lock(&lock_ctx->uring_lock); ++ + req_set_fail_links(req); + io_req_complete(req, ret); ++ ++ if (lock_ctx) ++ mutex_unlock(&lock_ctx->uring_lock); + } + + return io_steal_work(req); +@@ -8369,28 +8396,35 @@ static void io_ring_exit_work(struct work_struct *work) + * as nobody else will be looking for them. + */ + do { +- if (ctx->rings) +- io_cqring_overflow_flush(ctx, true, NULL, NULL); + io_iopoll_try_reap_events(ctx); + } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20)); + io_ring_ctx_free(ctx); + } + ++static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) ++{ ++ struct io_kiocb *req = container_of(work, struct io_kiocb, work); ++ ++ return req->ctx == data; ++} ++ + static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) + { + mutex_lock(&ctx->uring_lock); + percpu_ref_kill(&ctx->refs); ++ /* if force is set, the ring is going away. 
always drop after that */ ++ ctx->cq_overflow_flushed = 1; ++ if (ctx->rings) ++ io_cqring_overflow_flush(ctx, true, NULL, NULL); + mutex_unlock(&ctx->uring_lock); + + io_kill_timeouts(ctx, NULL); + io_poll_remove_all(ctx, NULL); + + if (ctx->io_wq) +- io_wq_cancel_all(ctx->io_wq); ++ io_wq_cancel_cb(ctx->io_wq, io_cancel_ctx_cb, ctx, true); + + /* if we failed setting up the ctx, we might not have any rings */ +- if (ctx->rings) +- io_cqring_overflow_flush(ctx, true, NULL, NULL); + io_iopoll_try_reap_events(ctx); + idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx); + +@@ -8421,14 +8455,6 @@ static int io_uring_release(struct inode *inode, struct file *file) + return 0; + } + +-static bool io_wq_files_match(struct io_wq_work *work, void *data) +-{ +- struct files_struct *files = data; +- +- return !files || ((work->flags & IO_WQ_WORK_FILES) && +- work->identity->files == files); +-} +- + /* + * Returns true if 'preq' is the link parent of 'req' + */ +@@ -8566,21 +8592,20 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx, + * Returns true if we found and killed one or more files pinning requests + */ + static bool io_uring_cancel_files(struct io_ring_ctx *ctx, ++ struct task_struct *task, + struct files_struct *files) + { + if (list_empty_careful(&ctx->inflight_list)) + return false; + +- /* cancel all at once, should be faster than doing it one by one*/ +- io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true); +- + while (!list_empty_careful(&ctx->inflight_list)) { + struct io_kiocb *cancel_req = NULL, *req; + DEFINE_WAIT(wait); + + spin_lock_irq(&ctx->inflight_lock); + list_for_each_entry(req, &ctx->inflight_list, inflight_entry) { +- if (files && (req->work.flags & IO_WQ_WORK_FILES) && ++ if (req->task == task && ++ (req->work.flags & IO_WQ_WORK_FILES) && + req->work.identity->files != files) + continue; + /* req is being completed, ignore */ +@@ -8623,7 +8648,7 @@ static bool __io_uring_cancel_task_requests(struct io_ring_ctx 
*ctx, + { + bool ret; + +- ret = io_uring_cancel_files(ctx, files); ++ ret = io_uring_cancel_files(ctx, task, files); + if (!files) { + enum io_wq_cancel cret; + +@@ -8662,12 +8687,10 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx, + io_sq_thread_park(ctx->sq_data); + } + +- if (files) +- io_cancel_defer_files(ctx, NULL, files); +- else +- io_cancel_defer_files(ctx, task, NULL); +- ++ io_cancel_defer_files(ctx, task, files); ++ io_ring_submit_lock(ctx, (ctx->flags & IORING_SETUP_IOPOLL)); + io_cqring_overflow_flush(ctx, true, task, files); ++ io_ring_submit_unlock(ctx, (ctx->flags & IORING_SETUP_IOPOLL)); + + while (__io_uring_cancel_task_requests(ctx, task, files)) { + io_run_task_work(); +@@ -8692,10 +8715,9 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx, + static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file) + { + struct io_uring_task *tctx = current->io_uring; ++ int ret; + + if (unlikely(!tctx)) { +- int ret; +- + ret = io_uring_alloc_task_context(current); + if (unlikely(ret)) + return ret; +@@ -8706,7 +8728,12 @@ static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file) + + if (!old) { + get_file(file); +- xa_store(&tctx->xa, (unsigned long)file, file, GFP_KERNEL); ++ ret = xa_err(xa_store(&tctx->xa, (unsigned long)file, ++ file, GFP_KERNEL)); ++ if (ret) { ++ fput(file); ++ return ret; ++ } + } + tctx->last = file; + } +@@ -8969,8 +8996,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, + */ + ret = 0; + if (ctx->flags & IORING_SETUP_SQPOLL) { ++ io_ring_submit_lock(ctx, (ctx->flags & IORING_SETUP_IOPOLL)); + if (!list_empty_careful(&ctx->cq_overflow_list)) + io_cqring_overflow_flush(ctx, false, NULL, NULL); ++ io_ring_submit_unlock(ctx, (ctx->flags & IORING_SETUP_IOPOLL)); + if (flags & IORING_ENTER_SQ_WAKEUP) + wake_up(&ctx->sq_data->wait); + if (flags & IORING_ENTER_SQ_WAIT) +@@ -9173,55 +9202,52 @@ static int io_allocate_scq_urings(struct 
io_ring_ctx *ctx, + return 0; + } + ++static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file) ++{ ++ int ret, fd; ++ ++ fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); ++ if (fd < 0) ++ return fd; ++ ++ ret = io_uring_add_task_file(ctx, file); ++ if (ret) { ++ put_unused_fd(fd); ++ return ret; ++ } ++ fd_install(fd, file); ++ return fd; ++} ++ + /* + * Allocate an anonymous fd, this is what constitutes the application + * visible backing of an io_uring instance. The application mmaps this + * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled, + * we have to tie this fd to a socket for file garbage collection purposes. + */ +-static int io_uring_get_fd(struct io_ring_ctx *ctx) ++static struct file *io_uring_get_file(struct io_ring_ctx *ctx) + { + struct file *file; ++#if defined(CONFIG_UNIX) + int ret; +- int fd; + +-#if defined(CONFIG_UNIX) + ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP, + &ctx->ring_sock); + if (ret) +- return ret; ++ return ERR_PTR(ret); + #endif + +- ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC); +- if (ret < 0) +- goto err; +- fd = ret; +- + file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx, + O_RDWR | O_CLOEXEC); +- if (IS_ERR(file)) { +- put_unused_fd(fd); +- ret = PTR_ERR(file); +- goto err; +- } +- + #if defined(CONFIG_UNIX) +- ctx->ring_sock->file = file; +-#endif +- ret = io_uring_add_task_file(ctx, file); +- if (ret) { +- fput(file); +- put_unused_fd(fd); +- goto err; ++ if (IS_ERR(file)) { ++ sock_release(ctx->ring_sock); ++ ctx->ring_sock = NULL; ++ } else { ++ ctx->ring_sock->file = file; + } +- fd_install(fd, file); +- return fd; +-err: +-#if defined(CONFIG_UNIX) +- sock_release(ctx->ring_sock); +- ctx->ring_sock = NULL; + #endif +- return ret; ++ return file; + } + + static int io_uring_create(unsigned entries, struct io_uring_params *p, +@@ -9229,6 +9255,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, + { + struct user_struct *user = 
NULL; + struct io_ring_ctx *ctx; ++ struct file *file; + bool limit_mem; + int ret; + +@@ -9375,13 +9402,22 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, + goto err; + } + ++ file = io_uring_get_file(ctx); ++ if (IS_ERR(file)) { ++ ret = PTR_ERR(file); ++ goto err; ++ } ++ + /* + * Install ring fd as the very last thing, so we don't risk someone + * having closed it before we finish setup + */ +- ret = io_uring_get_fd(ctx); +- if (ret < 0) +- goto err; ++ ret = io_uring_install_fd(ctx, file); ++ if (ret < 0) { ++ /* fput will clean it up */ ++ fput(file); ++ return ret; ++ } + + trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags); + return ret; +diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c +index 2f6f0b140c05a..03b4f99614bef 100644 +--- a/fs/jffs2/readinode.c ++++ b/fs/jffs2/readinode.c +@@ -672,6 +672,22 @@ static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_r + jffs2_free_full_dirent(fd); + return -EIO; + } ++ ++#ifdef CONFIG_JFFS2_SUMMARY ++ /* ++ * we use CONFIG_JFFS2_SUMMARY because without it, we ++ * have checked it while mounting ++ */ ++ crc = crc32(0, fd->name, rd->nsize); ++ if (unlikely(crc != je32_to_cpu(rd->name_crc))) { ++ JFFS2_NOTICE("name CRC failed on dirent node at" ++ "%#08x: read %#08x,calculated %#08x\n", ++ ref_offset(ref), je32_to_cpu(rd->node_crc), crc); ++ jffs2_mark_node_obsolete(c, ref); ++ jffs2_free_full_dirent(fd); ++ return 0; ++ } ++#endif + } + + fd->nhash = full_name_hash(NULL, fd->name, rd->nsize); +diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c +index 05d7878dfad15..4fd297bdf0f3f 100644 +--- a/fs/jffs2/super.c ++++ b/fs/jffs2/super.c +@@ -215,11 +215,28 @@ static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param) + return 0; + } + ++static inline void jffs2_update_mount_opts(struct fs_context *fc) ++{ ++ struct jffs2_sb_info *new_c = fc->s_fs_info; ++ struct jffs2_sb_info *c = JFFS2_SB_INFO(fc->root->d_sb); ++ ++ 
mutex_lock(&c->alloc_sem); ++ if (new_c->mount_opts.override_compr) { ++ c->mount_opts.override_compr = new_c->mount_opts.override_compr; ++ c->mount_opts.compr = new_c->mount_opts.compr; ++ } ++ if (new_c->mount_opts.rp_size) ++ c->mount_opts.rp_size = new_c->mount_opts.rp_size; ++ mutex_unlock(&c->alloc_sem); ++} ++ + static int jffs2_reconfigure(struct fs_context *fc) + { + struct super_block *sb = fc->root->d_sb; + + sync_filesystem(sb); ++ jffs2_update_mount_opts(fc); ++ + return jffs2_do_remount_fs(sb, fc); + } + +diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h +index 29891fad3f095..aa03a904d5ab2 100644 +--- a/fs/jfs/jfs_dmap.h ++++ b/fs/jfs/jfs_dmap.h +@@ -183,7 +183,7 @@ typedef union dmtree { + #define dmt_leafidx t1.leafidx + #define dmt_height t1.height + #define dmt_budmin t1.budmin +-#define dmt_stree t1.stree ++#define dmt_stree t2.stree + + /* + * on-disk aggregate disk allocation map descriptor. +diff --git a/fs/lockd/host.c b/fs/lockd/host.c +index 0afb6d59bad03..771c289f6df7f 100644 +--- a/fs/lockd/host.c ++++ b/fs/lockd/host.c +@@ -439,12 +439,7 @@ nlm_bind_host(struct nlm_host *host) + * RPC rebind is required + */ + if ((clnt = host->h_rpcclnt) != NULL) { +- if (time_after_eq(jiffies, host->h_nextrebind)) { +- rpc_force_rebind(clnt); +- host->h_nextrebind = jiffies + NLM_HOST_REBIND; +- dprintk("lockd: next rebind in %lu jiffies\n", +- host->h_nextrebind - jiffies); +- } ++ nlm_rebind_host(host); + } else { + unsigned long increment = nlmsvc_timeout; + struct rpc_timeout timeparms = { +@@ -494,13 +489,20 @@ nlm_bind_host(struct nlm_host *host) + return clnt; + } + +-/* +- * Force a portmap lookup of the remote lockd port ++/** ++ * nlm_rebind_host - If needed, force a portmap lookup of the peer's lockd port ++ * @host: NLM host handle for peer ++ * ++ * This is not needed when using a connection-oriented protocol, such as TCP. ++ * The existing autobind mechanism is sufficient to force a rebind when ++ * required, e.g. 
on connection state transitions. + */ + void + nlm_rebind_host(struct nlm_host *host) + { +- dprintk("lockd: rebind host %s\n", host->h_name); ++ if (host->h_proto != IPPROTO_UDP) ++ return; ++ + if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) { + rpc_force_rebind(host->h_rpcclnt); + host->h_nextrebind = jiffies + NLM_HOST_REBIND; +diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c +index 24bf5797f88ae..fd0eda328943b 100644 +--- a/fs/nfs/flexfilelayout/flexfilelayout.c ++++ b/fs/nfs/flexfilelayout/flexfilelayout.c +@@ -1056,7 +1056,7 @@ static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr) + u32 idx = hdr->pgio_mirror_idx + 1; + u32 new_idx = 0; + +- if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx + 1, &new_idx)) ++ if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx)) + ff_layout_send_layouterror(hdr->lseg); + else + pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg); +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index aa6493905bbe8..43af053f467a7 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -2180,7 +2180,7 @@ static int nfsiod_start(void) + { + struct workqueue_struct *wq; + dprintk("RPC: creating workqueue nfsiod\n"); +- wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0); ++ wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0); + if (wq == NULL) + return -ENOMEM; + nfsiod_workqueue = wq; +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index e89468678ae16..6858b4bb556d5 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -4961,12 +4961,12 @@ static int _nfs4_proc_readdir(struct dentry *dentry, const struct cred *cred, + u64 cookie, struct page **pages, unsigned int count, bool plus) + { + struct inode *dir = d_inode(dentry); ++ struct nfs_server *server = NFS_SERVER(dir); + struct nfs4_readdir_arg args = { + .fh = NFS_FH(dir), + .pages = pages, + .pgbase = 0, + .count = count, +- .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask, + 
.plus = plus, + }; + struct nfs4_readdir_res res; +@@ -4981,9 +4981,15 @@ static int _nfs4_proc_readdir(struct dentry *dentry, const struct cred *cred, + dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__, + dentry, + (unsigned long long)cookie); ++ if (!(server->caps & NFS_CAP_SECURITY_LABEL)) ++ args.bitmask = server->attr_bitmask_nl; ++ else ++ args.bitmask = server->attr_bitmask; ++ + nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); + res.pgbase = args.pgbase; +- status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); ++ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, ++ &res.seq_res, 0); + if (status >= 0) { + memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); + status += args.pgbase; +diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c +index c6dbfcae75171..c16b93df1bc14 100644 +--- a/fs/nfs/nfs4xdr.c ++++ b/fs/nfs/nfs4xdr.c +@@ -3009,15 +3009,19 @@ static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, + struct compound_hdr hdr = { + .minorversion = nfs4_xdr_minorversion(&args->seq_args), + }; ++ uint32_t replen; + + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); ++ ++ replen = hdr.replen + op_decode_hdr_maxsz; ++ + encode_getdeviceinfo(xdr, args, &hdr); + +- /* set up reply kvec. Subtract notification bitmap max size (2) +- * so that notification bitmap is put in xdr_buf tail */ ++ /* set up reply kvec. 
device_addr4 opaque data is read into the ++ * pages */ + rpc_prepare_reply_pages(req, args->pdev->pages, args->pdev->pgbase, +- args->pdev->pglen, hdr.replen - 2); ++ args->pdev->pglen, replen + 2 + 1); + encode_nops(&hdr); + } + +diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c +index b73d9dd37f73c..26f2a50eceac9 100644 +--- a/fs/nfs_common/grace.c ++++ b/fs/nfs_common/grace.c +@@ -69,10 +69,14 @@ __state_in_grace(struct net *net, bool open) + if (!open) + return !list_empty(grace_list); + ++ spin_lock(&grace_lock); + list_for_each_entry(lm, grace_list, list) { +- if (lm->block_opens) ++ if (lm->block_opens) { ++ spin_unlock(&grace_lock); + return true; ++ } + } ++ spin_unlock(&grace_lock); + return false; + } + +diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c +index 3c6c2f7d1688b..5849c1bd88f17 100644 +--- a/fs/nfsd/filecache.c ++++ b/fs/nfsd/filecache.c +@@ -600,7 +600,7 @@ static struct notifier_block nfsd_file_lease_notifier = { + static int + nfsd_file_fsnotify_handle_event(struct fsnotify_mark *mark, u32 mask, + struct inode *inode, struct inode *dir, +- const struct qstr *name) ++ const struct qstr *name, u32 cookie) + { + trace_nfsd_file_fsnotify_handle_event(inode, mask); + +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index d7f27ed6b7941..47006eec724e6 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -769,6 +769,7 @@ static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid, + spin_lock(&nn->s2s_cp_lock); + new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT); + stid->stid.si_opaque.so_id = new_id; ++ stid->stid.si_generation = 1; + spin_unlock(&nn->s2s_cp_lock); + idr_preload_end(); + if (new_id < 0) +diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c +index 27b1ad1361508..9323e30a7eafe 100644 +--- a/fs/nfsd/nfssvc.c ++++ b/fs/nfsd/nfssvc.c +@@ -527,8 +527,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net) + return; + + nfsd_shutdown_net(net); +- 
printk(KERN_WARNING "nfsd: last server has exited, flushing export " +- "cache\n"); ++ pr_info("nfsd: last server has exited, flushing export cache\n"); + nfsd_export_flush(net); + } + +diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c +index 5dcda8f20c04f..e45ca6ecba959 100644 +--- a/fs/notify/dnotify/dnotify.c ++++ b/fs/notify/dnotify/dnotify.c +@@ -72,7 +72,7 @@ static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark) + */ + static int dnotify_handle_event(struct fsnotify_mark *inode_mark, u32 mask, + struct inode *inode, struct inode *dir, +- const struct qstr *name) ++ const struct qstr *name, u32 cookie) + { + struct dnotify_mark *dn_mark; + struct dnotify_struct *dn; +diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c +index 9167884a61eca..1192c99536200 100644 +--- a/fs/notify/fanotify/fanotify.c ++++ b/fs/notify/fanotify/fanotify.c +@@ -268,12 +268,11 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, + continue; + + /* +- * If the event is for a child and this mark is on a parent not ++ * If the event is on a child and this mark is on a parent not + * watching children, don't send it! + */ +- if (event_mask & FS_EVENT_ON_CHILD && +- type == FSNOTIFY_OBJ_TYPE_INODE && +- !(mark->mask & FS_EVENT_ON_CHILD)) ++ if (type == FSNOTIFY_OBJ_TYPE_PARENT && ++ !(mark->mask & FS_EVENT_ON_CHILD)) + continue; + + marks_mask |= mark->mask; +diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c +index 8d3ad5ef29258..30d422b8c0fc7 100644 +--- a/fs/notify/fsnotify.c ++++ b/fs/notify/fsnotify.c +@@ -152,6 +152,13 @@ static bool fsnotify_event_needs_parent(struct inode *inode, struct mount *mnt, + if (mask & FS_ISDIR) + return false; + ++ /* ++ * All events that are possible on child can also may be reported with ++ * parent/name info to inode/sb/mount. Otherwise, a watching parent ++ * could result in events reported with unexpected name info to sb/mount. 
++ */ ++ BUILD_BUG_ON(FS_EVENTS_POSS_ON_CHILD & ~FS_EVENTS_POSS_TO_PARENT); ++ + /* Did either inode/sb/mount subscribe for events with parent/name? */ + marks_mask |= fsnotify_parent_needed_mask(inode->i_fsnotify_mask); + marks_mask |= fsnotify_parent_needed_mask(inode->i_sb->s_fsnotify_mask); +@@ -232,47 +239,76 @@ notify: + } + EXPORT_SYMBOL_GPL(__fsnotify_parent); + ++static int fsnotify_handle_inode_event(struct fsnotify_group *group, ++ struct fsnotify_mark *inode_mark, ++ u32 mask, const void *data, int data_type, ++ struct inode *dir, const struct qstr *name, ++ u32 cookie) ++{ ++ const struct path *path = fsnotify_data_path(data, data_type); ++ struct inode *inode = fsnotify_data_inode(data, data_type); ++ const struct fsnotify_ops *ops = group->ops; ++ ++ if (WARN_ON_ONCE(!ops->handle_inode_event)) ++ return 0; ++ ++ if ((inode_mark->mask & FS_EXCL_UNLINK) && ++ path && d_unlinked(path->dentry)) ++ return 0; ++ ++ /* Check interest of this mark in case event was sent with two marks */ ++ if (!(mask & inode_mark->mask & ALL_FSNOTIFY_EVENTS)) ++ return 0; ++ ++ return ops->handle_inode_event(inode_mark, mask, inode, dir, name, cookie); ++} ++ + static int fsnotify_handle_event(struct fsnotify_group *group, __u32 mask, + const void *data, int data_type, + struct inode *dir, const struct qstr *name, + u32 cookie, struct fsnotify_iter_info *iter_info) + { + struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); +- struct fsnotify_mark *child_mark = fsnotify_iter_child_mark(iter_info); +- struct inode *inode = fsnotify_data_inode(data, data_type); +- const struct fsnotify_ops *ops = group->ops; ++ struct fsnotify_mark *parent_mark = fsnotify_iter_parent_mark(iter_info); + int ret; + +- if (WARN_ON_ONCE(!ops->handle_inode_event)) +- return 0; +- + if (WARN_ON_ONCE(fsnotify_iter_sb_mark(iter_info)) || + WARN_ON_ONCE(fsnotify_iter_vfsmount_mark(iter_info))) + return 0; + +- /* +- * An event can be sent on child mark iterator instead of inode mark 
+- * iterator because of other groups that have interest of this inode +- * and have marks on both parent and child. We can simplify this case. +- */ +- if (!inode_mark) { +- inode_mark = child_mark; +- child_mark = NULL; ++ if (parent_mark) { ++ /* ++ * parent_mark indicates that the parent inode is watching ++ * children and interested in this event, which is an event ++ * possible on child. But is *this mark* watching children and ++ * interested in this event? ++ */ ++ if (parent_mark->mask & FS_EVENT_ON_CHILD) { ++ ret = fsnotify_handle_inode_event(group, parent_mark, mask, ++ data, data_type, dir, name, 0); ++ if (ret) ++ return ret; ++ } ++ if (!inode_mark) ++ return 0; ++ } ++ ++ if (mask & FS_EVENT_ON_CHILD) { ++ /* ++ * Some events can be sent on both parent dir and child marks ++ * (e.g. FS_ATTRIB). If both parent dir and child are ++ * watching, report the event once to parent dir with name (if ++ * interested) and once to child without name (if interested). ++ * The child watcher is expecting an event without a file name ++ * and without the FS_EVENT_ON_CHILD flag. ++ */ ++ mask &= ~FS_EVENT_ON_CHILD; + dir = NULL; + name = NULL; + } + +- ret = ops->handle_inode_event(inode_mark, mask, inode, dir, name); +- if (ret || !child_mark) +- return ret; +- +- /* +- * Some events can be sent on both parent dir and child marks +- * (e.g. FS_ATTRIB). If both parent dir and child are watching, +- * report the event once to parent dir with name and once to child +- * without name. 
+- */ +- return ops->handle_inode_event(child_mark, mask, inode, NULL, NULL); ++ return fsnotify_handle_inode_event(group, inode_mark, mask, data, data_type, ++ dir, name, cookie); + } + + static int send_to_group(__u32 mask, const void *data, int data_type, +@@ -430,7 +466,7 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, + struct fsnotify_iter_info iter_info = {}; + struct super_block *sb; + struct mount *mnt = NULL; +- struct inode *child = NULL; ++ struct inode *parent = NULL; + int ret = 0; + __u32 test_mask, marks_mask; + +@@ -442,11 +478,10 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, + inode = dir; + } else if (mask & FS_EVENT_ON_CHILD) { + /* +- * Event on child - report on TYPE_INODE to dir if it is +- * watching children and on TYPE_CHILD to child. ++ * Event on child - report on TYPE_PARENT to dir if it is ++ * watching children and on TYPE_INODE to child. + */ +- child = inode; +- inode = dir; ++ parent = dir; + } + sb = inode->i_sb; + +@@ -460,7 +495,7 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, + if (!sb->s_fsnotify_marks && + (!mnt || !mnt->mnt_fsnotify_marks) && + (!inode || !inode->i_fsnotify_marks) && +- (!child || !child->i_fsnotify_marks)) ++ (!parent || !parent->i_fsnotify_marks)) + return 0; + + marks_mask = sb->s_fsnotify_mask; +@@ -468,8 +503,8 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, + marks_mask |= mnt->mnt_fsnotify_mask; + if (inode) + marks_mask |= inode->i_fsnotify_mask; +- if (child) +- marks_mask |= child->i_fsnotify_mask; ++ if (parent) ++ marks_mask |= parent->i_fsnotify_mask; + + + /* +@@ -492,9 +527,9 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, + iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = + fsnotify_first_mark(&inode->i_fsnotify_marks); + } +- if (child) { +- iter_info.marks[FSNOTIFY_OBJ_TYPE_CHILD] = +- fsnotify_first_mark(&child->i_fsnotify_marks); ++ 
if (parent) { ++ iter_info.marks[FSNOTIFY_OBJ_TYPE_PARENT] = ++ fsnotify_first_mark(&parent->i_fsnotify_marks); + } + + /* +diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h +index 4327d0e9c3645..2007e37119160 100644 +--- a/fs/notify/inotify/inotify.h ++++ b/fs/notify/inotify/inotify.h +@@ -24,11 +24,10 @@ static inline struct inotify_event_info *INOTIFY_E(struct fsnotify_event *fse) + + extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark, + struct fsnotify_group *group); +-extern int inotify_handle_event(struct fsnotify_group *group, u32 mask, +- const void *data, int data_type, +- struct inode *dir, +- const struct qstr *file_name, u32 cookie, +- struct fsnotify_iter_info *iter_info); ++extern int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, ++ u32 mask, struct inode *inode, ++ struct inode *dir, ++ const struct qstr *name, u32 cookie); + + extern const struct fsnotify_ops inotify_fsnotify_ops; + extern struct kmem_cache *inotify_inode_mark_cachep; +diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c +index 9ddcbadc98e29..1901d799909b8 100644 +--- a/fs/notify/inotify/inotify_fsnotify.c ++++ b/fs/notify/inotify/inotify_fsnotify.c +@@ -55,25 +55,21 @@ static int inotify_merge(struct list_head *list, + return event_compare(last_event, event); + } + +-static int inotify_one_event(struct fsnotify_group *group, u32 mask, +- struct fsnotify_mark *inode_mark, +- const struct path *path, +- const struct qstr *file_name, u32 cookie) ++int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask, ++ struct inode *inode, struct inode *dir, ++ const struct qstr *name, u32 cookie) + { + struct inotify_inode_mark *i_mark; + struct inotify_event_info *event; + struct fsnotify_event *fsn_event; ++ struct fsnotify_group *group = inode_mark->group; + int ret; + int len = 0; + int alloc_len = sizeof(struct inotify_event_info); + struct mem_cgroup *old_memcg; + +- if 
((inode_mark->mask & FS_EXCL_UNLINK) && +- path && d_unlinked(path->dentry)) +- return 0; +- +- if (file_name) { +- len = file_name->len; ++ if (name) { ++ len = name->len; + alloc_len += len + 1; + } + +@@ -117,7 +113,7 @@ static int inotify_one_event(struct fsnotify_group *group, u32 mask, + event->sync_cookie = cookie; + event->name_len = len; + if (len) +- strcpy(event->name, file_name->name); ++ strcpy(event->name, name->name); + + ret = fsnotify_add_event(group, fsn_event, inotify_merge); + if (ret) { +@@ -131,37 +127,6 @@ static int inotify_one_event(struct fsnotify_group *group, u32 mask, + return 0; + } + +-int inotify_handle_event(struct fsnotify_group *group, u32 mask, +- const void *data, int data_type, struct inode *dir, +- const struct qstr *file_name, u32 cookie, +- struct fsnotify_iter_info *iter_info) +-{ +- const struct path *path = fsnotify_data_path(data, data_type); +- struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); +- struct fsnotify_mark *child_mark = fsnotify_iter_child_mark(iter_info); +- int ret = 0; +- +- if (WARN_ON(fsnotify_iter_vfsmount_mark(iter_info))) +- return 0; +- +- /* +- * Some events cannot be sent on both parent and child marks +- * (e.g. IN_CREATE). Those events are always sent on inode_mark. +- * For events that are possible on both parent and child (e.g. IN_OPEN), +- * event is sent on inode_mark with name if the parent is watching and +- * is sent on child_mark without name if child is watching. +- * If both parent and child are watching, report the event with child's +- * name here and report another event without child's name below. 
+- */ +- if (inode_mark) +- ret = inotify_one_event(group, mask, inode_mark, path, +- file_name, cookie); +- if (ret || !child_mark) +- return ret; +- +- return inotify_one_event(group, mask, child_mark, path, NULL, 0); +-} +- + static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group) + { + inotify_ignored_and_remove_idr(fsn_mark, group); +@@ -227,7 +192,7 @@ static void inotify_free_mark(struct fsnotify_mark *fsn_mark) + } + + const struct fsnotify_ops inotify_fsnotify_ops = { +- .handle_event = inotify_handle_event, ++ .handle_inode_event = inotify_handle_inode_event, + .free_group_priv = inotify_free_group_priv, + .free_event = inotify_free_event, + .freeing_mark = inotify_freeing_mark, +diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c +index 186722ba38947..5f6c6bf65909c 100644 +--- a/fs/notify/inotify/inotify_user.c ++++ b/fs/notify/inotify/inotify_user.c +@@ -486,14 +486,10 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark, + struct fsnotify_group *group) + { + struct inotify_inode_mark *i_mark; +- struct fsnotify_iter_info iter_info = { }; +- +- fsnotify_iter_set_report_type_mark(&iter_info, FSNOTIFY_OBJ_TYPE_INODE, +- fsn_mark); + + /* Queue ignore event for the watch */ +- inotify_handle_event(group, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, +- NULL, NULL, 0, &iter_info); ++ inotify_handle_inode_event(fsn_mark, FS_IN_IGNORED, NULL, NULL, NULL, ++ 0); + + i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); + /* remove this mark from the idr */ +diff --git a/fs/open.c b/fs/open.c +index 9af548fb841b0..4d7537ae59df5 100644 +--- a/fs/open.c ++++ b/fs/open.c +@@ -1010,6 +1010,10 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op) + if (how->resolve & ~VALID_RESOLVE_FLAGS) + return -EINVAL; + ++ /* Scoping flags are mutually exclusive. 
*/ ++ if ((how->resolve & RESOLVE_BENEATH) && (how->resolve & RESOLVE_IN_ROOT)) ++ return -EINVAL; ++ + /* Deal with the mode. */ + if (WILL_CREATE(flags)) { + if (how->mode & ~S_IALLUGO) +diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c +index efccb7c1f9bc5..a1f72ac053e5f 100644 +--- a/fs/overlayfs/file.c ++++ b/fs/overlayfs/file.c +@@ -541,46 +541,31 @@ static long ovl_real_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) + { + struct fd real; +- const struct cred *old_cred; + long ret; + + ret = ovl_real_fdget(file, &real); + if (ret) + return ret; + +- old_cred = ovl_override_creds(file_inode(file)->i_sb); + ret = security_file_ioctl(real.file, cmd, arg); +- if (!ret) ++ if (!ret) { ++ /* ++ * Don't override creds, since we currently can't safely check ++ * permissions before doing so. ++ */ + ret = vfs_ioctl(real.file, cmd, arg); +- revert_creds(old_cred); ++ } + + fdput(real); + + return ret; + } + +-static unsigned int ovl_iflags_to_fsflags(unsigned int iflags) +-{ +- unsigned int flags = 0; +- +- if (iflags & S_SYNC) +- flags |= FS_SYNC_FL; +- if (iflags & S_APPEND) +- flags |= FS_APPEND_FL; +- if (iflags & S_IMMUTABLE) +- flags |= FS_IMMUTABLE_FL; +- if (iflags & S_NOATIME) +- flags |= FS_NOATIME_FL; +- +- return flags; +-} +- + static long ovl_ioctl_set_flags(struct file *file, unsigned int cmd, +- unsigned long arg, unsigned int flags) ++ unsigned long arg) + { + long ret; + struct inode *inode = file_inode(file); +- unsigned int oldflags; + + if (!inode_owner_or_capable(inode)) + return -EACCES; +@@ -591,10 +576,13 @@ static long ovl_ioctl_set_flags(struct file *file, unsigned int cmd, + + inode_lock(inode); + +- /* Check the capability before cred override */ +- oldflags = ovl_iflags_to_fsflags(READ_ONCE(inode->i_flags)); +- ret = vfs_ioc_setflags_prepare(inode, oldflags, flags); +- if (ret) ++ /* ++ * Prevent copy up if immutable and has no CAP_LINUX_IMMUTABLE ++ * capability. 
++ */ ++ ret = -EPERM; ++ if (!ovl_has_upperdata(inode) && IS_IMMUTABLE(inode) && ++ !capable(CAP_LINUX_IMMUTABLE)) + goto unlock; + + ret = ovl_maybe_copy_up(file_dentry(file), O_WRONLY); +@@ -613,46 +601,6 @@ unlock: + + } + +-static long ovl_ioctl_set_fsflags(struct file *file, unsigned int cmd, +- unsigned long arg) +-{ +- unsigned int flags; +- +- if (get_user(flags, (int __user *) arg)) +- return -EFAULT; +- +- return ovl_ioctl_set_flags(file, cmd, arg, flags); +-} +- +-static unsigned int ovl_fsxflags_to_fsflags(unsigned int xflags) +-{ +- unsigned int flags = 0; +- +- if (xflags & FS_XFLAG_SYNC) +- flags |= FS_SYNC_FL; +- if (xflags & FS_XFLAG_APPEND) +- flags |= FS_APPEND_FL; +- if (xflags & FS_XFLAG_IMMUTABLE) +- flags |= FS_IMMUTABLE_FL; +- if (xflags & FS_XFLAG_NOATIME) +- flags |= FS_NOATIME_FL; +- +- return flags; +-} +- +-static long ovl_ioctl_set_fsxflags(struct file *file, unsigned int cmd, +- unsigned long arg) +-{ +- struct fsxattr fa; +- +- memset(&fa, 0, sizeof(fa)); +- if (copy_from_user(&fa, (void __user *) arg, sizeof(fa))) +- return -EFAULT; +- +- return ovl_ioctl_set_flags(file, cmd, arg, +- ovl_fsxflags_to_fsflags(fa.fsx_xflags)); +-} +- + long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + { + long ret; +@@ -663,12 +611,9 @@ long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + ret = ovl_real_ioctl(file, cmd, arg); + break; + +- case FS_IOC_SETFLAGS: +- ret = ovl_ioctl_set_fsflags(file, cmd, arg); +- break; +- + case FS_IOC_FSSETXATTR: +- ret = ovl_ioctl_set_fsxflags(file, cmd, arg); ++ case FS_IOC_SETFLAGS: ++ ret = ovl_ioctl_set_flags(file, cmd, arg); + break; + + default: +diff --git a/fs/proc/generic.c b/fs/proc/generic.c +index b84663252adda..6c0a05f55d6b1 100644 +--- a/fs/proc/generic.c ++++ b/fs/proc/generic.c +@@ -349,6 +349,16 @@ static const struct file_operations proc_dir_operations = { + .iterate_shared = proc_readdir, + }; + ++static int proc_net_d_revalidate(struct dentry *dentry, 
unsigned int flags) ++{ ++ return 0; ++} ++ ++const struct dentry_operations proc_net_dentry_ops = { ++ .d_revalidate = proc_net_d_revalidate, ++ .d_delete = always_delete_dentry, ++}; ++ + /* + * proc directories can do almost nothing.. + */ +@@ -471,8 +481,8 @@ struct proc_dir_entry *proc_symlink(const char *name, + } + EXPORT_SYMBOL(proc_symlink); + +-struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, +- struct proc_dir_entry *parent, void *data) ++struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode, ++ struct proc_dir_entry *parent, void *data, bool force_lookup) + { + struct proc_dir_entry *ent; + +@@ -484,10 +494,20 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, + ent->data = data; + ent->proc_dir_ops = &proc_dir_operations; + ent->proc_iops = &proc_dir_inode_operations; ++ if (force_lookup) { ++ pde_force_lookup(ent); ++ } + ent = proc_register(parent, ent); + } + return ent; + } ++EXPORT_SYMBOL_GPL(_proc_mkdir); ++ ++struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, ++ struct proc_dir_entry *parent, void *data) ++{ ++ return _proc_mkdir(name, mode, parent, data, false); ++} + EXPORT_SYMBOL_GPL(proc_mkdir_data); + + struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode, +diff --git a/fs/proc/internal.h b/fs/proc/internal.h +index 917cc85e34663..afbe96b6bf77d 100644 +--- a/fs/proc/internal.h ++++ b/fs/proc/internal.h +@@ -310,3 +310,10 @@ extern unsigned long task_statm(struct mm_struct *, + unsigned long *, unsigned long *, + unsigned long *, unsigned long *); + extern void task_mem(struct seq_file *, struct mm_struct *); ++ ++extern const struct dentry_operations proc_net_dentry_ops; ++static inline void pde_force_lookup(struct proc_dir_entry *pde) ++{ ++ /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */ ++ pde->proc_dops = &proc_net_dentry_ops; ++} +diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c +index ed8a6306990c4..1aa9236bf1af5 
100644 +--- a/fs/proc/proc_net.c ++++ b/fs/proc/proc_net.c +@@ -39,22 +39,6 @@ static struct net *get_proc_net(const struct inode *inode) + return maybe_get_net(PDE_NET(PDE(inode))); + } + +-static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags) +-{ +- return 0; +-} +- +-static const struct dentry_operations proc_net_dentry_ops = { +- .d_revalidate = proc_net_d_revalidate, +- .d_delete = always_delete_dentry, +-}; +- +-static void pde_force_lookup(struct proc_dir_entry *pde) +-{ +- /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */ +- pde->proc_dops = &proc_net_dentry_ops; +-} +- + static int seq_open_net(struct inode *inode, struct file *file) + { + unsigned int state_size = PDE(inode)->state_size; +diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c +index e59d4bb3a89e4..eafb75755fa37 100644 +--- a/fs/proc_namespace.c ++++ b/fs/proc_namespace.c +@@ -320,7 +320,8 @@ static int mountstats_open(struct inode *inode, struct file *file) + + const struct file_operations proc_mounts_operations = { + .open = mounts_open, +- .read = seq_read, ++ .read_iter = seq_read_iter, ++ .splice_read = generic_file_splice_read, + .llseek = seq_lseek, + .release = mounts_release, + .poll = mounts_poll, +@@ -328,7 +329,8 @@ const struct file_operations proc_mounts_operations = { + + const struct file_operations proc_mountinfo_operations = { + .open = mountinfo_open, +- .read = seq_read, ++ .read_iter = seq_read_iter, ++ .splice_read = generic_file_splice_read, + .llseek = seq_lseek, + .release = mounts_release, + .poll = mounts_poll, +@@ -336,7 +338,8 @@ const struct file_operations proc_mountinfo_operations = { + + const struct file_operations proc_mountstats_operations = { + .open = mountstats_open, +- .read = seq_read, ++ .read_iter = seq_read_iter, ++ .splice_read = generic_file_splice_read, + .llseek = seq_lseek, + .release = mounts_release, + }; +diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c +index b93b3cd10bfd3..8c50de693e1d4 100644 
+--- a/fs/ubifs/auth.c ++++ b/fs/ubifs/auth.c +@@ -338,8 +338,10 @@ int ubifs_init_authentication(struct ubifs_info *c) + c->authenticated = true; + + c->log_hash = ubifs_hash_get_desc(c); +- if (IS_ERR(c->log_hash)) ++ if (IS_ERR(c->log_hash)) { ++ err = PTR_ERR(c->log_hash); + goto out_free_hmac; ++ } + + err = 0; + +diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c +index 7e4bfaf2871fa..eae9cf5a57b05 100644 +--- a/fs/ubifs/io.c ++++ b/fs/ubifs/io.c +@@ -319,7 +319,7 @@ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad) + { + uint32_t crc; + +- ubifs_assert(c, pad >= 0 && !(pad & 7)); ++ ubifs_assert(c, pad >= 0); + + if (pad >= UBIFS_PAD_NODE_SZ) { + struct ubifs_ch *ch = buf; +@@ -764,6 +764,10 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) + * write-buffer. + */ + memcpy(wbuf->buf + wbuf->used, buf, len); ++ if (aligned_len > len) { ++ ubifs_assert(c, aligned_len - len < 8); ++ ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len); ++ } + + if (aligned_len == wbuf->avail) { + dbg_io("flush jhead %s wbuf to LEB %d:%d", +@@ -856,13 +860,18 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) + } + + spin_lock(&wbuf->lock); +- if (aligned_len) ++ if (aligned_len) { + /* + * And now we have what's left and what does not take whole + * max. write unit, so write it to the write-buffer and we are + * done. 
+ */ + memcpy(wbuf->buf, buf + written, len); ++ if (aligned_len > len) { ++ ubifs_assert(c, aligned_len - len < 8); ++ ubifs_pad(c, wbuf->buf + len, aligned_len - len); ++ } ++ } + + if (c->leb_size - wbuf->offs >= c->max_write_size) + wbuf->size = c->max_write_size; +diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h +index a3abcc4b7d9ff..6d1879bf94403 100644 +--- a/include/acpi/acpi_bus.h ++++ b/include/acpi/acpi_bus.h +@@ -620,7 +620,6 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev); + bool acpi_pm_device_can_wakeup(struct device *dev); + int acpi_pm_device_sleep_state(struct device *, int *, int); + int acpi_pm_set_device_wakeup(struct device *dev, bool enable); +-int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable); + #else + static inline void acpi_pm_wakeup_event(struct device *dev) + { +@@ -651,10 +650,6 @@ static inline int acpi_pm_set_device_wakeup(struct device *dev, bool enable) + { + return -ENODEV; + } +-static inline int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable) +-{ +- return -ENODEV; +-} + #endif + + #ifdef CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT +diff --git a/include/linux/fs.h b/include/linux/fs.h +index 8667d0cdc71e7..8bde32cf97115 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -2878,8 +2878,7 @@ extern int inode_needs_sync(struct inode *inode); + extern int generic_delete_inode(struct inode *inode); + static inline int generic_drop_inode(struct inode *inode) + { +- return !inode->i_nlink || inode_unhashed(inode) || +- (inode->i_state & I_DONTCACHE); ++ return !inode->i_nlink || inode_unhashed(inode); + } + extern void d_mark_dontcache(struct inode *inode); + +diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h +index f8529a3a29234..a2e42d3cd87cf 100644 +--- a/include/linux/fsnotify_backend.h ++++ b/include/linux/fsnotify_backend.h +@@ -137,6 +137,7 @@ struct mem_cgroup; + * if @file_name is not NULL, this is the directory that + * @file_name is 
relative to. + * @file_name: optional file name associated with event ++ * @cookie: inotify rename cookie + * + * free_group_priv - called when a group refcnt hits 0 to clean up the private union + * freeing_mark - called when a mark is being destroyed for some reason. The group +@@ -151,7 +152,7 @@ struct fsnotify_ops { + struct fsnotify_iter_info *iter_info); + int (*handle_inode_event)(struct fsnotify_mark *mark, u32 mask, + struct inode *inode, struct inode *dir, +- const struct qstr *file_name); ++ const struct qstr *file_name, u32 cookie); + void (*free_group_priv)(struct fsnotify_group *group); + void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); + void (*free_event)(struct fsnotify_event *event); +@@ -277,7 +278,7 @@ static inline const struct path *fsnotify_data_path(const void *data, + + enum fsnotify_obj_type { + FSNOTIFY_OBJ_TYPE_INODE, +- FSNOTIFY_OBJ_TYPE_CHILD, ++ FSNOTIFY_OBJ_TYPE_PARENT, + FSNOTIFY_OBJ_TYPE_VFSMOUNT, + FSNOTIFY_OBJ_TYPE_SB, + FSNOTIFY_OBJ_TYPE_COUNT, +@@ -285,7 +286,7 @@ enum fsnotify_obj_type { + }; + + #define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE) +-#define FSNOTIFY_OBJ_TYPE_CHILD_FL (1U << FSNOTIFY_OBJ_TYPE_CHILD) ++#define FSNOTIFY_OBJ_TYPE_PARENT_FL (1U << FSNOTIFY_OBJ_TYPE_PARENT) + #define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT) + #define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB) + #define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1) +@@ -330,7 +331,7 @@ static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \ + } + + FSNOTIFY_ITER_FUNCS(inode, INODE) +-FSNOTIFY_ITER_FUNCS(child, CHILD) ++FSNOTIFY_ITER_FUNCS(parent, PARENT) + FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT) + FSNOTIFY_ITER_FUNCS(sb, SB) + +diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h +index a3a838dcf8e4a..7199280d89ca4 100644 +--- a/include/linux/iio/adc/ad_sigma_delta.h ++++ 
b/include/linux/iio/adc/ad_sigma_delta.h +@@ -79,8 +79,12 @@ struct ad_sigma_delta { + /* + * DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. ++ * 'tx_buf' is up to 32 bits. ++ * 'rx_buf' is up to 32 bits per sample + 64 bit timestamp, ++ * rounded to 16 bytes to take into account padding. + */ +- uint8_t data[4] ____cacheline_aligned; ++ uint8_t tx_buf[4] ____cacheline_aligned; ++ uint8_t rx_buf[16] __aligned(8); + }; + + static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd, +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h +index 5a9238f6caad9..915f4f100383b 100644 +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -14,6 +14,7 @@ + #include <linux/uprobes.h> + #include <linux/page-flags-layout.h> + #include <linux/workqueue.h> ++#include <linux/seqlock.h> + + #include <asm/mmu.h> + +@@ -446,6 +447,13 @@ struct mm_struct { + */ + atomic_t has_pinned; + ++ /** ++ * @write_protect_seq: Locked when any thread is write ++ * protecting pages mapped by this mm to enforce a later COW, ++ * for instance during page table copying for fork(). ++ */ ++ seqcount_t write_protect_seq; ++ + #ifdef CONFIG_MMU + atomic_long_t pgtables_bytes; /* PTE page table pages */ + #endif +diff --git a/include/linux/of.h b/include/linux/of.h +index 5d51891cbf1a6..af655d264f10f 100644 +--- a/include/linux/of.h ++++ b/include/linux/of.h +@@ -1300,6 +1300,7 @@ static inline int of_get_available_child_count(const struct device_node *np) + #define _OF_DECLARE(table, name, compat, fn, fn_type) \ + static const struct of_device_id __of_table_##name \ + __used __section("__" #table "_of_table") \ ++ __aligned(__alignof__(struct of_device_id)) \ + = { .compatible = compat, \ + .data = (fn == (fn_type)NULL) ? 
fn : fn } + #else +diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h +index 270cab43ca3da..000cc0533c336 100644 +--- a/include/linux/proc_fs.h ++++ b/include/linux/proc_fs.h +@@ -80,6 +80,7 @@ extern void proc_flush_pid(struct pid *); + + extern struct proc_dir_entry *proc_symlink(const char *, + struct proc_dir_entry *, const char *); ++struct proc_dir_entry *_proc_mkdir(const char *, umode_t, struct proc_dir_entry *, void *, bool); + extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *); + extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t, + struct proc_dir_entry *, void *); +@@ -162,6 +163,11 @@ static inline struct proc_dir_entry *proc_symlink(const char *name, + static inline struct proc_dir_entry *proc_mkdir(const char *name, + struct proc_dir_entry *parent) {return NULL;} + static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; } ++static inline struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode, ++ struct proc_dir_entry *parent, void *data, bool force_lookup) ++{ ++ return NULL; ++} + static inline struct proc_dir_entry *proc_mkdir_data(const char *name, + umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; } + static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, +@@ -199,7 +205,7 @@ struct net; + static inline struct proc_dir_entry *proc_net_mkdir( + struct net *net, const char *name, struct proc_dir_entry *parent) + { +- return proc_mkdir_data(name, 0, parent, net); ++ return _proc_mkdir(name, 0, parent, net, true); + } + + struct ns_common; +diff --git a/include/linux/rmap.h b/include/linux/rmap.h +index 3a6adfa70fb0e..70085ca1a3fc9 100644 +--- a/include/linux/rmap.h ++++ b/include/linux/rmap.h +@@ -91,7 +91,6 @@ enum ttu_flags { + + TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */ + TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */ +- TTU_IGNORE_ACCESS = 0x10, /* don't age */ + TTU_IGNORE_HWPOISON = 0x20, /* 
corrupted page is recoverable */ + TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible + * and caller guarantees they will +diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h +index fb0205d87d3c1..9d6c28cc4d8f2 100644 +--- a/include/linux/seq_buf.h ++++ b/include/linux/seq_buf.h +@@ -30,7 +30,7 @@ static inline void seq_buf_clear(struct seq_buf *s) + } + + static inline void +-seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size) ++seq_buf_init(struct seq_buf *s, char *buf, unsigned int size) + { + s->buffer = buf; + s->size = size; +diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h +index a603d48d2b2cd..3ac5037d1c3da 100644 +--- a/include/linux/sunrpc/xprt.h ++++ b/include/linux/sunrpc/xprt.h +@@ -330,6 +330,7 @@ struct xprt_class { + struct rpc_xprt * (*setup)(struct xprt_create *); + struct module *owner; + char name[32]; ++ const char * netid[]; + }; + + /* +diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h +index 6c30508fca198..5a2c650d9e1c1 100644 +--- a/include/linux/trace_seq.h ++++ b/include/linux/trace_seq.h +@@ -12,7 +12,7 @@ + */ + + struct trace_seq { +- unsigned char buffer[PAGE_SIZE]; ++ char buffer[PAGE_SIZE]; + struct seq_buf seq; + int full; + }; +@@ -51,7 +51,7 @@ static inline int trace_seq_used(struct trace_seq *s) + * that is about to be written to and then return the result + * of that write. + */ +-static inline unsigned char * ++static inline char * + trace_seq_buffer_ptr(struct trace_seq *s) + { + return s->buffer + seq_buf_used(&s->seq); +diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h +index c090742765431..ed0840f3d5dff 100644 +--- a/include/media/v4l2-fwnode.h ++++ b/include/media/v4l2-fwnode.h +@@ -231,6 +231,9 @@ struct v4l2_fwnode_connector { + * guessing @vep.bus_type between CSI-2 D-PHY, parallel and BT.656 busses is + * supported. NEVER RELY ON GUESSING @vep.bus_type IN NEW DRIVERS! 
+ * ++ * The caller is required to initialise all fields of @vep, either with ++ * explicitly values, or by zeroing them. ++ * + * The function does not change the V4L2 fwnode endpoint state if it fails. + * + * NOTE: This function does not parse properties the size of which is variable +@@ -273,6 +276,9 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep); + * guessing @vep.bus_type between CSI-2 D-PHY, parallel and BT.656 busses is + * supported. NEVER RELY ON GUESSING @vep.bus_type IN NEW DRIVERS! + * ++ * The caller is required to initialise all fields of @vep, either with ++ * explicitly values, or by zeroing them. ++ * + * The function does not change the V4L2 fwnode endpoint state if it fails. + * + * v4l2_fwnode_endpoint_alloc_parse() has two important differences to +diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h +index 59b1de1971142..c20e2dc6d4320 100644 +--- a/include/media/v4l2-mediabus.h ++++ b/include/media/v4l2-mediabus.h +@@ -103,6 +103,7 @@ + * @V4L2_MBUS_CCP2: CCP2 (Compact Camera Port 2) + * @V4L2_MBUS_CSI2_DPHY: MIPI CSI-2 serial interface, with D-PHY + * @V4L2_MBUS_CSI2_CPHY: MIPI CSI-2 serial interface, with C-PHY ++ * @V4L2_MBUS_INVALID: invalid bus type (keep as last) + */ + enum v4l2_mbus_type { + V4L2_MBUS_UNKNOWN, +@@ -112,6 +113,7 @@ enum v4l2_mbus_type { + V4L2_MBUS_CCP2, + V4L2_MBUS_CSI2_DPHY, + V4L2_MBUS_CSI2_CPHY, ++ V4L2_MBUS_INVALID, + }; + + /** +diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h +index b00270c72740f..94fac55772f57 100644 +--- a/include/rdma/uverbs_ioctl.h ++++ b/include/rdma/uverbs_ioctl.h +@@ -862,6 +862,16 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle, + { + return _uverbs_alloc(bundle, size, GFP_KERNEL | __GFP_ZERO); + } ++ ++static inline __malloc void *uverbs_kcalloc(struct uverbs_attr_bundle *bundle, ++ size_t n, size_t size) ++{ ++ size_t bytes; ++ ++ if (unlikely(check_mul_overflow(n, size, &bytes))) ++ return 
ERR_PTR(-EOVERFLOW); ++ return uverbs_zalloc(bundle, bytes); ++} + int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, + size_t idx, s64 lower_bound, u64 upper_bound, + s64 *def_val); +diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h +index f1ce2c4c077e2..ec84ad1065683 100644 +--- a/include/uapi/linux/android/binder.h ++++ b/include/uapi/linux/android/binder.h +@@ -248,6 +248,7 @@ enum transaction_flags { + TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */ + TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */ + TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */ ++ TF_CLEAR_BUF = 0x20, /* clear buffer on txn complete */ + }; + + struct binder_transaction_data { +diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h +index 5203f54a2be1c..cf89c318f2ac9 100644 +--- a/include/uapi/linux/devlink.h ++++ b/include/uapi/linux/devlink.h +@@ -322,7 +322,7 @@ enum devlink_reload_limit { + DEVLINK_RELOAD_LIMIT_MAX = __DEVLINK_RELOAD_LIMIT_MAX - 1 + }; + +-#define DEVLINK_RELOAD_LIMITS_VALID_MASK (BIT(__DEVLINK_RELOAD_LIMIT_MAX) - 1) ++#define DEVLINK_RELOAD_LIMITS_VALID_MASK (_BITUL(__DEVLINK_RELOAD_LIMIT_MAX) - 1) + + enum devlink_attr { + /* don't change the order or add anything between, this is ABI! */ +diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h +index 5a8315e6d8a60..00c7235ae93e7 100644 +--- a/include/xen/xenbus.h ++++ b/include/xen/xenbus.h +@@ -61,6 +61,15 @@ struct xenbus_watch + /* Path being watched. */ + const char *node; + ++ unsigned int nr_pending; ++ ++ /* ++ * Called just before enqueing new event while a spinlock is held. ++ * The event will be discarded if this callback returns false. ++ */ ++ bool (*will_handle)(struct xenbus_watch *, ++ const char *path, const char *token); ++ + /* Callback (executed in a process context with no locks held). 
*/ + void (*callback)(struct xenbus_watch *, + const char *path, const char *token); +@@ -197,10 +206,14 @@ void xenbus_probe(struct work_struct *); + + int xenbus_watch_path(struct xenbus_device *dev, const char *path, + struct xenbus_watch *watch, ++ bool (*will_handle)(struct xenbus_watch *, ++ const char *, const char *), + void (*callback)(struct xenbus_watch *, + const char *, const char *)); +-__printf(4, 5) ++__printf(5, 6) + int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, ++ bool (*will_handle)(struct xenbus_watch *, ++ const char *, const char *), + void (*callback)(struct xenbus_watch *, + const char *, const char *), + const char *pathfmt, ...); +diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c +index bfcfcd61adb64..5b3f01da172bc 100644 +--- a/kernel/audit_fsnotify.c ++++ b/kernel/audit_fsnotify.c +@@ -154,7 +154,7 @@ static void audit_autoremove_mark_rule(struct audit_fsnotify_mark *audit_mark) + /* Update mark data in audit rules based on fsnotify events. 
*/ + static int audit_mark_handle_event(struct fsnotify_mark *inode_mark, u32 mask, + struct inode *inode, struct inode *dir, +- const struct qstr *dname) ++ const struct qstr *dname, u32 cookie) + { + struct audit_fsnotify_mark *audit_mark; + +diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c +index 83e1c07fc99e1..6c91902f4f455 100644 +--- a/kernel/audit_tree.c ++++ b/kernel/audit_tree.c +@@ -1037,7 +1037,7 @@ static void evict_chunk(struct audit_chunk *chunk) + + static int audit_tree_handle_event(struct fsnotify_mark *mark, u32 mask, + struct inode *inode, struct inode *dir, +- const struct qstr *file_name) ++ const struct qstr *file_name, u32 cookie) + { + return 0; + } +diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c +index 246e5ba704c00..2acf7ca491542 100644 +--- a/kernel/audit_watch.c ++++ b/kernel/audit_watch.c +@@ -466,7 +466,7 @@ void audit_remove_watch_rule(struct audit_krule *krule) + /* Update watch data in audit rules based on fsnotify events. */ + static int audit_watch_handle_event(struct fsnotify_mark *inode_mark, u32 mask, + struct inode *inode, struct inode *dir, +- const struct qstr *dname) ++ const struct qstr *dname, u32 cookie) + { + struct audit_parent *parent; + +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c +index 57b5b5d0a5fdd..53c70c470a38d 100644 +--- a/kernel/cgroup/cpuset.c ++++ b/kernel/cgroup/cpuset.c +@@ -983,25 +983,48 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + */ + static void rebuild_sched_domains_locked(void) + { ++ struct cgroup_subsys_state *pos_css; + struct sched_domain_attr *attr; + cpumask_var_t *doms; ++ struct cpuset *cs; + int ndoms; + + lockdep_assert_cpus_held(); + percpu_rwsem_assert_held(&cpuset_rwsem); + + /* +- * We have raced with CPU hotplug. Don't do anything to avoid ++ * If we have raced with CPU hotplug, return early to avoid + * passing doms with offlined cpu to partition_sched_domains(). 
+- * Anyways, hotplug work item will rebuild sched domains. ++ * Anyways, cpuset_hotplug_workfn() will rebuild sched domains. ++ * ++ * With no CPUs in any subpartitions, top_cpuset's effective CPUs ++ * should be the same as the active CPUs, so checking only top_cpuset ++ * is enough to detect racing CPU offlines. + */ + if (!top_cpuset.nr_subparts_cpus && + !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) + return; + +- if (top_cpuset.nr_subparts_cpus && +- !cpumask_subset(top_cpuset.effective_cpus, cpu_active_mask)) +- return; ++ /* ++ * With subpartition CPUs, however, the effective CPUs of a partition ++ * root should be only a subset of the active CPUs. Since a CPU in any ++ * partition root could be offlined, all must be checked. ++ */ ++ if (top_cpuset.nr_subparts_cpus) { ++ rcu_read_lock(); ++ cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { ++ if (!is_partition_root(cs)) { ++ pos_css = css_rightmost_descendant(pos_css); ++ continue; ++ } ++ if (!cpumask_subset(cs->effective_cpus, ++ cpu_active_mask)) { ++ rcu_read_unlock(); ++ return; ++ } ++ } ++ rcu_read_unlock(); ++ } + + /* Generate domain masks and attrs */ + ndoms = generate_sched_domains(&doms, &attr); +diff --git a/kernel/fork.c b/kernel/fork.c +index 6d266388d3804..dc55f68a6ee36 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1007,6 +1007,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, + mm->vmacache_seqnum = 0; + atomic_set(&mm->mm_users, 1); + atomic_set(&mm->mm_count, 1); ++ seqcount_init(&mm->write_protect_seq); + mmap_init_lock(mm); + INIT_LIST_HEAD(&mm->mmlist); + mm->core_state = NULL; +diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c +index e4ca69608f3b8..c6b419db68efc 100644 +--- a/kernel/irq/irqdomain.c ++++ b/kernel/irq/irqdomain.c +@@ -1373,8 +1373,15 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain, + unsigned int irq_base, + unsigned int nr_irqs) + { +- if (domain->ops->free) +- 
domain->ops->free(domain, irq_base, nr_irqs); ++ unsigned int i; ++ ++ if (!domain->ops->free) ++ return; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ if (irq_domain_get_irq_data(domain, irq_base + i)) ++ domain->ops->free(domain, irq_base + i, 1); ++ } + } + + int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c +index bd04b09b84b32..593df7edfe97f 100644 +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -177,7 +177,7 @@ module_param(rcu_unlock_delay, int, 0444); + * per-CPU. Object size is equal to one page. This value + * can be changed at boot time. + */ +-static int rcu_min_cached_objs = 2; ++static int rcu_min_cached_objs = 5; + module_param(rcu_min_cached_objs, int, 0444); + + /* Retrieve RCU kthreads priority for rcutorture */ +@@ -928,8 +928,8 @@ void __rcu_irq_enter_check_tick(void) + { + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); + +- // Enabling the tick is unsafe in NMI handlers. +- if (WARN_ON_ONCE(in_nmi())) ++ // If we're here from NMI there's nothing to do. ++ if (in_nmi()) + return; + + RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(), +@@ -1093,8 +1093,11 @@ static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) + * CPU can safely enter RCU read-side critical sections. In other words, + * if the current CPU is not in its idle loop or is in an interrupt or + * NMI handler, return true. ++ * ++ * Make notrace because it can be called by the internal functions of ++ * ftrace, and making this notrace removes unnecessary recursion calls. + */ +-bool rcu_is_watching(void) ++notrace bool rcu_is_watching(void) + { + bool ret; + +@@ -3084,6 +3087,9 @@ struct kfree_rcu_cpu_work { + * In order to save some per-cpu space the list is singular. + * Even though it is lockless an access has to be protected by the + * per-cpu lock. 
++ * @page_cache_work: A work to refill the cache when it is empty ++ * @work_in_progress: Indicates that page_cache_work is running ++ * @hrtimer: A hrtimer for scheduling a page_cache_work + * @nr_bkv_objs: number of allocated objects at @bkvcache. + * + * This is a per-CPU structure. The reason that it is not included in +@@ -3100,6 +3106,11 @@ struct kfree_rcu_cpu { + bool monitor_todo; + bool initialized; + int count; ++ ++ struct work_struct page_cache_work; ++ atomic_t work_in_progress; ++ struct hrtimer hrtimer; ++ + struct llist_head bkvcache; + int nr_bkv_objs; + }; +@@ -3217,10 +3228,10 @@ static void kfree_rcu_work(struct work_struct *work) + } + rcu_lock_release(&rcu_callback_map); + +- krcp = krc_this_cpu_lock(&flags); ++ raw_spin_lock_irqsave(&krcp->lock, flags); + if (put_cached_bnode(krcp, bkvhead[i])) + bkvhead[i] = NULL; +- krc_this_cpu_unlock(krcp, flags); ++ raw_spin_unlock_irqrestore(&krcp->lock, flags); + + if (bkvhead[i]) + free_page((unsigned long) bkvhead[i]); +@@ -3347,6 +3358,57 @@ static void kfree_rcu_monitor(struct work_struct *work) + raw_spin_unlock_irqrestore(&krcp->lock, flags); + } + ++static enum hrtimer_restart ++schedule_page_work_fn(struct hrtimer *t) ++{ ++ struct kfree_rcu_cpu *krcp = ++ container_of(t, struct kfree_rcu_cpu, hrtimer); ++ ++ queue_work(system_highpri_wq, &krcp->page_cache_work); ++ return HRTIMER_NORESTART; ++} ++ ++static void fill_page_cache_func(struct work_struct *work) ++{ ++ struct kvfree_rcu_bulk_data *bnode; ++ struct kfree_rcu_cpu *krcp = ++ container_of(work, struct kfree_rcu_cpu, ++ page_cache_work); ++ unsigned long flags; ++ bool pushed; ++ int i; ++ ++ for (i = 0; i < rcu_min_cached_objs; i++) { ++ bnode = (struct kvfree_rcu_bulk_data *) ++ __get_free_page(GFP_KERNEL | __GFP_NOWARN); ++ ++ if (bnode) { ++ raw_spin_lock_irqsave(&krcp->lock, flags); ++ pushed = put_cached_bnode(krcp, bnode); ++ raw_spin_unlock_irqrestore(&krcp->lock, flags); ++ ++ if (!pushed) { ++ free_page((unsigned long) 
bnode); ++ break; ++ } ++ } ++ } ++ ++ atomic_set(&krcp->work_in_progress, 0); ++} ++ ++static void ++run_page_cache_worker(struct kfree_rcu_cpu *krcp) ++{ ++ if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING && ++ !atomic_xchg(&krcp->work_in_progress, 1)) { ++ hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, ++ HRTIMER_MODE_REL); ++ krcp->hrtimer.function = schedule_page_work_fn; ++ hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL); ++ } ++} ++ + static inline bool + kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr) + { +@@ -3363,32 +3425,8 @@ kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr) + if (!krcp->bkvhead[idx] || + krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) { + bnode = get_cached_bnode(krcp); +- if (!bnode) { +- /* +- * To keep this path working on raw non-preemptible +- * sections, prevent the optional entry into the +- * allocator as it uses sleeping locks. In fact, even +- * if the caller of kfree_rcu() is preemptible, this +- * path still is not, as krcp->lock is a raw spinlock. +- * With additional page pre-allocation in the works, +- * hitting this return is going to be much less likely. +- */ +- if (IS_ENABLED(CONFIG_PREEMPT_RT)) +- return false; +- +- /* +- * NOTE: For one argument of kvfree_rcu() we can +- * drop the lock and get the page in sleepable +- * context. That would allow to maintain an array +- * for the CONFIG_PREEMPT_RT as well if no cached +- * pages are available. +- */ +- bnode = (struct kvfree_rcu_bulk_data *) +- __get_free_page(GFP_NOWAIT | __GFP_NOWARN); +- } +- + /* Switch to emergency path. */ +- if (unlikely(!bnode)) ++ if (!bnode) + return false; + + /* Initialize the new block. */ +@@ -3452,12 +3490,10 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) + goto unlock_return; + } + +- /* +- * Under high memory pressure GFP_NOWAIT can fail, +- * in that case the emergency path is maintained. 
+- */ + success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr); + if (!success) { ++ run_page_cache_worker(krcp); ++ + if (head == NULL) + // Inline if kvfree_rcu(one_arg) call. + goto unlock_return; +@@ -4449,24 +4485,14 @@ static void __init kfree_rcu_batch_init(void) + + for_each_possible_cpu(cpu) { + struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); +- struct kvfree_rcu_bulk_data *bnode; + + for (i = 0; i < KFREE_N_BATCHES; i++) { + INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work); + krcp->krw_arr[i].krcp = krcp; + } + +- for (i = 0; i < rcu_min_cached_objs; i++) { +- bnode = (struct kvfree_rcu_bulk_data *) +- __get_free_page(GFP_NOWAIT | __GFP_NOWARN); +- +- if (bnode) +- put_cached_bnode(krcp, bnode); +- else +- pr_err("Failed to preallocate for %d CPU!\n", cpu); +- } +- + INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor); ++ INIT_WORK(&krcp->page_cache_work, fill_page_cache_func); + krcp->initialized = true; + } + if (register_shrinker(&kfree_rcu_shrinker)) +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index e7e453492cffc..77aa0e788b9b7 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -6100,12 +6100,8 @@ static void do_sched_yield(void) + schedstat_inc(rq->yld_count); + current->sched_class->yield_task(rq); + +- /* +- * Since we are going to call schedule() anyway, there's +- * no need to preempt or enable interrupts: +- */ + preempt_disable(); +- rq_unlock(rq, &rf); ++ rq_unlock_irq(rq, &rf); + sched_preempt_enable_no_resched(); + + schedule(); +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c +index 1d3c97268ec0d..8d06d1f4e2f7b 100644 +--- a/kernel/sched/deadline.c ++++ b/kernel/sched/deadline.c +@@ -2547,7 +2547,7 @@ int sched_dl_global_validate(void) + u64 period = global_rt_period(); + u64 new_bw = to_ratio(period, runtime); + struct dl_bw *dl_b; +- int cpu, ret = 0; ++ int cpu, cpus, ret = 0; + unsigned long flags; + + /* +@@ -2562,9 +2562,10 @@ int sched_dl_global_validate(void) + 
for_each_possible_cpu(cpu) { + rcu_read_lock_sched(); + dl_b = dl_bw_of(cpu); ++ cpus = dl_bw_cpus(cpu); + + raw_spin_lock_irqsave(&dl_b->lock, flags); +- if (new_bw < dl_b->total_bw) ++ if (new_bw * cpus < dl_b->total_bw) + ret = -EBUSY; + raw_spin_unlock_irqrestore(&dl_b->lock, flags); + +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index df80bfcea92eb..c122176c627ec 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -257,30 +257,6 @@ struct rt_bandwidth { + + void __dl_clear_params(struct task_struct *p); + +-/* +- * To keep the bandwidth of -deadline tasks and groups under control +- * we need some place where: +- * - store the maximum -deadline bandwidth of the system (the group); +- * - cache the fraction of that bandwidth that is currently allocated. +- * +- * This is all done in the data structure below. It is similar to the +- * one used for RT-throttling (rt_bandwidth), with the main difference +- * that, since here we are only interested in admission control, we +- * do not decrease any runtime while the group "executes", neither we +- * need a timer to replenish it. +- * +- * With respect to SMP, the bandwidth is given on a per-CPU basis, +- * meaning that: +- * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU; +- * - dl_total_bw array contains, in the i-eth element, the currently +- * allocated bandwidth on the i-eth CPU. +- * Moreover, groups consume bandwidth on each CPU, while tasks only +- * consume bandwidth on the CPU they're running on. +- * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw +- * that will be shown the next time the proc or cgroup controls will +- * be red. It on its turn can be changed by writing on its own +- * control. 
+- */ + struct dl_bandwidth { + raw_spinlock_t dl_runtime_lock; + u64 dl_runtime; +@@ -292,6 +268,24 @@ static inline int dl_bandwidth_enabled(void) + return sysctl_sched_rt_runtime >= 0; + } + ++/* ++ * To keep the bandwidth of -deadline tasks under control ++ * we need some place where: ++ * - store the maximum -deadline bandwidth of each cpu; ++ * - cache the fraction of bandwidth that is currently allocated in ++ * each root domain; ++ * ++ * This is all done in the data structure below. It is similar to the ++ * one used for RT-throttling (rt_bandwidth), with the main difference ++ * that, since here we are only interested in admission control, we ++ * do not decrease any runtime while the group "executes", neither we ++ * need a timer to replenish it. ++ * ++ * With respect to SMP, bandwidth is given on a per root domain basis, ++ * meaning that: ++ * - bw (< 100%) is the deadline bandwidth of each CPU; ++ * - total_bw is the currently allocated bandwidth in each root domain; ++ */ + struct dl_bw { + raw_spinlock_t lock; + u64 bw; +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c +index a125ea5e04cd7..0dde84b9d29fe 100644 +--- a/kernel/trace/bpf_trace.c ++++ b/kernel/trace/bpf_trace.c +@@ -2041,10 +2041,12 @@ struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) + + void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) + { +- struct module *mod = __module_address((unsigned long)btp); ++ struct module *mod; + +- if (mod) +- module_put(mod); ++ preempt_disable(); ++ mod = __module_address((unsigned long)btp); ++ module_put(mod); ++ preempt_enable(); + } + + static __always_inline +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index a6268e09160a5..ddeb865706ba4 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -129,7 +129,16 @@ int ring_buffer_print_entry_header(struct trace_seq *s) + #define RB_ALIGNMENT 4U + #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * 
RINGBUF_TYPE_DATA_TYPE_LEN_MAX) + #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */ +-#define RB_ALIGN_DATA __aligned(RB_ALIGNMENT) ++ ++#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS ++# define RB_FORCE_8BYTE_ALIGNMENT 0 ++# define RB_ARCH_ALIGNMENT RB_ALIGNMENT ++#else ++# define RB_FORCE_8BYTE_ALIGNMENT 1 ++# define RB_ARCH_ALIGNMENT 8U ++#endif ++ ++#define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT) + + /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ + #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX +@@ -2719,7 +2728,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, + + event->time_delta = delta; + length -= RB_EVNT_HDR_SIZE; +- if (length > RB_MAX_SMALL_DATA) { ++ if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { + event->type_len = 0; + event->array[0] = length; + } else +@@ -2734,11 +2743,11 @@ static unsigned rb_calculate_event_length(unsigned length) + if (!length) + length++; + +- if (length > RB_MAX_SMALL_DATA) ++ if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) + length += sizeof(event.array[0]); + + length += RB_EVNT_HDR_SIZE; +- length = ALIGN(length, RB_ALIGNMENT); ++ length = ALIGN(length, RB_ARCH_ALIGNMENT); + + /* + * In case the time delta is larger than the 27 bits for it +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 06134189e9a72..3119d68d012df 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -68,10 +68,21 @@ bool ring_buffer_expanded; + static bool __read_mostly tracing_selftest_running; + + /* +- * If a tracer is running, we do not want to run SELFTEST. ++ * If boot-time tracing including tracers/events via kernel cmdline ++ * is running, we do not want to run SELFTEST. 
+ */ + bool __read_mostly tracing_selftest_disabled; + ++#ifdef CONFIG_FTRACE_STARTUP_TEST ++void __init disable_tracing_selftest(const char *reason) ++{ ++ if (!tracing_selftest_disabled) { ++ tracing_selftest_disabled = true; ++ pr_info("Ftrace startup test is disabled due to %s\n", reason); ++ } ++} ++#endif ++ + /* Pipe tracepoints to printk */ + struct trace_iterator *tracepoint_print_iter; + int tracepoint_printk; +@@ -2113,11 +2124,7 @@ int __init register_tracer(struct tracer *type) + apply_trace_boot_options(); + + /* disable other selftests, since this will break it. */ +- tracing_selftest_disabled = true; +-#ifdef CONFIG_FTRACE_STARTUP_TEST +- printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n", +- type->name); +-#endif ++ disable_tracing_selftest("running a tracer"); + + out_unlock: + return ret; +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h +index 1dadef445cd1e..6784b572ce597 100644 +--- a/kernel/trace/trace.h ++++ b/kernel/trace/trace.h +@@ -896,6 +896,8 @@ extern bool ring_buffer_expanded; + extern bool tracing_selftest_disabled; + + #ifdef CONFIG_FTRACE_STARTUP_TEST ++extern void __init disable_tracing_selftest(const char *reason); ++ + extern int trace_selftest_startup_function(struct tracer *trace, + struct trace_array *tr); + extern int trace_selftest_startup_function_graph(struct tracer *trace, +@@ -919,6 +921,9 @@ extern int trace_selftest_startup_branch(struct tracer *trace, + */ + #define __tracer_data __refdata + #else ++static inline void __init disable_tracing_selftest(const char *reason) ++{ ++} + /* Tracers are seldom changed. Optimize when selftests are disabled. 
*/ + #define __tracer_data __read_mostly + #endif /* CONFIG_FTRACE_STARTUP_TEST */ +diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c +index c22a152ef0b4f..a82f03f385f89 100644 +--- a/kernel/trace/trace_boot.c ++++ b/kernel/trace/trace_boot.c +@@ -344,6 +344,8 @@ static int __init trace_boot_init(void) + trace_boot_init_one_instance(tr, trace_node); + trace_boot_init_instances(trace_node); + ++ disable_tracing_selftest("running boot-time tracing"); ++ + return 0; + } + /* +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index 47a71f96e5bcc..802f3e7d8b8b5 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -3201,7 +3201,7 @@ static __init int setup_trace_event(char *str) + { + strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE); + ring_buffer_expanded = true; +- tracing_selftest_disabled = true; ++ disable_tracing_selftest("running event tracing"); + + return 1; + } +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index b911e9f6d9f5c..b29f92c51b1a4 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -25,11 +25,12 @@ + + /* Kprobe early definition from command line */ + static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata; +-static bool kprobe_boot_events_enabled __initdata; + + static int __init set_kprobe_boot_events(char *str) + { + strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE); ++ disable_tracing_selftest("running kprobe events"); ++ + return 0; + } + __setup("kprobe_event=", set_kprobe_boot_events); +@@ -1887,8 +1888,6 @@ static __init void setup_boot_kprobe_events(void) + ret = trace_run_command(cmd, create_or_delete_trace_kprobe); + if (ret) + pr_warn("Failed to add event(%d): %s\n", ret, cmd); +- else +- kprobe_boot_events_enabled = true; + + cmd = p; + } +@@ -1973,10 +1972,8 @@ static __init int kprobe_trace_self_tests_init(void) + if (tracing_is_disabled()) + return -ENODEV; + +- if (kprobe_boot_events_enabled) { +- 
pr_info("Skipping kprobe tests due to kprobe_event on cmdline\n"); ++ if (tracing_selftest_disabled) + return 0; +- } + + target = kprobe_trace_selftest_target; + +diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c +index 4738ad48a6674..6f28b8b11ead6 100644 +--- a/kernel/trace/trace_selftest.c ++++ b/kernel/trace/trace_selftest.c +@@ -787,7 +787,7 @@ trace_selftest_startup_function_graph(struct tracer *trace, + + /* Have we just recovered from a hang? */ + if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) { +- tracing_selftest_disabled = true; ++ disable_tracing_selftest("recovering from a hang"); + ret = -1; + goto out; + } +diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c +index bd7b3aaa93c38..c70d6347afa2b 100644 +--- a/lib/dynamic_debug.c ++++ b/lib/dynamic_debug.c +@@ -561,9 +561,14 @@ static int ddebug_exec_queries(char *query, const char *modname) + int dynamic_debug_exec_queries(const char *query, const char *modname) + { + int rc; +- char *qry = kstrndup(query, PAGE_SIZE, GFP_KERNEL); ++ char *qry; /* writable copy of query */ + +- if (!query) ++ if (!query) { ++ pr_err("non-null query/command string expected\n"); ++ return -EINVAL; ++ } ++ qry = kstrndup(query, PAGE_SIZE, GFP_KERNEL); ++ if (!qry) + return -ENOMEM; + + rc = ddebug_exec_queries(qry, modname); +diff --git a/mm/gup.c b/mm/gup.c +index 98eb8e6d2609c..054ff923d3d92 100644 +--- a/mm/gup.c ++++ b/mm/gup.c +@@ -123,6 +123,28 @@ static __maybe_unused struct page *try_grab_compound_head(struct page *page, + return NULL; + } + ++static void put_compound_head(struct page *page, int refs, unsigned int flags) ++{ ++ if (flags & FOLL_PIN) { ++ mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED, ++ refs); ++ ++ if (hpage_pincount_available(page)) ++ hpage_pincount_sub(page, refs); ++ else ++ refs *= GUP_PIN_COUNTING_BIAS; ++ } ++ ++ VM_BUG_ON_PAGE(page_ref_count(page) < refs, page); ++ /* ++ * Calling put_page() for each ref is unnecessarily slow. 
Only the last ++ * ref needs a put_page(). ++ */ ++ if (refs > 1) ++ page_ref_sub(page, refs - 1); ++ put_page(page); ++} ++ + /** + * try_grab_page() - elevate a page's refcount by a flag-dependent amount + * +@@ -177,41 +199,6 @@ bool __must_check try_grab_page(struct page *page, unsigned int flags) + return true; + } + +-#ifdef CONFIG_DEV_PAGEMAP_OPS +-static bool __unpin_devmap_managed_user_page(struct page *page) +-{ +- int count, refs = 1; +- +- if (!page_is_devmap_managed(page)) +- return false; +- +- if (hpage_pincount_available(page)) +- hpage_pincount_sub(page, 1); +- else +- refs = GUP_PIN_COUNTING_BIAS; +- +- count = page_ref_sub_return(page, refs); +- +- mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED, 1); +- /* +- * devmap page refcounts are 1-based, rather than 0-based: if +- * refcount is 1, then the page is free and the refcount is +- * stable because nobody holds a reference on the page. +- */ +- if (count == 1) +- free_devmap_managed_page(page); +- else if (!count) +- __put_page(page); +- +- return true; +-} +-#else +-static bool __unpin_devmap_managed_user_page(struct page *page) +-{ +- return false; +-} +-#endif /* CONFIG_DEV_PAGEMAP_OPS */ +- + /** + * unpin_user_page() - release a dma-pinned page + * @page: pointer to page to be released +@@ -223,28 +210,7 @@ static bool __unpin_devmap_managed_user_page(struct page *page) + */ + void unpin_user_page(struct page *page) + { +- int refs = 1; +- +- page = compound_head(page); +- +- /* +- * For devmap managed pages we need to catch refcount transition from +- * GUP_PIN_COUNTING_BIAS to 1, when refcount reach one it means the +- * page is free and we need to inform the device driver through +- * callback. See include/linux/memremap.h and HMM for details. 
+- */ +- if (__unpin_devmap_managed_user_page(page)) +- return; +- +- if (hpage_pincount_available(page)) +- hpage_pincount_sub(page, 1); +- else +- refs = GUP_PIN_COUNTING_BIAS; +- +- if (page_ref_sub_and_test(page, refs)) +- __put_page(page); +- +- mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED, 1); ++ put_compound_head(compound_head(page), 1, FOLL_PIN); + } + EXPORT_SYMBOL(unpin_user_page); + +@@ -2062,29 +2028,6 @@ EXPORT_SYMBOL(get_user_pages_unlocked); + * This code is based heavily on the PowerPC implementation by Nick Piggin. + */ + #ifdef CONFIG_HAVE_FAST_GUP +- +-static void put_compound_head(struct page *page, int refs, unsigned int flags) +-{ +- if (flags & FOLL_PIN) { +- mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED, +- refs); +- +- if (hpage_pincount_available(page)) +- hpage_pincount_sub(page, refs); +- else +- refs *= GUP_PIN_COUNTING_BIAS; +- } +- +- VM_BUG_ON_PAGE(page_ref_count(page) < refs, page); +- /* +- * Calling put_page() for each ref is unnecessarily slow. Only the last +- * ref needs a put_page(). +- */ +- if (refs > 1) +- page_ref_sub(page, refs - 1); +- put_page(page); +-} +- + #ifdef CONFIG_GUP_GET_PTE_LOW_HIGH + + /* +@@ -2677,13 +2620,61 @@ static int __gup_longterm_unlocked(unsigned long start, int nr_pages, + return ret; + } + +-static int internal_get_user_pages_fast(unsigned long start, int nr_pages, ++static unsigned long lockless_pages_from_mm(unsigned long start, ++ unsigned long end, ++ unsigned int gup_flags, ++ struct page **pages) ++{ ++ unsigned long flags; ++ int nr_pinned = 0; ++ unsigned seq; ++ ++ if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) || ++ !gup_fast_permitted(start, end)) ++ return 0; ++ ++ if (gup_flags & FOLL_PIN) { ++ seq = raw_read_seqcount(¤t->mm->write_protect_seq); ++ if (seq & 1) ++ return 0; ++ } ++ ++ /* ++ * Disable interrupts. The nested form is used, in order to allow full, ++ * general purpose use of this routine. 
++ * ++ * With interrupts disabled, we block page table pages from being freed ++ * from under us. See struct mmu_table_batch comments in ++ * include/asm-generic/tlb.h for more details. ++ * ++ * We do not adopt an rcu_read_lock() here as we also want to block IPIs ++ * that come from THPs splitting. ++ */ ++ local_irq_save(flags); ++ gup_pgd_range(start, end, gup_flags, pages, &nr_pinned); ++ local_irq_restore(flags); ++ ++ /* ++ * When pinning pages for DMA there could be a concurrent write protect ++ * from fork() via copy_page_range(), in this case always fail fast GUP. ++ */ ++ if (gup_flags & FOLL_PIN) { ++ if (read_seqcount_retry(¤t->mm->write_protect_seq, seq)) { ++ unpin_user_pages(pages, nr_pinned); ++ return 0; ++ } ++ } ++ return nr_pinned; ++} ++ ++static int internal_get_user_pages_fast(unsigned long start, ++ unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages) + { +- unsigned long addr, len, end; +- unsigned long flags; +- int nr_pinned = 0, ret = 0; ++ unsigned long len, end; ++ unsigned long nr_pinned; ++ int ret; + + if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM | + FOLL_FORCE | FOLL_PIN | FOLL_GET | +@@ -2697,54 +2688,33 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages, + might_lock_read(¤t->mm->mmap_lock); + + start = untagged_addr(start) & PAGE_MASK; +- addr = start; +- len = (unsigned long) nr_pages << PAGE_SHIFT; +- end = start + len; +- +- if (end <= start) ++ len = nr_pages << PAGE_SHIFT; ++ if (check_add_overflow(start, len, &end)) + return 0; + if (unlikely(!access_ok((void __user *)start, len))) + return -EFAULT; + +- /* +- * Disable interrupts. The nested form is used, in order to allow +- * full, general purpose use of this routine. +- * +- * With interrupts disabled, we block page table pages from being +- * freed from under us. See struct mmu_table_batch comments in +- * include/asm-generic/tlb.h for more details. +- * +- * We do not adopt an rcu_read_lock(.) 
here as we also want to +- * block IPIs that come from THPs splitting. +- */ +- if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) && gup_fast_permitted(start, end)) { +- unsigned long fast_flags = gup_flags; +- +- local_irq_save(flags); +- gup_pgd_range(addr, end, fast_flags, pages, &nr_pinned); +- local_irq_restore(flags); +- ret = nr_pinned; +- } +- +- if (nr_pinned < nr_pages && !(gup_flags & FOLL_FAST_ONLY)) { +- /* Try to get the remaining pages with get_user_pages */ +- start += nr_pinned << PAGE_SHIFT; +- pages += nr_pinned; +- +- ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, +- gup_flags, pages); ++ nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages); ++ if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY) ++ return nr_pinned; + +- /* Have to be a bit careful with return values */ +- if (nr_pinned > 0) { +- if (ret < 0) +- ret = nr_pinned; +- else +- ret += nr_pinned; +- } ++ /* Slow path: try to get the remaining pages with get_user_pages */ ++ start += nr_pinned << PAGE_SHIFT; ++ pages += nr_pinned; ++ ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags, ++ pages); ++ if (ret < 0) { ++ /* ++ * The caller has to unpin the pages we already pinned so ++ * returning -errno is not an option ++ */ ++ if (nr_pinned) ++ return nr_pinned; ++ return ret; + } +- +- return ret; ++ return ret + nr_pinned; + } ++ + /** + * get_user_pages_fast_only() - pin user pages in memory + * @start: starting user address +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index ec2bb93f74314..85eda66eb625d 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -2321,7 +2321,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, + + static void unmap_page(struct page *page) + { +- enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | ++ enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | + TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD; + bool unmap_success; + +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index d029d938d26d6..3b38ea958e954 100644 
+--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -5115,6 +5115,7 @@ int hugetlb_reserve_pages(struct inode *inode, + + if (unlikely(add < 0)) { + hugetlb_acct_memory(h, -gbl_reserve); ++ ret = add; + goto out_put_pages; + } else if (unlikely(chg > add)) { + /* +diff --git a/mm/init-mm.c b/mm/init-mm.c +index 3a613c85f9ede..153162669f806 100644 +--- a/mm/init-mm.c ++++ b/mm/init-mm.c +@@ -31,6 +31,7 @@ struct mm_struct init_mm = { + .pgd = swapper_pg_dir, + .mm_users = ATOMIC_INIT(2), + .mm_count = ATOMIC_INIT(1), ++ .write_protect_seq = SEQCNT_ZERO(init_mm.write_protect_seq), + MMAP_LOCK_INITIALIZER(init_mm) + .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), + .arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock), +diff --git a/mm/madvise.c b/mm/madvise.c +index 13f5677b93222..9abf4c5f2bce2 100644 +--- a/mm/madvise.c ++++ b/mm/madvise.c +@@ -908,14 +908,7 @@ static int madvise_inject_error(int behavior, + } else { + pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n", + pfn, start); +- /* +- * Drop the page reference taken by get_user_pages_fast(). In +- * the absence of MF_COUNT_INCREASED the memory_failure() +- * routine is responsible for pinning the page to prevent it +- * from being released back to the page allocator. +- */ +- put_page(page); +- ret = memory_failure(pfn, 0); ++ ret = memory_failure(pfn, MF_COUNT_INCREASED); + } + + if (ret) +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index 29459a6ce1c7a..a717728cc7b4a 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -2987,6 +2987,7 @@ __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void) + objcg = rcu_dereference(memcg->objcg); + if (objcg && obj_cgroup_tryget(objcg)) + break; ++ objcg = NULL; + } + rcu_read_unlock(); + +@@ -3246,8 +3247,10 @@ int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) + * independently later. 
+ */ + rcu_read_lock(); ++retry: + memcg = obj_cgroup_memcg(objcg); +- css_get(&memcg->css); ++ if (unlikely(!css_tryget(&memcg->css))) ++ goto retry; + rcu_read_unlock(); + + nr_pages = size >> PAGE_SHIFT; +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index 5d880d4eb9a26..fd653c9953cfd 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -989,7 +989,7 @@ static int get_hwpoison_page(struct page *page) + static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, + int flags, struct page **hpagep) + { +- enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; ++ enum ttu_flags ttu = TTU_IGNORE_MLOCK; + struct address_space *mapping; + LIST_HEAD(tokill); + bool unmap_success = true; +@@ -1231,6 +1231,12 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, + loff_t start; + dax_entry_t cookie; + ++ if (flags & MF_COUNT_INCREASED) ++ /* ++ * Drop the extra refcount in case we come from madvise(). ++ */ ++ put_page(page); ++ + /* + * Prevent the inode from being freed while we are interrogating + * the address_space, typically this would be handled by +diff --git a/mm/memory.c b/mm/memory.c +index c48f8df6e5026..50632c4366b8a 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -1171,6 +1171,15 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) + mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, + 0, src_vma, src_mm, addr, end); + mmu_notifier_invalidate_range_start(&range); ++ /* ++ * Disabling preemption is not needed for the write side, as ++ * the read side doesn't spin, but goes to the mmap_lock. ++ * ++ * Use the raw variant of the seqcount_t write API to avoid ++ * lockdep complaining about preemptibility. 
++ */ ++ mmap_assert_write_locked(src_mm); ++ raw_write_seqcount_begin(&src_mm->write_protect_seq); + } + + ret = 0; +@@ -1187,8 +1196,10 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) + } + } while (dst_pgd++, src_pgd++, addr = next, addr != end); + +- if (is_cow) ++ if (is_cow) { ++ raw_write_seqcount_end(&src_mm->write_protect_seq); + mmu_notifier_invalidate_range_end(&range); ++ } + return ret; + } + +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c +index 63b2e46b65552..0f855deea4b2d 100644 +--- a/mm/memory_hotplug.c ++++ b/mm/memory_hotplug.c +@@ -1304,7 +1304,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) + if (WARN_ON(PageLRU(page))) + isolate_lru_page(page); + if (page_mapped(page)) +- try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS); ++ try_to_unmap(page, TTU_IGNORE_MLOCK); + continue; + } + +diff --git a/mm/migrate.c b/mm/migrate.c +index 5795cb82e27c3..8ea0c65f10756 100644 +--- a/mm/migrate.c ++++ b/mm/migrate.c +@@ -1122,8 +1122,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, + /* Establish migration ptes */ + VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, + page); +- try_to_unmap(page, +- TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); ++ try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK); + page_was_mapped = 1; + } + +@@ -1329,8 +1328,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, + + if (page_mapped(hpage)) { + bool mapping_locked = false; +- enum ttu_flags ttu = TTU_MIGRATION|TTU_IGNORE_MLOCK| +- TTU_IGNORE_ACCESS; ++ enum ttu_flags ttu = TTU_MIGRATION|TTU_IGNORE_MLOCK; + + if (!PageAnon(hpage)) { + /* +@@ -2688,7 +2686,7 @@ static void migrate_vma_prepare(struct migrate_vma *migrate) + */ + static void migrate_vma_unmap(struct migrate_vma *migrate) + { +- int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; ++ int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK; + const unsigned long npages = 
migrate->npages; + const unsigned long start = migrate->start; + unsigned long addr, i, restore = 0; +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index eaa227a479e4a..32f783ddb5c3a 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -2470,12 +2470,12 @@ static bool can_steal_fallback(unsigned int order, int start_mt) + return false; + } + +-static inline void boost_watermark(struct zone *zone) ++static inline bool boost_watermark(struct zone *zone) + { + unsigned long max_boost; + + if (!watermark_boost_factor) +- return; ++ return false; + /* + * Don't bother in zones that are unlikely to produce results. + * On small machines, including kdump capture kernels running +@@ -2483,7 +2483,7 @@ static inline void boost_watermark(struct zone *zone) + * memory situation immediately. + */ + if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) +- return; ++ return false; + + max_boost = mult_frac(zone->_watermark[WMARK_HIGH], + watermark_boost_factor, 10000); +@@ -2497,12 +2497,14 @@ static inline void boost_watermark(struct zone *zone) + * boosted watermark resulting in a hang. + */ + if (!max_boost) +- return; ++ return false; + + max_boost = max(pageblock_nr_pages, max_boost); + + zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, + max_boost); ++ ++ return true; + } + + /* +@@ -2540,8 +2542,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page, + * likelihood of future fallbacks. Wake kswapd now as the node + * may be balanced overall and kswapd will not wake naturally. 
+ */ +- boost_watermark(zone); +- if (alloc_flags & ALLOC_KSWAPD) ++ if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) + set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); + + /* We are not allowed to try stealing from the whole block */ +diff --git a/mm/rmap.c b/mm/rmap.c +index 31b29321adfe1..6657000b18d41 100644 +--- a/mm/rmap.c ++++ b/mm/rmap.c +@@ -1533,15 +1533,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, + goto discard; + } + +- if (!(flags & TTU_IGNORE_ACCESS)) { +- if (ptep_clear_flush_young_notify(vma, address, +- pvmw.pte)) { +- ret = false; +- page_vma_mapped_walk_done(&pvmw); +- break; +- } +- } +- + /* Nuke the page table entry. */ + flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); + if (should_defer_flush(mm, flags)) { +diff --git a/mm/vmalloc.c b/mm/vmalloc.c +index 6ae491a8b210f..279dc0c96568c 100644 +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -2256,7 +2256,7 @@ static void __vunmap(const void *addr, int deallocate_pages) + debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); + debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); + +- kasan_poison_vmalloc(area->addr, area->size); ++ kasan_poison_vmalloc(area->addr, get_vm_area_size(area)); + + vm_remove_mappings(area, deallocate_pages); + +@@ -3448,11 +3448,11 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos) + } + + static void s_stop(struct seq_file *m, void *p) +- __releases(&vmap_purge_lock) + __releases(&vmap_area_lock) ++ __releases(&vmap_purge_lock) + { +- mutex_unlock(&vmap_purge_lock); + spin_unlock(&vmap_area_lock); ++ mutex_unlock(&vmap_purge_lock); + } + + static void show_numa_info(struct seq_file *m, struct vm_struct *v) +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 7b4e31eac2cff..0ec6321e98878 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -1072,7 +1072,6 @@ static void page_check_dirty_writeback(struct page *page, + static unsigned int shrink_page_list(struct list_head *page_list, + struct pglist_data 
*pgdat, + struct scan_control *sc, +- enum ttu_flags ttu_flags, + struct reclaim_stat *stat, + bool ignore_references) + { +@@ -1297,7 +1296,7 @@ static unsigned int shrink_page_list(struct list_head *page_list, + * processes. Try to unmap it here. + */ + if (page_mapped(page)) { +- enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH; ++ enum ttu_flags flags = TTU_BATCH_FLUSH; + bool was_swapbacked = PageSwapBacked(page); + + if (unlikely(PageTransHuge(page))) +@@ -1514,7 +1513,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone, + } + + nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc, +- TTU_IGNORE_ACCESS, &stat, true); ++ &stat, true); + list_splice(&clean_pages, page_list); + mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, + -(long)nr_reclaimed); +@@ -1958,8 +1957,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, + if (nr_taken == 0) + return 0; + +- nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0, +- &stat, false); ++ nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false); + + spin_lock_irq(&pgdat->lru_lock); + +@@ -2131,8 +2129,7 @@ unsigned long reclaim_pages(struct list_head *page_list) + + nr_reclaimed += shrink_page_list(&node_page_list, + NODE_DATA(nid), +- &sc, 0, +- &dummy_stat, false); ++ &sc, &dummy_stat, false); + while (!list_empty(&node_page_list)) { + page = lru_to_page(&node_page_list); + list_del(&page->lru); +@@ -2145,8 +2142,7 @@ unsigned long reclaim_pages(struct list_head *page_list) + if (!list_empty(&node_page_list)) { + nr_reclaimed += shrink_page_list(&node_page_list, + NODE_DATA(nid), +- &sc, 0, +- &dummy_stat, false); ++ &sc, &dummy_stat, false); + while (!list_empty(&node_page_list)) { + page = lru_to_page(&node_page_list); + list_del(&page->lru); +diff --git a/mm/z3fold.c b/mm/z3fold.c +index 18feaa0bc5377..0152ad9931a87 100644 +--- a/mm/z3fold.c ++++ b/mm/z3fold.c +@@ -90,7 +90,7 @@ struct z3fold_buddy_slots { + * be enough slots to hold all 
possible variants + */ + unsigned long slot[BUDDY_MASK + 1]; +- unsigned long pool; /* back link + flags */ ++ unsigned long pool; /* back link */ + rwlock_t lock; + }; + #define HANDLE_FLAG_MASK (0x03) +@@ -185,7 +185,7 @@ enum z3fold_page_flags { + * handle flags, go under HANDLE_FLAG_MASK + */ + enum z3fold_handle_flags { +- HANDLES_ORPHANED = 0, ++ HANDLES_NOFREE = 0, + }; + + /* +@@ -303,10 +303,9 @@ static inline void put_z3fold_header(struct z3fold_header *zhdr) + z3fold_page_unlock(zhdr); + } + +-static inline void free_handle(unsigned long handle) ++static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr) + { + struct z3fold_buddy_slots *slots; +- struct z3fold_header *zhdr; + int i; + bool is_free; + +@@ -316,22 +315,19 @@ static inline void free_handle(unsigned long handle) + if (WARN_ON(*(unsigned long *)handle == 0)) + return; + +- zhdr = handle_to_z3fold_header(handle); + slots = handle_to_slots(handle); + write_lock(&slots->lock); + *(unsigned long *)handle = 0; +- if (zhdr->slots == slots) { ++ ++ if (test_bit(HANDLES_NOFREE, &slots->pool)) { + write_unlock(&slots->lock); + return; /* simple case, nothing else to do */ + } + +- /* we are freeing a foreign handle if we are here */ +- zhdr->foreign_handles--; ++ if (zhdr->slots != slots) ++ zhdr->foreign_handles--; ++ + is_free = true; +- if (!test_bit(HANDLES_ORPHANED, &slots->pool)) { +- write_unlock(&slots->lock); +- return; +- } + for (i = 0; i <= BUDDY_MASK; i++) { + if (slots->slot[i]) { + is_free = false; +@@ -343,6 +339,8 @@ static inline void free_handle(unsigned long handle) + if (is_free) { + struct z3fold_pool *pool = slots_to_pool(slots); + ++ if (zhdr->slots == slots) ++ zhdr->slots = NULL; + kmem_cache_free(pool->c_handle, slots); + } + } +@@ -525,8 +523,6 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) + { + struct page *page = virt_to_page(zhdr); + struct z3fold_pool *pool = zhdr_to_pool(zhdr); +- bool is_free = true; +- int i; + + 
WARN_ON(!list_empty(&zhdr->buddy)); + set_bit(PAGE_STALE, &page->private); +@@ -536,21 +532,6 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) + list_del_init(&page->lru); + spin_unlock(&pool->lock); + +- /* If there are no foreign handles, free the handles array */ +- read_lock(&zhdr->slots->lock); +- for (i = 0; i <= BUDDY_MASK; i++) { +- if (zhdr->slots->slot[i]) { +- is_free = false; +- break; +- } +- } +- if (!is_free) +- set_bit(HANDLES_ORPHANED, &zhdr->slots->pool); +- read_unlock(&zhdr->slots->lock); +- +- if (is_free) +- kmem_cache_free(pool->c_handle, zhdr->slots); +- + if (locked) + z3fold_page_unlock(zhdr); + +@@ -653,6 +634,28 @@ static inline void add_to_unbuddied(struct z3fold_pool *pool, + } + } + ++static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks) ++{ ++ enum buddy bud = HEADLESS; ++ ++ if (zhdr->middle_chunks) { ++ if (!zhdr->first_chunks && ++ chunks <= zhdr->start_middle - ZHDR_CHUNKS) ++ bud = FIRST; ++ else if (!zhdr->last_chunks) ++ bud = LAST; ++ } else { ++ if (!zhdr->first_chunks) ++ bud = FIRST; ++ else if (!zhdr->last_chunks) ++ bud = LAST; ++ else ++ bud = MIDDLE; ++ } ++ ++ return bud; ++} ++ + static inline void *mchunk_memmove(struct z3fold_header *zhdr, + unsigned short dst_chunk) + { +@@ -714,18 +717,7 @@ static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr) + if (WARN_ON(new_zhdr == zhdr)) + goto out_fail; + +- if (new_zhdr->first_chunks == 0) { +- if (new_zhdr->middle_chunks != 0 && +- chunks >= new_zhdr->start_middle) { +- new_bud = LAST; +- } else { +- new_bud = FIRST; +- } +- } else if (new_zhdr->last_chunks == 0) { +- new_bud = LAST; +- } else if (new_zhdr->middle_chunks == 0) { +- new_bud = MIDDLE; +- } ++ new_bud = get_free_buddy(new_zhdr, chunks); + q = new_zhdr; + switch (new_bud) { + case FIRST: +@@ -847,9 +839,8 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked) + return; + } + +- if (unlikely(PageIsolated(page) || +- 
test_bit(PAGE_CLAIMED, &page->private) || +- test_bit(PAGE_STALE, &page->private))) { ++ if (test_bit(PAGE_STALE, &page->private) || ++ test_and_set_bit(PAGE_CLAIMED, &page->private)) { + z3fold_page_unlock(zhdr); + return; + } +@@ -858,13 +849,16 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked) + zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) { + if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) + atomic64_dec(&pool->pages_nr); +- else ++ else { ++ clear_bit(PAGE_CLAIMED, &page->private); + z3fold_page_unlock(zhdr); ++ } + return; + } + + z3fold_compact_page(zhdr); + add_to_unbuddied(pool, zhdr); ++ clear_bit(PAGE_CLAIMED, &page->private); + z3fold_page_unlock(zhdr); + } + +@@ -973,6 +967,9 @@ lookup: + } + } + ++ if (zhdr && !zhdr->slots) ++ zhdr->slots = alloc_slots(pool, ++ can_sleep ? GFP_NOIO : GFP_ATOMIC); + return zhdr; + } + +@@ -1109,17 +1106,8 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, + retry: + zhdr = __z3fold_alloc(pool, size, can_sleep); + if (zhdr) { +- if (zhdr->first_chunks == 0) { +- if (zhdr->middle_chunks != 0 && +- chunks >= zhdr->start_middle) +- bud = LAST; +- else +- bud = FIRST; +- } else if (zhdr->last_chunks == 0) +- bud = LAST; +- else if (zhdr->middle_chunks == 0) +- bud = MIDDLE; +- else { ++ bud = get_free_buddy(zhdr, chunks); ++ if (bud == HEADLESS) { + if (kref_put(&zhdr->refcount, + release_z3fold_page_locked)) + atomic64_dec(&pool->pages_nr); +@@ -1265,12 +1253,11 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) + pr_err("%s: unknown bud %d\n", __func__, bud); + WARN_ON(1); + put_z3fold_header(zhdr); +- clear_bit(PAGE_CLAIMED, &page->private); + return; + } + + if (!page_claimed) +- free_handle(handle); ++ free_handle(handle, zhdr); + if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) { + atomic64_dec(&pool->pages_nr); + return; +@@ -1280,8 +1267,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long 
handle) + z3fold_page_unlock(zhdr); + return; + } +- if (unlikely(PageIsolated(page)) || +- test_and_set_bit(NEEDS_COMPACTING, &page->private)) { ++ if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) { + put_z3fold_header(zhdr); + clear_bit(PAGE_CLAIMED, &page->private); + return; +@@ -1345,6 +1331,10 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) + struct page *page = NULL; + struct list_head *pos; + unsigned long first_handle = 0, middle_handle = 0, last_handle = 0; ++ struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN))); ++ ++ rwlock_init(&slots.lock); ++ slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE); + + spin_lock(&pool->lock); + if (!pool->ops || !pool->ops->evict || retries == 0) { +@@ -1359,35 +1349,36 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) + list_for_each_prev(pos, &pool->lru) { + page = list_entry(pos, struct page, lru); + +- /* this bit could have been set by free, in which case +- * we pass over to the next page in the pool. 
+- */ +- if (test_and_set_bit(PAGE_CLAIMED, &page->private)) { +- page = NULL; +- continue; +- } +- +- if (unlikely(PageIsolated(page))) { +- clear_bit(PAGE_CLAIMED, &page->private); +- page = NULL; +- continue; +- } + zhdr = page_address(page); + if (test_bit(PAGE_HEADLESS, &page->private)) + break; + ++ if (kref_get_unless_zero(&zhdr->refcount) == 0) { ++ zhdr = NULL; ++ break; ++ } + if (!z3fold_page_trylock(zhdr)) { +- clear_bit(PAGE_CLAIMED, &page->private); ++ if (kref_put(&zhdr->refcount, ++ release_z3fold_page)) ++ atomic64_dec(&pool->pages_nr); + zhdr = NULL; + continue; /* can't evict at this point */ + } +- if (zhdr->foreign_handles) { +- clear_bit(PAGE_CLAIMED, &page->private); +- z3fold_page_unlock(zhdr); ++ ++ /* test_and_set_bit is of course atomic, but we still ++ * need to do it under page lock, otherwise checking ++ * that bit in __z3fold_alloc wouldn't make sense ++ */ ++ if (zhdr->foreign_handles || ++ test_and_set_bit(PAGE_CLAIMED, &page->private)) { ++ if (kref_put(&zhdr->refcount, ++ release_z3fold_page)) ++ atomic64_dec(&pool->pages_nr); ++ else ++ z3fold_page_unlock(zhdr); + zhdr = NULL; + continue; /* can't evict such page */ + } +- kref_get(&zhdr->refcount); + list_del_init(&zhdr->buddy); + zhdr->cpu = -1; + break; +@@ -1409,12 +1400,16 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) + first_handle = 0; + last_handle = 0; + middle_handle = 0; ++ memset(slots.slot, 0, sizeof(slots.slot)); + if (zhdr->first_chunks) +- first_handle = encode_handle(zhdr, FIRST); ++ first_handle = __encode_handle(zhdr, &slots, ++ FIRST); + if (zhdr->middle_chunks) +- middle_handle = encode_handle(zhdr, MIDDLE); ++ middle_handle = __encode_handle(zhdr, &slots, ++ MIDDLE); + if (zhdr->last_chunks) +- last_handle = encode_handle(zhdr, LAST); ++ last_handle = __encode_handle(zhdr, &slots, ++ LAST); + /* + * it's safe to unlock here because we hold a + * reference to this page +@@ -1429,19 +1424,16 @@ static int 
z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) + ret = pool->ops->evict(pool, middle_handle); + if (ret) + goto next; +- free_handle(middle_handle); + } + if (first_handle) { + ret = pool->ops->evict(pool, first_handle); + if (ret) + goto next; +- free_handle(first_handle); + } + if (last_handle) { + ret = pool->ops->evict(pool, last_handle); + if (ret) + goto next; +- free_handle(last_handle); + } + next: + if (test_bit(PAGE_HEADLESS, &page->private)) { +@@ -1455,9 +1447,11 @@ next: + spin_unlock(&pool->lock); + clear_bit(PAGE_CLAIMED, &page->private); + } else { ++ struct z3fold_buddy_slots *slots = zhdr->slots; + z3fold_page_lock(zhdr); + if (kref_put(&zhdr->refcount, + release_z3fold_page_locked)) { ++ kmem_cache_free(pool->c_handle, slots); + atomic64_dec(&pool->pages_nr); + return 0; + } +@@ -1573,8 +1567,7 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) + VM_BUG_ON_PAGE(!PageMovable(page), page); + VM_BUG_ON_PAGE(PageIsolated(page), page); + +- if (test_bit(PAGE_HEADLESS, &page->private) || +- test_bit(PAGE_CLAIMED, &page->private)) ++ if (test_bit(PAGE_HEADLESS, &page->private)) + return false; + + zhdr = page_address(page); +@@ -1586,6 +1579,8 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) + if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) + goto out; + ++ if (test_and_set_bit(PAGE_CLAIMED, &page->private)) ++ goto out; + pool = zhdr_to_pool(zhdr); + spin_lock(&pool->lock); + if (!list_empty(&zhdr->buddy)) +@@ -1612,16 +1607,17 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa + + VM_BUG_ON_PAGE(!PageMovable(page), page); + VM_BUG_ON_PAGE(!PageIsolated(page), page); ++ VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page); + VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); + + zhdr = page_address(page); + pool = zhdr_to_pool(zhdr); + +- if (!z3fold_page_trylock(zhdr)) { ++ if (!z3fold_page_trylock(zhdr)) + return -EAGAIN; +- } + if 
(zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) { + z3fold_page_unlock(zhdr); ++ clear_bit(PAGE_CLAIMED, &page->private); + return -EBUSY; + } + if (work_pending(&zhdr->work)) { +@@ -1663,6 +1659,7 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa + queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); + + page_mapcount_reset(page); ++ clear_bit(PAGE_CLAIMED, &page->private); + put_page(page); + return 0; + } +@@ -1686,6 +1683,7 @@ static void z3fold_page_putback(struct page *page) + spin_lock(&pool->lock); + list_add(&page->lru, &pool->lru); + spin_unlock(&pool->lock); ++ clear_bit(PAGE_CLAIMED, &page->private); + z3fold_page_unlock(zhdr); + } + +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c +index cbdf2a5559754..17a72695865b5 100644 +--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ -4941,6 +4941,11 @@ static void hci_phy_link_complete_evt(struct hci_dev *hdev, + return; + } + ++ if (!hcon->amp_mgr) { ++ hci_dev_unlock(hdev); ++ return; ++ } ++ + if (ev->status) { + hci_conn_del(hcon); + hci_dev_unlock(hdev); +diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c +index 6f12bab4d2fa6..610ed0817bd77 100644 +--- a/net/bluetooth/hci_request.c ++++ b/net/bluetooth/hci_request.c +@@ -698,7 +698,8 @@ static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr, + cp.bdaddr_type); + hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp); + +- if (use_ll_privacy(req->hdev)) { ++ if (use_ll_privacy(req->hdev) && ++ hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) { + struct smp_irk *irk; + + irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type); +@@ -732,7 +733,8 @@ static int add_to_white_list(struct hci_request *req, + return -1; + + /* White list can not be used with RPAs */ +- if (!allow_rpa && !use_ll_privacy(hdev) && ++ if (!allow_rpa && ++ !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && + hci_find_irk_by_addr(hdev, 
¶ms->addr, params->addr_type)) { + return -1; + } +@@ -750,7 +752,8 @@ static int add_to_white_list(struct hci_request *req, + cp.bdaddr_type); + hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp); + +- if (use_ll_privacy(hdev)) { ++ if (use_ll_privacy(hdev) && ++ hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) { + struct smp_irk *irk; + + irk = hci_find_irk_by_addr(hdev, ¶ms->addr, +@@ -812,7 +815,8 @@ static u8 update_white_list(struct hci_request *req) + } + + /* White list can not be used with RPAs */ +- if (!allow_rpa && !use_ll_privacy(hdev) && ++ if (!allow_rpa && ++ !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && + hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) { + return 0x00; + } +diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c +index 79ffcdef0b7ad..22a110f37abc6 100644 +--- a/net/bluetooth/sco.c ++++ b/net/bluetooth/sco.c +@@ -1003,6 +1003,11 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, + + case BT_SNDMTU: + case BT_RCVMTU: ++ if (sk->sk_state != BT_CONNECTED) { ++ err = -ENOTCONN; ++ break; ++ } ++ + if (put_user(sco_pi(sk)->conn->mtu, (u32 __user *)optval)) + err = -EFAULT; + break; +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index 1e2e5a406d587..2a5a11f92b03e 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -1758,7 +1758,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) + } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) { + sta->rx_stats.last_rx = jiffies; + } else if (!ieee80211_is_s1g_beacon(hdr->frame_control) && +- is_multicast_ether_addr(hdr->addr1)) { ++ !is_multicast_ether_addr(hdr->addr1)) { + /* + * Mesh beacons will update last_rx when if they are found to + * match the current local configuration when processed. 
+diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c +index fb0e3a657d2d3..c3ca973737742 100644 +--- a/net/mac80211/vht.c ++++ b/net/mac80211/vht.c +@@ -465,12 +465,18 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta) + * IEEE80211-2016 specification makes higher bandwidth operation + * possible on the TDLS link if the peers have wider bandwidth + * capability. ++ * ++ * However, in this case, and only if the TDLS peer is authorized, ++ * limit to the tdls_chandef so that the configuration here isn't ++ * wider than what's actually requested on the channel context. + */ + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && +- test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) +- return bw; +- +- bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width)); ++ test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) && ++ test_sta_flag(sta, WLAN_STA_AUTHORIZED) && ++ sta->tdls_chandef.chan) ++ bw = min(bw, ieee80211_chan_width_to_rx_bw(sta->tdls_chandef.width)); ++ else ++ bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width)); + + return bw; + } +diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c +index fd9bca2427242..56029e3af6ff0 100644 +--- a/net/sunrpc/debugfs.c ++++ b/net/sunrpc/debugfs.c +@@ -128,13 +128,13 @@ static int do_xprt_debugfs(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *n + return 0; + len = snprintf(name, sizeof(name), "../../rpc_xprt/%s", + xprt->debugfs->d_name.name); +- if (len > sizeof(name)) ++ if (len >= sizeof(name)) + return -1; + if (*nump == 0) + strcpy(link, "xprt"); + else { + len = snprintf(link, sizeof(link), "xprt%d", *nump); +- if (len > sizeof(link)) ++ if (len >= sizeof(link)) + return -1; + } + debugfs_create_symlink(link, clnt->cl_debugfs, name); +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c +index f06d7c315017c..cf702a5f7fe5d 100644 +--- a/net/sunrpc/sched.c ++++ b/net/sunrpc/sched.c +@@ -675,6 +675,23 @@ struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue) + } + 
EXPORT_SYMBOL_GPL(rpc_wake_up_next); + ++/** ++ * rpc_wake_up_locked - wake up all rpc_tasks ++ * @queue: rpc_wait_queue on which the tasks are sleeping ++ * ++ */ ++static void rpc_wake_up_locked(struct rpc_wait_queue *queue) ++{ ++ struct rpc_task *task; ++ ++ for (;;) { ++ task = __rpc_find_next_queued(queue); ++ if (task == NULL) ++ break; ++ rpc_wake_up_task_queue_locked(queue, task); ++ } ++} ++ + /** + * rpc_wake_up - wake up all rpc_tasks + * @queue: rpc_wait_queue on which the tasks are sleeping +@@ -683,25 +700,28 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_next); + */ + void rpc_wake_up(struct rpc_wait_queue *queue) + { +- struct list_head *head; +- + spin_lock(&queue->lock); +- head = &queue->tasks[queue->maxpriority]; ++ rpc_wake_up_locked(queue); ++ spin_unlock(&queue->lock); ++} ++EXPORT_SYMBOL_GPL(rpc_wake_up); ++ ++/** ++ * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value. ++ * @queue: rpc_wait_queue on which the tasks are sleeping ++ * @status: status value to set ++ */ ++static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status) ++{ ++ struct rpc_task *task; ++ + for (;;) { +- while (!list_empty(head)) { +- struct rpc_task *task; +- task = list_first_entry(head, +- struct rpc_task, +- u.tk_wait.list); +- rpc_wake_up_task_queue_locked(queue, task); +- } +- if (head == &queue->tasks[0]) ++ task = __rpc_find_next_queued(queue); ++ if (task == NULL) + break; +- head--; ++ rpc_wake_up_task_queue_set_status_locked(queue, task, status); + } +- spin_unlock(&queue->lock); + } +-EXPORT_SYMBOL_GPL(rpc_wake_up); + + /** + * rpc_wake_up_status - wake up all rpc_tasks and set their status value. 
+@@ -712,23 +732,8 @@ EXPORT_SYMBOL_GPL(rpc_wake_up); + */ + void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) + { +- struct list_head *head; +- + spin_lock(&queue->lock); +- head = &queue->tasks[queue->maxpriority]; +- for (;;) { +- while (!list_empty(head)) { +- struct rpc_task *task; +- task = list_first_entry(head, +- struct rpc_task, +- u.tk_wait.list); +- task->tk_status = status; +- rpc_wake_up_task_queue_locked(queue, task); +- } +- if (head == &queue->tasks[0]) +- break; +- head--; +- } ++ rpc_wake_up_status_locked(queue, status); + spin_unlock(&queue->lock); + } + EXPORT_SYMBOL_GPL(rpc_wake_up_status); +diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c +index f6c17e75f20ed..57f09ea3ef2af 100644 +--- a/net/sunrpc/xprt.c ++++ b/net/sunrpc/xprt.c +@@ -151,31 +151,64 @@ out: + } + EXPORT_SYMBOL_GPL(xprt_unregister_transport); + ++static void ++xprt_class_release(const struct xprt_class *t) ++{ ++ module_put(t->owner); ++} ++ ++static const struct xprt_class * ++xprt_class_find_by_netid_locked(const char *netid) ++{ ++ const struct xprt_class *t; ++ unsigned int i; ++ ++ list_for_each_entry(t, &xprt_list, list) { ++ for (i = 0; t->netid[i][0] != '\0'; i++) { ++ if (strcmp(t->netid[i], netid) != 0) ++ continue; ++ if (!try_module_get(t->owner)) ++ continue; ++ return t; ++ } ++ } ++ return NULL; ++} ++ ++static const struct xprt_class * ++xprt_class_find_by_netid(const char *netid) ++{ ++ const struct xprt_class *t; ++ ++ spin_lock(&xprt_list_lock); ++ t = xprt_class_find_by_netid_locked(netid); ++ if (!t) { ++ spin_unlock(&xprt_list_lock); ++ request_module("rpc%s", netid); ++ spin_lock(&xprt_list_lock); ++ t = xprt_class_find_by_netid_locked(netid); ++ } ++ spin_unlock(&xprt_list_lock); ++ return t; ++} ++ + /** + * xprt_load_transport - load a transport implementation +- * @transport_name: transport to load ++ * @netid: transport to load + * + * Returns: + * 0: transport successfully loaded + * -ENOENT: transport module not available + */ 
+-int xprt_load_transport(const char *transport_name) ++int xprt_load_transport(const char *netid) + { +- struct xprt_class *t; +- int result; ++ const struct xprt_class *t; + +- result = 0; +- spin_lock(&xprt_list_lock); +- list_for_each_entry(t, &xprt_list, list) { +- if (strcmp(t->name, transport_name) == 0) { +- spin_unlock(&xprt_list_lock); +- goto out; +- } +- } +- spin_unlock(&xprt_list_lock); +- result = request_module("xprt%s", transport_name); +-out: +- return result; ++ t = xprt_class_find_by_netid(netid); ++ if (!t) ++ return -ENOENT; ++ xprt_class_release(t); ++ return 0; + } + EXPORT_SYMBOL_GPL(xprt_load_transport); + +diff --git a/net/sunrpc/xprtrdma/module.c b/net/sunrpc/xprtrdma/module.c +index 620327c01302c..45c5b41ac8dc9 100644 +--- a/net/sunrpc/xprtrdma/module.c ++++ b/net/sunrpc/xprtrdma/module.c +@@ -24,6 +24,7 @@ MODULE_DESCRIPTION("RPC/RDMA Transport"); + MODULE_LICENSE("Dual BSD/GPL"); + MODULE_ALIAS("svcrdma"); + MODULE_ALIAS("xprtrdma"); ++MODULE_ALIAS("rpcrdma6"); + + static void __exit rpc_rdma_cleanup(void) + { +diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c +index 0f5120c7668ff..c48536f2121fb 100644 +--- a/net/sunrpc/xprtrdma/rpc_rdma.c ++++ b/net/sunrpc/xprtrdma/rpc_rdma.c +@@ -179,6 +179,31 @@ rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt, + r_xprt->rx_ep->re_max_inline_recv; + } + ++/* ACL likes to be lazy in allocating pages. For TCP, these ++ * pages can be allocated during receive processing. Not true ++ * for RDMA, which must always provision receive buffers ++ * up front. 
++ */ ++static noinline int ++rpcrdma_alloc_sparse_pages(struct xdr_buf *buf) ++{ ++ struct page **ppages; ++ int len; ++ ++ len = buf->page_len; ++ ppages = buf->pages + (buf->page_base >> PAGE_SHIFT); ++ while (len > 0) { ++ if (!*ppages) ++ *ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN); ++ if (!*ppages) ++ return -ENOBUFS; ++ ppages++; ++ len -= PAGE_SIZE; ++ } ++ ++ return 0; ++} ++ + /* Split @vec on page boundaries into SGEs. FMR registers pages, not + * a byte range. Other modes coalesce these SGEs into a single MR + * when they can. +@@ -233,15 +258,6 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf, + ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT); + page_base = offset_in_page(xdrbuf->page_base); + while (len) { +- /* ACL likes to be lazy in allocating pages - ACLs +- * are small by default but can get huge. +- */ +- if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) { +- if (!*ppages) +- *ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN); +- if (!*ppages) +- return -ENOBUFS; +- } + seg->mr_page = *ppages; + seg->mr_offset = (char *)page_base; + seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len); +@@ -867,6 +883,12 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst) + __be32 *p; + int ret; + ++ if (unlikely(rqst->rq_rcv_buf.flags & XDRBUF_SPARSE_PAGES)) { ++ ret = rpcrdma_alloc_sparse_pages(&rqst->rq_rcv_buf); ++ if (ret) ++ return ret; ++ } ++ + rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0); + xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf), + rqst); +diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c +index 8915e42240d38..035060c05fd5a 100644 +--- a/net/sunrpc/xprtrdma/transport.c ++++ b/net/sunrpc/xprtrdma/transport.c +@@ -768,6 +768,7 @@ static struct xprt_class xprt_rdma = { + .owner = THIS_MODULE, + .ident = XPRT_TRANSPORT_RDMA, + .setup = xprt_setup_rdma, ++ .netid = { "rdma", "rdma6", "" }, + }; + + void xprt_rdma_cleanup(void) +diff --git 
a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c +index 7090bbee0ec59..c56a66cdf4ac8 100644 +--- a/net/sunrpc/xprtsock.c ++++ b/net/sunrpc/xprtsock.c +@@ -433,7 +433,8 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, + if (ret <= 0) + goto sock_err; + xs_flush_bvec(buf->bvec, ret, seek + buf->page_base); +- offset += ret - buf->page_base; ++ ret -= buf->page_base; ++ offset += ret; + if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) + goto out; + if (ret != want) +@@ -3059,6 +3060,7 @@ static struct xprt_class xs_local_transport = { + .owner = THIS_MODULE, + .ident = XPRT_TRANSPORT_LOCAL, + .setup = xs_setup_local, ++ .netid = { "" }, + }; + + static struct xprt_class xs_udp_transport = { +@@ -3067,6 +3069,7 @@ static struct xprt_class xs_udp_transport = { + .owner = THIS_MODULE, + .ident = XPRT_TRANSPORT_UDP, + .setup = xs_setup_udp, ++ .netid = { "udp", "udp6", "" }, + }; + + static struct xprt_class xs_tcp_transport = { +@@ -3075,6 +3078,7 @@ static struct xprt_class xs_tcp_transport = { + .owner = THIS_MODULE, + .ident = XPRT_TRANSPORT_TCP, + .setup = xs_setup_tcp, ++ .netid = { "tcp", "tcp6", "" }, + }; + + static struct xprt_class xs_bc_tcp_transport = { +@@ -3083,6 +3087,7 @@ static struct xprt_class xs_bc_tcp_transport = { + .owner = THIS_MODULE, + .ident = XPRT_TRANSPORT_BC_TCP, + .setup = xs_setup_bc_tcp, ++ .netid = { "" }, + }; + + /** +diff --git a/net/wireless/scan.c b/net/wireless/scan.c +index 8d0e49c46db37..3409f37d838b3 100644 +--- a/net/wireless/scan.c ++++ b/net/wireless/scan.c +@@ -694,7 +694,7 @@ static void cfg80211_scan_req_add_chan(struct cfg80211_scan_request *request, + static bool cfg80211_find_ssid_match(struct cfg80211_colocated_ap *ap, + struct cfg80211_scan_request *request) + { +- u8 i; ++ int i; + u32 s_ssid; + + for (i = 0; i < request->n_ssids; i++) { +diff --git a/samples/bpf/lwt_len_hist.sh b/samples/bpf/lwt_len_hist.sh +old mode 100644 +new mode 100755 +index 090b96eaf7f76..0eda9754f50b8 +--- 
a/samples/bpf/lwt_len_hist.sh ++++ b/samples/bpf/lwt_len_hist.sh +@@ -8,6 +8,8 @@ VETH1=tst_lwt1b + TRACE_ROOT=/sys/kernel/debug/tracing + + function cleanup { ++ # To reset saved histogram, remove pinned map ++ rm /sys/fs/bpf/tc/globals/lwt_len_hist_map + ip route del 192.168.253.2/32 dev $VETH0 2> /dev/null + ip link del $VETH0 2> /dev/null + ip link del $VETH1 2> /dev/null +diff --git a/samples/bpf/test_lwt_bpf.sh b/samples/bpf/test_lwt_bpf.sh +old mode 100644 +new mode 100755 +diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c +index 1149e94ca32fd..33c58de58626c 100644 +--- a/samples/bpf/xdpsock_user.c ++++ b/samples/bpf/xdpsock_user.c +@@ -1250,6 +1250,8 @@ static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size) + while (xsk_ring_prod__reserve(&xsk->tx, batch_size, &idx) < + batch_size) { + complete_tx_only(xsk, batch_size); ++ if (benchmark_done) ++ return; + } + + for (i = 0; i < batch_size; i++) { +diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl +index fab38b493cef7..0ad235ee96f91 100755 +--- a/scripts/checkpatch.pl ++++ b/scripts/checkpatch.pl +@@ -4384,7 +4384,7 @@ sub process { + $fix) { + fix_delete_line($fixlinenr, $rawline); + my $fixed_line = $rawline; +- $fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*){(.*)$/; ++ $fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*)\{(.*)$/; + my $line1 = $1; + my $line2 = $2; + fix_insert_line($fixlinenr, ltrim($line1)); +diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c +index 0243086fb1685..0590f86df6e40 100644 +--- a/scripts/kconfig/preprocess.c ++++ b/scripts/kconfig/preprocess.c +@@ -114,7 +114,7 @@ static char *do_error_if(int argc, char *argv[]) + if (!strcmp(argv[0], "y")) + pperror("%s", argv[1]); + +- return NULL; ++ return xstrdup(""); + } + + static char *do_filename(int argc, char *argv[]) +diff --git a/scripts/kernel-doc b/scripts/kernel-doc +index f699cf05d4098..6325bec3f66f8 100755 +--- a/scripts/kernel-doc ++++ b/scripts/kernel-doc 
+@@ -1390,7 +1390,7 @@ sub dump_enum($$) { + $members = $2; + } + +- if ($declaration_name) { ++ if ($members) { + my %_members; + + $members =~ s/\s+$//; +@@ -1431,7 +1431,7 @@ sub dump_enum($$) { + } + } + +-my $typedef_type = qr { ((?:\s+[\w\*]+){1,8})\s* }x; ++my $typedef_type = qr { ((?:\s+[\w\*]+\b){1,8})\s* }x; + my $typedef_ident = qr { \*?\s*(\w\S+)\s* }x; + my $typedef_args = qr { \s*\((.*)\); }x; + +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c +index 21989fa0c1074..f6a7e9643b546 100644 +--- a/security/integrity/ima/ima_crypto.c ++++ b/security/integrity/ima/ima_crypto.c +@@ -537,7 +537,7 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) + loff_t i_size; + int rc; + struct file *f = file; +- bool new_file_instance = false, modified_mode = false; ++ bool new_file_instance = false; + + /* + * For consistency, fail file's opened with the O_DIRECT flag on +@@ -555,18 +555,10 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) + O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL); + flags |= O_RDONLY; + f = dentry_open(&file->f_path, flags, file->f_cred); +- if (IS_ERR(f)) { +- /* +- * Cannot open the file again, lets modify f_mode +- * of original and continue +- */ +- pr_info_ratelimited("Unable to reopen file for reading.\n"); +- f = file; +- f->f_mode |= FMODE_READ; +- modified_mode = true; +- } else { +- new_file_instance = true; +- } ++ if (IS_ERR(f)) ++ return PTR_ERR(f); ++ ++ new_file_instance = true; + } + + i_size = i_size_read(file_inode(f)); +@@ -581,8 +573,6 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) + out: + if (new_file_instance) + fput(f); +- else if (modified_mode) +- f->f_mode &= ~FMODE_READ; + return rc; + } + +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c +index 6b1826fc3658e..c46312710e73e 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -1451,7 +1451,7 @@ static int 
inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent + * inode_doinit with a dentry, before these inodes could + * be used again by userspace. + */ +- goto out; ++ goto out_invalid; + } + + rc = inode_doinit_use_xattr(inode, dentry, sbsec->def_sid, +@@ -1508,7 +1508,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent + * could be used again by userspace. + */ + if (!dentry) +- goto out; ++ goto out_invalid; + rc = selinux_genfs_get_sid(dentry, sclass, + sbsec->flags, &sid); + if (rc) { +@@ -1533,11 +1533,10 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent + out: + spin_lock(&isec->lock); + if (isec->initialized == LABEL_PENDING) { +- if (!sid || rc) { ++ if (rc) { + isec->initialized = LABEL_INVALID; + goto out_unlock; + } +- + isec->initialized = LABEL_INITIALIZED; + isec->sid = sid; + } +@@ -1545,6 +1544,15 @@ out: + out_unlock: + spin_unlock(&isec->lock); + return rc; ++ ++out_invalid: ++ spin_lock(&isec->lock); ++ if (isec->initialized == LABEL_PENDING) { ++ isec->initialized = LABEL_INVALID; ++ isec->sid = sid; ++ } ++ spin_unlock(&isec->lock); ++ return 0; + } + + /* Convert a Linux signal to an access vector. */ +diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c +index efe2406a39609..7eabb448acab4 100644 +--- a/security/smack/smack_access.c ++++ b/security/smack/smack_access.c +@@ -688,9 +688,10 @@ bool smack_privileged_cred(int cap, const struct cred *cred) + bool smack_privileged(int cap) + { + /* +- * All kernel tasks are privileged ++ * Kernel threads may not have credentials we can use. ++ * The io_uring kernel threads do have reliable credentials. 
+ */ +- if (unlikely(current->flags & PF_KTHREAD)) ++ if ((current->flags & (PF_KTHREAD | PF_IO_WORKER)) == PF_KTHREAD) + return true; + + return smack_privileged_cred(cap, current_cred()); +diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c +index 0aeeb6244ff6c..0f335162f87c7 100644 +--- a/sound/core/memalloc.c ++++ b/sound/core/memalloc.c +@@ -77,7 +77,8 @@ static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size) + /* Assign the pool into private_data field */ + dmab->private_data = pool; + +- dmab->area = gen_pool_dma_alloc(pool, size, &dmab->addr); ++ dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr, ++ PAGE_SIZE); + } + + /** +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c +index de1917484647e..142fc751a8477 100644 +--- a/sound/core/oss/pcm_oss.c ++++ b/sound/core/oss/pcm_oss.c +@@ -693,6 +693,8 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, + + oss_buffer_size = snd_pcm_plug_client_size(substream, + snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size; ++ if (!oss_buffer_size) ++ return -EINVAL; + oss_buffer_size = rounddown_pow_of_two(oss_buffer_size); + if (atomic_read(&substream->mmap_count)) { + if (oss_buffer_size > runtime->oss.mmap_bytes) +@@ -728,17 +730,21 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, + + min_period_size = snd_pcm_plug_client_size(substream, + snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL)); +- min_period_size *= oss_frame_size; +- min_period_size = roundup_pow_of_two(min_period_size); +- if (oss_period_size < min_period_size) +- oss_period_size = min_period_size; ++ if (min_period_size) { ++ min_period_size *= oss_frame_size; ++ min_period_size = roundup_pow_of_two(min_period_size); ++ if (oss_period_size < min_period_size) ++ oss_period_size = min_period_size; ++ } + + max_period_size = snd_pcm_plug_client_size(substream, + 
snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL)); +- max_period_size *= oss_frame_size; +- max_period_size = rounddown_pow_of_two(max_period_size); +- if (oss_period_size > max_period_size) +- oss_period_size = max_period_size; ++ if (max_period_size) { ++ max_period_size *= oss_frame_size; ++ max_period_size = rounddown_pow_of_two(max_period_size); ++ if (oss_period_size > max_period_size) ++ oss_period_size = max_period_size; ++ } + + oss_periods = oss_buffer_size / oss_period_size; + +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c +index 4bb58e8b08a85..687216e745267 100644 +--- a/sound/pci/hda/hda_codec.c ++++ b/sound/pci/hda/hda_codec.c +@@ -1803,7 +1803,7 @@ int snd_hda_codec_reset(struct hda_codec *codec) + return -EBUSY; + + /* OK, let it free */ +- snd_hdac_device_unregister(&codec->core); ++ device_release_driver(hda_codec_dev(codec)); + + /* allow device access again */ + snd_hda_unlock_devices(bus); +diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c +index eb8ec109d7adb..d5ffcba794e50 100644 +--- a/sound/pci/hda/hda_sysfs.c ++++ b/sound/pci/hda/hda_sysfs.c +@@ -139,7 +139,7 @@ static int reconfig_codec(struct hda_codec *codec) + "The codec is being used, can't reconfigure.\n"); + goto error; + } +- err = snd_hda_codec_configure(codec); ++ err = device_reprobe(hda_codec_dev(codec)); + if (err < 0) + goto error; + err = snd_card_register(codec->card); +diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c +index d8370a417e3d4..ee500e46dd4f6 100644 +--- a/sound/pci/hda/patch_ca0132.c ++++ b/sound/pci/hda/patch_ca0132.c +@@ -95,7 +95,7 @@ enum { + }; + + /* Strings for Input Source Enum Control */ +-static const char *const in_src_str[3] = {"Rear Mic", "Line", "Front Mic" }; ++static const char *const in_src_str[3] = { "Microphone", "Line In", "Front Microphone" }; + #define IN_SRC_NUM_OF_INPUTS 3 + enum { + REAR_MIC, +@@ -1223,7 +1223,7 @@ static const struct hda_pintbl 
ae5_pincfgs[] = { + { 0x0e, 0x01c510f0 }, /* SPDIF In */ + { 0x0f, 0x01017114 }, /* Port A -- Rear L/R. */ + { 0x10, 0x01017012 }, /* Port D -- Center/LFE or FP Hp */ +- { 0x11, 0x01a170ff }, /* Port B -- LineMicIn2 / Rear Headphone */ ++ { 0x11, 0x012170ff }, /* Port B -- LineMicIn2 / Rear Headphone */ + { 0x12, 0x01a170f0 }, /* Port C -- LineIn1 */ + { 0x13, 0x908700f0 }, /* What U Hear In*/ + { 0x18, 0x50d000f0 }, /* N/A */ +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c +index b0068f8ca46dd..2ddc27db8c012 100644 +--- a/sound/pci/hda/patch_hdmi.c ++++ b/sound/pci/hda/patch_hdmi.c +@@ -78,6 +78,7 @@ struct hdmi_spec_per_pin { + int pcm_idx; /* which pcm is attached. -1 means no pcm is attached */ + int repoll_count; + bool setup; /* the stream has been set up by prepare callback */ ++ bool silent_stream; + int channels; /* current number of channels */ + bool non_pcm; + bool chmap_set; /* channel-map override by ALSA API? */ +@@ -979,6 +980,13 @@ static int hdmi_choose_cvt(struct hda_codec *codec, + else + per_pin = get_pin(spec, pin_idx); + ++ if (per_pin && per_pin->silent_stream) { ++ cvt_idx = cvt_nid_to_cvt_index(codec, per_pin->cvt_nid); ++ if (cvt_id) ++ *cvt_id = cvt_idx; ++ return 0; ++ } ++ + /* Dynamically assign converter to stream */ + for (cvt_idx = 0; cvt_idx < spec->num_cvts; cvt_idx++) { + per_cvt = get_cvt(spec, cvt_idx); +@@ -1642,30 +1650,95 @@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin, + snd_hda_power_down_pm(codec); + } + ++#define I915_SILENT_RATE 48000 ++#define I915_SILENT_CHANNELS 2 ++#define I915_SILENT_FORMAT SNDRV_PCM_FORMAT_S16_LE ++#define I915_SILENT_FORMAT_BITS 16 ++#define I915_SILENT_FMT_MASK 0xf ++ + static void silent_stream_enable(struct hda_codec *codec, +- struct hdmi_spec_per_pin *per_pin) ++ struct hdmi_spec_per_pin *per_pin) + { +- unsigned int newval, oldval; +- +- codec_dbg(codec, "hdmi: enabling silent stream for NID %d\n", +- per_pin->pin_nid); ++ struct 
hdmi_spec *spec = codec->spec; ++ struct hdmi_spec_per_cvt *per_cvt; ++ int cvt_idx, pin_idx, err; ++ unsigned int format; + + mutex_lock(&per_pin->lock); + +- if (!per_pin->channels) +- per_pin->channels = 2; ++ if (per_pin->setup) { ++ codec_dbg(codec, "hdmi: PCM already open, no silent stream\n"); ++ goto unlock_out; ++ } + +- oldval = snd_hda_codec_read(codec, per_pin->pin_nid, 0, +- AC_VERB_GET_CONV, 0); +- newval = (oldval & 0xF0) | 0xF; +- snd_hda_codec_write(codec, per_pin->pin_nid, 0, +- AC_VERB_SET_CHANNEL_STREAMID, newval); ++ pin_idx = pin_id_to_pin_index(codec, per_pin->pin_nid, per_pin->dev_id); ++ err = hdmi_choose_cvt(codec, pin_idx, &cvt_idx); ++ if (err) { ++ codec_err(codec, "hdmi: no free converter to enable silent mode\n"); ++ goto unlock_out; ++ } ++ ++ per_cvt = get_cvt(spec, cvt_idx); ++ per_cvt->assigned = 1; ++ per_pin->cvt_nid = per_cvt->cvt_nid; ++ per_pin->silent_stream = true; + ++ codec_dbg(codec, "hdmi: enabling silent stream pin-NID=0x%x cvt-NID=0x%x\n", ++ per_pin->pin_nid, per_cvt->cvt_nid); ++ ++ snd_hda_set_dev_select(codec, per_pin->pin_nid, per_pin->dev_id); ++ snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0, ++ AC_VERB_SET_CONNECT_SEL, ++ per_pin->mux_idx); ++ ++ /* configure unused pins to choose other converters */ ++ pin_cvt_fixup(codec, per_pin, 0); ++ ++ snd_hdac_sync_audio_rate(&codec->core, per_pin->pin_nid, ++ per_pin->dev_id, I915_SILENT_RATE); ++ ++ /* trigger silent stream generation in hw */ ++ format = snd_hdac_calc_stream_format(I915_SILENT_RATE, I915_SILENT_CHANNELS, ++ I915_SILENT_FORMAT, I915_SILENT_FORMAT_BITS, 0); ++ snd_hda_codec_setup_stream(codec, per_pin->cvt_nid, ++ I915_SILENT_FMT_MASK, I915_SILENT_FMT_MASK, format); ++ usleep_range(100, 200); ++ snd_hda_codec_setup_stream(codec, per_pin->cvt_nid, I915_SILENT_FMT_MASK, 0, format); ++ ++ per_pin->channels = I915_SILENT_CHANNELS; + hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm); + ++ unlock_out: + mutex_unlock(&per_pin->lock); + } + 
++static void silent_stream_disable(struct hda_codec *codec, ++ struct hdmi_spec_per_pin *per_pin) ++{ ++ struct hdmi_spec *spec = codec->spec; ++ struct hdmi_spec_per_cvt *per_cvt; ++ int cvt_idx; ++ ++ mutex_lock(&per_pin->lock); ++ if (!per_pin->silent_stream) ++ goto unlock_out; ++ ++ codec_dbg(codec, "HDMI: disable silent stream on pin-NID=0x%x cvt-NID=0x%x\n", ++ per_pin->pin_nid, per_pin->cvt_nid); ++ ++ cvt_idx = cvt_nid_to_cvt_index(codec, per_pin->cvt_nid); ++ if (cvt_idx >= 0 && cvt_idx < spec->num_cvts) { ++ per_cvt = get_cvt(spec, cvt_idx); ++ per_cvt->assigned = 0; ++ } ++ ++ per_pin->cvt_nid = 0; ++ per_pin->silent_stream = false; ++ ++ unlock_out: ++ mutex_unlock(&spec->pcm_lock); ++} ++ + /* update ELD and jack state via audio component */ + static void sync_eld_via_acomp(struct hda_codec *codec, + struct hdmi_spec_per_pin *per_pin) +@@ -1701,6 +1774,7 @@ static void sync_eld_via_acomp(struct hda_codec *codec, + pm_ret); + silent_stream_enable(codec, per_pin); + } else if (monitor_prev && !monitor_next) { ++ silent_stream_disable(codec, per_pin); + pm_ret = snd_hda_power_down_pm(codec); + if (pm_ret < 0) + codec_err(codec, +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 8616c56248707..dde5ba2095415 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -2516,6 +2516,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950), ++ SND_PCI_QUIRK(0x1462, 0x1229, "MSI-GP73", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950), +@@ -3104,6 +3105,7 @@ static void 
alc_disable_headset_jack_key(struct hda_codec *codec) + case 0x10ec0215: + case 0x10ec0225: + case 0x10ec0285: ++ case 0x10ec0287: + case 0x10ec0295: + case 0x10ec0289: + case 0x10ec0299: +@@ -3130,6 +3132,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec) + case 0x10ec0215: + case 0x10ec0225: + case 0x10ec0285: ++ case 0x10ec0287: + case 0x10ec0295: + case 0x10ec0289: + case 0x10ec0299: +@@ -6366,6 +6369,7 @@ enum { + ALC287_FIXUP_HP_GPIO_LED, + ALC256_FIXUP_HP_HEADSET_MIC, + ALC236_FIXUP_DELL_AIO_HEADSET_MIC, ++ ALC282_FIXUP_ACER_DISABLE_LINEOUT, + }; + + static const struct hda_fixup alc269_fixups[] = { +@@ -7789,6 +7793,16 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE + }, ++ [ALC282_FIXUP_ACER_DISABLE_LINEOUT] = { ++ .type = HDA_FIXUP_PINS, ++ .v.pins = (const struct hda_pintbl[]) { ++ { 0x1b, 0x411111f0 }, ++ { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */ ++ { }, ++ }, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_HEADSET_MODE ++ }, + }; + + static const struct snd_pci_quirk alc269_fixup_tbl[] = { +@@ -7803,11 +7817,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), + SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), + SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), ++ SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK), + SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK), + SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", 
ALC255_FIXUP_ACER_MIC_NO_PRESENCE), ++ SND_PCI_QUIRK(0x1025, 0x1166, "Acer Veriton N4640G", ALC269_FIXUP_LIFEBOOK), ++ SND_PCI_QUIRK(0x1025, 0x1167, "Acer Veriton N6640G", ALC269_FIXUP_LIFEBOOK), + SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK), + SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS), + SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE), +@@ -7868,6 +7885,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC), + SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC), ++ SND_PCI_QUIRK(0x1028, 0x0a58, "Dell Precision 3650 Tower", ALC255_FIXUP_DELL_HEADSET_MIC), + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), +@@ -7956,6 +7974,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), ++ SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC), +@@ -7976,6 +7995,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), + SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", 
ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), ++ SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE), + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), +@@ -8013,6 +8033,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), ++ SND_PCI_QUIRK(0x152d, 0x1082, "Quanta NL3", ALC269_FIXUP_LIFEBOOK), + SND_PCI_QUIRK(0x1558, 0x1323, "Clevo N130ZU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x1401, "Clevo L140[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), +@@ -8560,6 +8581,22 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + {0x12, 0x90a60140}, + {0x19, 0x04a11030}, + {0x21, 0x04211020}), ++ SND_HDA_PIN_QUIRK(0x10ec0282, 0x1025, "Acer", ALC282_FIXUP_ACER_DISABLE_LINEOUT, ++ ALC282_STANDARD_PINS, ++ {0x12, 0x90a609c0}, ++ {0x18, 0x03a11830}, ++ {0x19, 0x04a19831}, ++ {0x1a, 0x0481303f}, ++ {0x1b, 0x04211020}, ++ {0x21, 0x0321101f}), ++ SND_HDA_PIN_QUIRK(0x10ec0282, 0x1025, "Acer", ALC282_FIXUP_ACER_DISABLE_LINEOUT, ++ ALC282_STANDARD_PINS, ++ {0x12, 0x90a60940}, ++ {0x18, 0x03a11830}, ++ {0x19, 0x04a19831}, ++ {0x1a, 0x0481303f}, ++ {0x1b, 0x04211020}, ++ {0x21, 0x0321101f}), + SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC282_STANDARD_PINS, + {0x12, 0x90a60130}, +@@ -8573,11 +8610,20 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + {0x12, 0x90a60130}, + {0x19, 
0x03a11020}, + {0x21, 0x0321101f}), ++ SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK, ++ {0x14, 0x90170110}, ++ {0x19, 0x04a11040}, ++ {0x21, 0x04211020}), + SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE, + {0x12, 0x90a60130}, + {0x14, 0x90170110}, + {0x19, 0x04a11040}, + {0x21, 0x04211020}), ++ SND_HDA_PIN_QUIRK(0x10ec0287, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_HEADSET_JACK, ++ {0x14, 0x90170110}, ++ {0x17, 0x90170111}, ++ {0x19, 0x03a11030}, ++ {0x21, 0x03211020}), + SND_HDA_PIN_QUIRK(0x10ec0286, 0x1025, "Acer", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE, + {0x12, 0x90a60130}, + {0x17, 0x90170110}, +diff --git a/sound/soc/amd/acp-da7219-max98357a.c b/sound/soc/amd/acp-da7219-max98357a.c +index a7702e64ec512..849288d01c6b4 100644 +--- a/sound/soc/amd/acp-da7219-max98357a.c ++++ b/sound/soc/amd/acp-da7219-max98357a.c +@@ -73,8 +73,13 @@ static int cz_da7219_init(struct snd_soc_pcm_runtime *rtd) + return ret; + } + +- da7219_dai_wclk = clk_get(component->dev, "da7219-dai-wclk"); +- da7219_dai_bclk = clk_get(component->dev, "da7219-dai-bclk"); ++ da7219_dai_wclk = devm_clk_get(component->dev, "da7219-dai-wclk"); ++ if (IS_ERR(da7219_dai_wclk)) ++ return PTR_ERR(da7219_dai_wclk); ++ ++ da7219_dai_bclk = devm_clk_get(component->dev, "da7219-dai-bclk"); ++ if (IS_ERR(da7219_dai_bclk)) ++ return PTR_ERR(da7219_dai_bclk); + + ret = snd_soc_card_jack_new(card, "Headset Jack", + SND_JACK_HEADSET | SND_JACK_LINEOUT | +diff --git a/sound/soc/amd/raven/pci-acp3x.c b/sound/soc/amd/raven/pci-acp3x.c +index 31b797c8bfe64..77f2d93896067 100644 +--- a/sound/soc/amd/raven/pci-acp3x.c ++++ b/sound/soc/amd/raven/pci-acp3x.c +@@ -118,6 +118,10 @@ static int snd_acp3x_probe(struct pci_dev *pci, + int ret, i; + u32 addr, val; + ++ /* Raven device detection */ ++ if (pci->revision != 0x00) ++ return -ENODEV; ++ + if (pci_enable_device(pci)) { + dev_err(&pci->dev, "pci_enable_device failed\n"); + return 
-ENODEV; +diff --git a/sound/soc/amd/renoir/rn-pci-acp3x.c b/sound/soc/amd/renoir/rn-pci-acp3x.c +index b943e59fc3024..338b78c514ec9 100644 +--- a/sound/soc/amd/renoir/rn-pci-acp3x.c ++++ b/sound/soc/amd/renoir/rn-pci-acp3x.c +@@ -6,6 +6,7 @@ + + #include <linux/pci.h> + #include <linux/acpi.h> ++#include <linux/dmi.h> + #include <linux/module.h> + #include <linux/io.h> + #include <linux/delay.h> +@@ -20,14 +21,13 @@ module_param(acp_power_gating, int, 0644); + MODULE_PARM_DESC(acp_power_gating, "Enable acp power gating"); + + /** +- * dmic_acpi_check = -1 - Checks ACPI method to know DMIC hardware status runtime +- * = 0 - Skips the DMIC device creation and returns probe failure +- * = 1 - Assumes that platform has DMIC support and skips ACPI +- * method check ++ * dmic_acpi_check = -1 - Use ACPI/DMI method to detect the DMIC hardware presence at runtime ++ * = 0 - Skip the DMIC device creation and return probe failure ++ * = 1 - Force DMIC support + */ + static int dmic_acpi_check = ACP_DMIC_AUTO; + module_param(dmic_acpi_check, bint, 0644); +-MODULE_PARM_DESC(dmic_acpi_check, "checks Dmic hardware runtime"); ++MODULE_PARM_DESC(dmic_acpi_check, "Digital microphone presence (-1=auto, 0=none, 1=force)"); + + struct acp_dev_data { + void __iomem *acp_base; +@@ -163,6 +163,17 @@ static int rn_acp_deinit(void __iomem *acp_base) + return 0; + } + ++static const struct dmi_system_id rn_acp_quirk_table[] = { ++ { ++ /* Lenovo IdeaPad Flex 5 14ARE05, IdeaPad 5 15ARE05 */ ++ .matches = { ++ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"), ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "LNVNB161216"), ++ } ++ }, ++ {} ++}; ++ + static int snd_rn_acp_probe(struct pci_dev *pci, + const struct pci_device_id *pci_id) + { +@@ -172,10 +183,15 @@ static int snd_rn_acp_probe(struct pci_dev *pci, + acpi_handle handle; + acpi_integer dmic_status; + #endif ++ const struct dmi_system_id *dmi_id; + unsigned int irqflags; + int ret, index; + u32 addr; + ++ /* Renoir device check */ ++ if (pci->revision 
!= 0x01) ++ return -ENODEV; ++ + if (pci_enable_device(pci)) { + dev_err(&pci->dev, "pci_enable_device failed\n"); + return -ENODEV; +@@ -232,6 +248,12 @@ static int snd_rn_acp_probe(struct pci_dev *pci, + goto de_init; + } + #endif ++ dmi_id = dmi_first_match(rn_acp_quirk_table); ++ if (dmi_id && !dmi_id->driver_data) { ++ dev_info(&pci->dev, "ACPI settings override using DMI (ACP mic is not present)"); ++ ret = -ENODEV; ++ goto de_init; ++ } + } + + adata->res = devm_kzalloc(&pci->dev, +diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig +index bd8854bfd2ee4..142373ec411ad 100644 +--- a/sound/soc/atmel/Kconfig ++++ b/sound/soc/atmel/Kconfig +@@ -148,6 +148,7 @@ config SND_MCHP_SOC_SPDIFTX + config SND_MCHP_SOC_SPDIFRX + tristate "Microchip ASoC driver for boards using S/PDIF RX" + depends on OF && (ARCH_AT91 || COMPILE_TEST) ++ depends on COMMON_CLK + select SND_SOC_GENERIC_DMAENGINE_PCM + select REGMAP_MMIO + help +diff --git a/sound/soc/codecs/cros_ec_codec.c b/sound/soc/codecs/cros_ec_codec.c +index 28f039adfa138..5c3b7e5e55d23 100644 +--- a/sound/soc/codecs/cros_ec_codec.c ++++ b/sound/soc/codecs/cros_ec_codec.c +@@ -332,7 +332,7 @@ static int i2s_rx_event(struct snd_soc_dapm_widget *w, + snd_soc_dapm_to_component(w->dapm); + struct cros_ec_codec_priv *priv = + snd_soc_component_get_drvdata(component); +- struct ec_param_ec_codec_i2s_rx p; ++ struct ec_param_ec_codec_i2s_rx p = {}; + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: +diff --git a/sound/soc/codecs/cx2072x.c b/sound/soc/codecs/cx2072x.c +index 2ad00ed21bec6..2f10991a8bdb5 100644 +--- a/sound/soc/codecs/cx2072x.c ++++ b/sound/soc/codecs/cx2072x.c +@@ -1579,7 +1579,7 @@ static struct snd_soc_dai_driver soc_codec_cx2072x_dai[] = { + .id = CX2072X_DAI_DSP, + .probe = cx2072x_dsp_dai_probe, + .playback = { +- .stream_name = "Playback", ++ .stream_name = "DSP Playback", + .channels_min = 2, + .channels_max = 2, + .rates = CX2072X_RATES_DSP, +@@ -1591,7 +1591,7 @@ static struct 
snd_soc_dai_driver soc_codec_cx2072x_dai[] = { + .name = "cx2072x-aec", + .id = 3, + .capture = { +- .stream_name = "Capture", ++ .stream_name = "AEC Capture", + .channels_min = 2, + .channels_max = 2, + .rates = CX2072X_RATES_DSP, +diff --git a/sound/soc/codecs/max98390.c b/sound/soc/codecs/max98390.c +index ff5cc9bbec291..bb736c44e68a3 100644 +--- a/sound/soc/codecs/max98390.c ++++ b/sound/soc/codecs/max98390.c +@@ -784,6 +784,7 @@ static int max98390_dsm_init(struct snd_soc_component *component) + if (fw->size < MAX98390_DSM_PARAM_MIN_SIZE) { + dev_err(component->dev, + "param fw is invalid.\n"); ++ ret = -EINVAL; + goto err_alloc; + } + dsm_param = (char *)fw->data; +@@ -794,6 +795,7 @@ static int max98390_dsm_init(struct snd_soc_component *component) + fw->size < param_size + MAX98390_DSM_PAYLOAD_OFFSET) { + dev_err(component->dev, + "param fw is invalid.\n"); ++ ret = -EINVAL; + goto err_alloc; + } + regmap_write(max98390->regmap, MAX98390_R203A_AMP_EN, 0x80); +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c +index fc9ea198ac799..f57884113406b 100644 +--- a/sound/soc/codecs/wm8994.c ++++ b/sound/soc/codecs/wm8994.c +@@ -4645,8 +4645,12 @@ static int wm8994_probe(struct platform_device *pdev) + pm_runtime_enable(&pdev->dev); + pm_runtime_idle(&pdev->dev); + +- return devm_snd_soc_register_component(&pdev->dev, &soc_component_dev_wm8994, ++ ret = devm_snd_soc_register_component(&pdev->dev, &soc_component_dev_wm8994, + wm8994_dai, ARRAY_SIZE(wm8994_dai)); ++ if (ret < 0) ++ pm_runtime_disable(&pdev->dev); ++ ++ return ret; + } + + static int wm8994_remove(struct platform_device *pdev) +diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c +index 37e4bb3dbd8a9..229f2986cd96b 100644 +--- a/sound/soc/codecs/wm8997.c ++++ b/sound/soc/codecs/wm8997.c +@@ -1177,6 +1177,8 @@ static int wm8997_probe(struct platform_device *pdev) + goto err_spk_irqs; + } + ++ return ret; ++ + err_spk_irqs: + arizona_free_spk_irqs(arizona); + +diff --git 
a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c +index f6c5cc80c970b..5413254295b70 100644 +--- a/sound/soc/codecs/wm8998.c ++++ b/sound/soc/codecs/wm8998.c +@@ -1375,7 +1375,7 @@ static int wm8998_probe(struct platform_device *pdev) + + ret = arizona_init_spk_irqs(arizona); + if (ret < 0) +- return ret; ++ goto err_pm_disable; + + ret = devm_snd_soc_register_component(&pdev->dev, + &soc_component_dev_wm8998, +@@ -1390,6 +1390,8 @@ static int wm8998_probe(struct platform_device *pdev) + + err_spk_irqs: + arizona_free_spk_irqs(arizona); ++err_pm_disable: ++ pm_runtime_disable(&pdev->dev); + + return ret; + } +diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c +index e61d00486c653..dec8716aa8ef5 100644 +--- a/sound/soc/codecs/wm_adsp.c ++++ b/sound/soc/codecs/wm_adsp.c +@@ -1519,7 +1519,7 @@ static int wm_adsp_create_control(struct wm_adsp *dsp, + ctl_work = kzalloc(sizeof(*ctl_work), GFP_KERNEL); + if (!ctl_work) { + ret = -ENOMEM; +- goto err_ctl_cache; ++ goto err_list_del; + } + + ctl_work->dsp = dsp; +@@ -1529,7 +1529,8 @@ static int wm_adsp_create_control(struct wm_adsp *dsp, + + return 0; + +-err_ctl_cache: ++err_list_del: ++ list_del(&ctl->list); + kfree(ctl->cache); + err_ctl_subname: + kfree(ctl->subname); +diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig +index a5b446d5af19f..c1bf69a0bcfe1 100644 +--- a/sound/soc/intel/Kconfig ++++ b/sound/soc/intel/Kconfig +@@ -198,7 +198,7 @@ endif ## SND_SOC_INTEL_SST_TOPLEVEL || SND_SOC_SOF_INTEL_TOPLEVEL + + config SND_SOC_INTEL_KEEMBAY + tristate "Keembay Platforms" +- depends on ARM64 || COMPILE_TEST ++ depends on ARCH_KEEMBAY || COMPILE_TEST + depends on COMMON_CLK + help + If you have a Intel Keembay platform then enable this option +diff --git a/sound/soc/intel/boards/sof_maxim_common.c b/sound/soc/intel/boards/sof_maxim_common.c +index b6e63ea13d64e..c2a9757181fe1 100644 +--- a/sound/soc/intel/boards/sof_maxim_common.c ++++ b/sound/soc/intel/boards/sof_maxim_common.c +@@ 
-49,11 +49,11 @@ static int max98373_hw_params(struct snd_pcm_substream *substream, + for_each_rtd_codec_dais(rtd, j, codec_dai) { + if (!strcmp(codec_dai->component->name, MAX_98373_DEV0_NAME)) { + /* DEV0 tdm slot configuration */ +- snd_soc_dai_set_tdm_slot(codec_dai, 0x03, 3, 8, 24); ++ snd_soc_dai_set_tdm_slot(codec_dai, 0x03, 3, 8, 32); + } + if (!strcmp(codec_dai->component->name, MAX_98373_DEV1_NAME)) { + /* DEV1 tdm slot configuration */ +- snd_soc_dai_set_tdm_slot(codec_dai, 0x0C, 3, 8, 24); ++ snd_soc_dai_set_tdm_slot(codec_dai, 0x0C, 3, 8, 32); + } + } + return 0; +diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c +index c7bd20104b204..0793e284d0e78 100644 +--- a/sound/soc/jz4740/jz4740-i2s.c ++++ b/sound/soc/jz4740/jz4740-i2s.c +@@ -312,10 +312,14 @@ static int jz4740_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id, + switch (clk_id) { + case JZ4740_I2S_CLKSRC_EXT: + parent = clk_get(NULL, "ext"); ++ if (IS_ERR(parent)) ++ return PTR_ERR(parent); + clk_set_parent(i2s->clk_i2s, parent); + break; + case JZ4740_I2S_CLKSRC_PLL: + parent = clk_get(NULL, "pll half"); ++ if (IS_ERR(parent)) ++ return PTR_ERR(parent); + clk_set_parent(i2s->clk_i2s, parent); + ret = clk_set_rate(i2s->clk_i2s, freq); + break; +diff --git a/sound/soc/meson/Kconfig b/sound/soc/meson/Kconfig +index 363dc3b1bbe47..ce0cbdc69b2ec 100644 +--- a/sound/soc/meson/Kconfig ++++ b/sound/soc/meson/Kconfig +@@ -1,6 +1,6 @@ + # SPDX-License-Identifier: GPL-2.0-only + menu "ASoC support for Amlogic platforms" +- depends on ARCH_MESON || COMPILE_TEST ++ depends on ARCH_MESON || (COMPILE_TEST && COMMON_CLK) + + config SND_MESON_AIU + tristate "Amlogic AIU" +diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig +index 2696ffcba880f..a824f793811be 100644 +--- a/sound/soc/qcom/Kconfig ++++ b/sound/soc/qcom/Kconfig +@@ -106,6 +106,7 @@ config SND_SOC_QDSP6 + config SND_SOC_MSM8996 + tristate "SoC Machine driver for MSM8996 and APQ8096 boards" + depends on QCOM_APR ++ 
depends on COMMON_CLK + select SND_SOC_QDSP6 + select SND_SOC_QCOM_COMMON + help +diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c +index 54660f126d09e..09af007007007 100644 +--- a/sound/soc/qcom/common.c ++++ b/sound/soc/qcom/common.c +@@ -58,7 +58,7 @@ int qcom_snd_parse_of(struct snd_soc_card *card) + dlc = devm_kzalloc(dev, 2 * sizeof(*dlc), GFP_KERNEL); + if (!dlc) { + ret = -ENOMEM; +- goto err; ++ goto err_put_np; + } + + link->cpus = &dlc[0]; +@@ -70,7 +70,7 @@ int qcom_snd_parse_of(struct snd_soc_card *card) + ret = of_property_read_string(np, "link-name", &link->name); + if (ret) { + dev_err(card->dev, "error getting codec dai_link name\n"); +- goto err; ++ goto err_put_np; + } + + cpu = of_get_child_by_name(np, "cpu"); +@@ -130,8 +130,10 @@ int qcom_snd_parse_of(struct snd_soc_card *card) + } else { + /* DPCM frontend */ + dlc = devm_kzalloc(dev, sizeof(*dlc), GFP_KERNEL); +- if (!dlc) +- return -ENOMEM; ++ if (!dlc) { ++ ret = -ENOMEM; ++ goto err; ++ } + + link->codecs = dlc; + link->num_codecs = 1; +@@ -158,10 +160,11 @@ int qcom_snd_parse_of(struct snd_soc_card *card) + + return 0; + err: +- of_node_put(np); + of_node_put(cpu); + of_node_put(codec); + of_node_put(platform); ++err_put_np: ++ of_node_put(np); + return ret; + } + EXPORT_SYMBOL(qcom_snd_parse_of); +diff --git a/sound/soc/qcom/lpass-hdmi.c b/sound/soc/qcom/lpass-hdmi.c +index 172952d3a5d66..abfb8737a89f4 100644 +--- a/sound/soc/qcom/lpass-hdmi.c ++++ b/sound/soc/qcom/lpass-hdmi.c +@@ -24,7 +24,7 @@ static int lpass_hdmi_daiops_hw_params(struct snd_pcm_substream *substream, + unsigned int rate = params_rate(params); + unsigned int channels = params_channels(params); + unsigned int ret; +- unsigned int bitwidth; ++ int bitwidth; + unsigned int word_length; + unsigned int ch_sts_buf0; + unsigned int ch_sts_buf1; +diff --git a/sound/soc/qcom/qdsp6/q6afe-clocks.c b/sound/soc/qcom/qdsp6/q6afe-clocks.c +index 2efc2eaa04243..acfc0c698f6a1 100644 +--- 
a/sound/soc/qcom/qdsp6/q6afe-clocks.c ++++ b/sound/soc/qcom/qdsp6/q6afe-clocks.c +@@ -16,6 +16,7 @@ + .afe_clk_id = Q6AFE_##id, \ + .name = #id, \ + .attributes = LPASS_CLK_ATTRIBUTE_COUPLE_NO, \ ++ .rate = 19200000, \ + .hw.init = &(struct clk_init_data) { \ + .ops = &clk_q6afe_ops, \ + .name = #id, \ +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index dcab9527ba3d7..91bf339581590 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -2231,6 +2231,7 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ++ case SNDRV_PCM_TRIGGER_DRAIN: + ret = dpcm_dai_trigger_fe_be(substream, cmd, true); + break; + case SNDRV_PCM_TRIGGER_STOP: +@@ -2248,6 +2249,7 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ++ case SNDRV_PCM_TRIGGER_DRAIN: + ret = dpcm_dai_trigger_fe_be(substream, cmd, false); + break; + case SNDRV_PCM_TRIGGER_STOP: +diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig +index a066e08860cbf..5bfc2f8b13b90 100644 +--- a/sound/soc/sof/intel/Kconfig ++++ b/sound/soc/sof/intel/Kconfig +@@ -271,6 +271,7 @@ config SND_SOC_SOF_JASPERLAKE + + config SND_SOC_SOF_HDA_COMMON + tristate ++ select SND_INTEL_DSP_CONFIG + select SND_SOC_SOF_INTEL_COMMON + select SND_SOC_SOF_HDA_LINK_BASELINE + help +@@ -330,7 +331,6 @@ config SND_SOC_SOF_HDA + tristate + select SND_HDA_EXT_CORE if SND_SOC_SOF_HDA_LINK + select SND_SOC_HDAC_HDA if SND_SOC_SOF_HDA_AUDIO_CODEC +- select SND_INTEL_DSP_CONFIG + help + This option is not user-selectable but automagically handled by + 'select' statements at a higher level +diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c +index f23ff29e7c1d3..a994b5cf87b31 100644 +--- a/sound/soc/sunxi/sun4i-i2s.c ++++ 
b/sound/soc/sunxi/sun4i-i2s.c +@@ -450,11 +450,11 @@ static int sun8i_i2s_set_chan_cfg(const struct sun4i_i2s *i2s, + switch (i2s->format & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_DSP_A: + case SND_SOC_DAIFMT_DSP_B: +- case SND_SOC_DAIFMT_LEFT_J: +- case SND_SOC_DAIFMT_RIGHT_J: + lrck_period = params_physical_width(params) * slots; + break; + ++ case SND_SOC_DAIFMT_LEFT_J: ++ case SND_SOC_DAIFMT_RIGHT_J: + case SND_SOC_DAIFMT_I2S: + lrck_period = params_physical_width(params); + break; +diff --git a/sound/usb/card.c b/sound/usb/card.c +index 4457214a3ae62..57d6d4ff01e08 100644 +--- a/sound/usb/card.c ++++ b/sound/usb/card.c +@@ -382,6 +382,9 @@ static const struct usb_audio_device_name usb_audio_names[] = { + /* ASUS ROG Strix */ + PROFILE_NAME(0x0b05, 0x1917, + "Realtek", "ALC1220-VB-DT", "Realtek-ALC1220-VB-Desktop"), ++ /* ASUS PRIME TRX40 PRO-S */ ++ PROFILE_NAME(0x0b05, 0x1918, ++ "Realtek", "ALC1220-VB-DT", "Realtek-ALC1220-VB-Desktop"), + + /* Dell WD15 Dock */ + PROFILE_NAME(0x0bda, 0x4014, "Dell", "WD15 Dock", "Dell-WD15-Dock"), +diff --git a/sound/usb/clock.c b/sound/usb/clock.c +index f3ca59005d914..674e15bf98ed5 100644 +--- a/sound/usb/clock.c ++++ b/sound/usb/clock.c +@@ -531,6 +531,12 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface, + } + + crate = data[0] | (data[1] << 8) | (data[2] << 16); ++ if (!crate) { ++ dev_info(&dev->dev, "failed to read current rate; disabling the check\n"); ++ chip->sample_rate_read_error = 3; /* three strikes, see above */ ++ return 0; ++ } ++ + if (crate != rate) { + dev_warn(&dev->dev, "current rate %d is different from the runtime rate %d\n", crate, rate); + // runtime->rate = crate; +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index c50be2f75f702..f82c2ab809c1d 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -1799,6 +1799,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, + case 0x25ce: /* Mytek devices */ + case 0x278b: /* Rotel? 
*/ + case 0x292b: /* Gustard/Ess based devices */ ++ case 0x2972: /* FiiO devices */ + case 0x2ab6: /* T+A devices */ + case 0x3353: /* Khadas devices */ + case 0x3842: /* EVGA */ +diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile +index cdde783f3018b..89ba522e377dc 100644 +--- a/tools/build/feature/Makefile ++++ b/tools/build/feature/Makefile +@@ -90,7 +90,7 @@ __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$( + ############################### + + $(OUTPUT)test-all.bin: +- $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -I/usr/include/slang -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd ++ $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -I/usr/include/slang -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd -lcap + + $(OUTPUT)test-hello.bin: + $(BUILD) +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c +index 28baee7ba1ca8..ad165e6e74bc0 100644 +--- a/tools/lib/bpf/libbpf.c ++++ b/tools/lib/bpf/libbpf.c +@@ -7649,6 +7649,16 @@ bool bpf_map__is_pinned(const struct bpf_map *map) + return map->pinned; + } + ++static void sanitize_pin_path(char *s) ++{ ++ /* bpffs disallows periods in path names */ ++ while (*s) { ++ if (*s == '.') ++ *s = '_'; ++ s++; ++ } ++} ++ + int bpf_object__pin_maps(struct bpf_object *obj, const char *path) + { + struct bpf_map *map; +@@ -7678,6 +7688,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path) + err = -ENAMETOOLONG; + goto err_unpin_maps; + } ++ sanitize_pin_path(buf); + pin_path = buf; + } else if (!map->pin_path) { + continue; +@@ -7722,6 +7733,7 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) + return -EINVAL; + else if (len >= PATH_MAX) + return -ENAMETOOLONG; ++ sanitize_pin_path(buf); + pin_path = buf; + } else if (!map->pin_path) { + continue; +diff --git 
a/tools/perf/tests/expand-cgroup.c b/tools/perf/tests/expand-cgroup.c +index d5771e4d094f8..4c59f3ae438fc 100644 +--- a/tools/perf/tests/expand-cgroup.c ++++ b/tools/perf/tests/expand-cgroup.c +@@ -145,7 +145,7 @@ static int expand_libpfm_events(void) + int ret; + struct evlist *evlist; + struct rblist metric_events; +- const char event_str[] = "UNHALTED_CORE_CYCLES"; ++ const char event_str[] = "CYCLES"; + struct option opt = { + .value = &evlist, + }; +diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c +index d3517a74d95e3..31f987bb7ebba 100644 +--- a/tools/perf/tests/pmu-events.c ++++ b/tools/perf/tests/pmu-events.c +@@ -561,7 +561,7 @@ static int metric_parse_fake(const char *str) + } + } + +- if (expr__parse(&result, &ctx, str, 1)) ++ if (expr__parse(&result, &ctx, str, 0)) + pr_err("expr__parse failed\n"); + else + ret = 0; +diff --git a/tools/perf/util/parse-regs-options.c b/tools/perf/util/parse-regs-options.c +index e687497b3aac0..a4a100425b3a2 100644 +--- a/tools/perf/util/parse-regs-options.c ++++ b/tools/perf/util/parse-regs-options.c +@@ -54,7 +54,7 @@ __parse_regs(const struct option *opt, const char *str, int unset, bool intr) + #endif + fputc('\n', stderr); + /* just printing available regs */ +- return -1; ++ goto error; + } + #ifdef HAVE_PERF_REGS_SUPPORT + for (r = sample_reg_masks; r->name; r++) { +diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c +index 064b63a6a3f31..bbecb449ea944 100644 +--- a/tools/perf/util/probe-file.c ++++ b/tools/perf/util/probe-file.c +@@ -791,7 +791,7 @@ static char *synthesize_sdt_probe_command(struct sdt_note *note, + const char *sdtgrp) + { + struct strbuf buf; +- char *ret = NULL, **args; ++ char *ret = NULL; + int i, args_count, err; + unsigned long long ref_ctr_offset; + +@@ -813,12 +813,19 @@ static char *synthesize_sdt_probe_command(struct sdt_note *note, + goto out; + + if (note->args) { +- args = argv_split(note->args, &args_count); ++ char **args = 
argv_split(note->args, &args_count); ++ ++ if (args == NULL) ++ goto error; + + for (i = 0; i < args_count; ++i) { +- if (synthesize_sdt_probe_arg(&buf, i, args[i]) < 0) ++ if (synthesize_sdt_probe_arg(&buf, i, args[i]) < 0) { ++ argv_free(args); + goto error; ++ } + } ++ ++ argv_free(args); + } + + out: +diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile +index 542768f5195b7..136df8c102812 100644 +--- a/tools/testing/selftests/bpf/Makefile ++++ b/tools/testing/selftests/bpf/Makefile +@@ -220,7 +220,8 @@ $(RESOLVE_BTFIDS): $(BPFOBJ) | $(BUILD_DIR)/resolve_btfids \ + # build would have failed anyways. + define get_sys_includes + $(shell $(1) -v -E - </dev/null 2>&1 \ +- | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') ++ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \ ++$(shell $(1) -dM -E - </dev/null | grep '#define __riscv_xlen ' | sed 's/#define /-D/' | sed 's/ /=/') + endef + + # Determine target endianness. 
+diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c +index 0758ba229ae0e..09529e33be982 100644 +--- a/tools/testing/selftests/bpf/progs/local_storage.c ++++ b/tools/testing/selftests/bpf/progs/local_storage.c +@@ -58,20 +58,22 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim) + { + __u32 pid = bpf_get_current_pid_tgid() >> 32; + struct dummy_storage *storage; ++ int err; + + if (pid != monitored_pid) + return 0; + + storage = bpf_inode_storage_get(&inode_storage_map, victim->d_inode, 0, +- BPF_SK_STORAGE_GET_F_CREATE); ++ BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!storage) + return 0; + +- if (storage->value == DUMMY_STORAGE_VALUE) ++ if (storage->value != DUMMY_STORAGE_VALUE) + inode_storage_result = -1; + +- inode_storage_result = +- bpf_inode_storage_delete(&inode_storage_map, victim->d_inode); ++ err = bpf_inode_storage_delete(&inode_storage_map, victim->d_inode); ++ if (!err) ++ inode_storage_result = err; + + return 0; + } +@@ -82,19 +84,23 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address, + { + __u32 pid = bpf_get_current_pid_tgid() >> 32; + struct dummy_storage *storage; ++ int err; + + if (pid != monitored_pid) + return 0; + + storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0, +- BPF_SK_STORAGE_GET_F_CREATE); ++ BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!storage) + return 0; + +- if (storage->value == DUMMY_STORAGE_VALUE) ++ if (storage->value != DUMMY_STORAGE_VALUE) + sk_storage_result = -1; + +- sk_storage_result = bpf_sk_storage_delete(&sk_storage_map, sock->sk); ++ err = bpf_sk_storage_delete(&sk_storage_map, sock->sk); ++ if (!err) ++ sk_storage_result = err; ++ + return 0; + } + +@@ -109,7 +115,7 @@ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type, + return 0; + + storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0, +- BPF_SK_STORAGE_GET_F_CREATE); ++ BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!storage) + return 
0; + +@@ -131,7 +137,7 @@ int BPF_PROG(file_open, struct file *file) + return 0; + + storage = bpf_inode_storage_get(&inode_storage_map, file->f_inode, 0, +- BPF_LOCAL_STORAGE_GET_F_CREATE); ++ BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!storage) + return 0; + +diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +index f48dbfe24ddc8..a621b58ab079d 100644 +--- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c ++++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +@@ -15,7 +15,6 @@ + #include <linux/ip.h> + #include <linux/ipv6.h> + #include <linux/types.h> +-#include <linux/tcp.h> + #include <linux/socket.h> + #include <linux/pkt_cls.h> + #include <linux/erspan.h> +@@ -528,12 +527,11 @@ int _ipip_set_tunnel(struct __sk_buff *skb) + struct bpf_tunnel_key key = {}; + void *data = (void *)(long)skb->data; + struct iphdr *iph = data; +- struct tcphdr *tcp = data + sizeof(*iph); + void *data_end = (void *)(long)skb->data_end; + int ret; + + /* single length check */ +- if (data + sizeof(*iph) + sizeof(*tcp) > data_end) { ++ if (data + sizeof(*iph) > data_end) { + ERROR(1); + return TC_ACT_SHOT; + } +@@ -541,16 +539,6 @@ int _ipip_set_tunnel(struct __sk_buff *skb) + key.tunnel_ttl = 64; + if (iph->protocol == IPPROTO_ICMP) { + key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */ +- } else { +- if (iph->protocol != IPPROTO_TCP || iph->ihl != 5) +- return TC_ACT_SHOT; +- +- if (tcp->dest == bpf_htons(5200)) +- key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */ +- else if (tcp->dest == bpf_htons(5201)) +- key.remote_ipv4 = 0xac100165; /* 172.16.1.101 */ +- else +- return TC_ACT_SHOT; + } + + ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); +@@ -585,19 +573,20 @@ int _ipip6_set_tunnel(struct __sk_buff *skb) + struct bpf_tunnel_key key = {}; + void *data = (void *)(long)skb->data; + struct iphdr *iph = data; +- struct tcphdr *tcp = data + sizeof(*iph); + void *data_end = (void *)(long)skb->data_end; 
+ int ret; + + /* single length check */ +- if (data + sizeof(*iph) + sizeof(*tcp) > data_end) { ++ if (data + sizeof(*iph) > data_end) { + ERROR(1); + return TC_ACT_SHOT; + } + + __builtin_memset(&key, 0x0, sizeof(key)); +- key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */ + key.tunnel_ttl = 64; ++ if (iph->protocol == IPPROTO_ICMP) { ++ key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */ ++ } + + ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), + BPF_F_TUNINFO_IPV6); +@@ -634,35 +623,18 @@ int _ip6ip6_set_tunnel(struct __sk_buff *skb) + struct bpf_tunnel_key key = {}; + void *data = (void *)(long)skb->data; + struct ipv6hdr *iph = data; +- struct tcphdr *tcp = data + sizeof(*iph); + void *data_end = (void *)(long)skb->data_end; + int ret; + + /* single length check */ +- if (data + sizeof(*iph) + sizeof(*tcp) > data_end) { ++ if (data + sizeof(*iph) > data_end) { + ERROR(1); + return TC_ACT_SHOT; + } + +- key.remote_ipv6[0] = bpf_htonl(0x2401db00); + key.tunnel_ttl = 64; +- + if (iph->nexthdr == 58 /* NEXTHDR_ICMP */) { +- key.remote_ipv6[3] = bpf_htonl(1); +- } else { +- if (iph->nexthdr != 6 /* NEXTHDR_TCP */) { +- ERROR(iph->nexthdr); +- return TC_ACT_SHOT; +- } +- +- if (tcp->dest == bpf_htons(5200)) { +- key.remote_ipv6[3] = bpf_htonl(1); +- } else if (tcp->dest == bpf_htons(5201)) { +- key.remote_ipv6[3] = bpf_htonl(2); +- } else { +- ERROR(tcp->dest); +- return TC_ACT_SHOT; +- } ++ key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */ + } + + ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), +diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c +index 0fa1e421c3d7a..427ca00a32177 100644 +--- a/tools/testing/selftests/bpf/test_sockmap.c ++++ b/tools/testing/selftests/bpf/test_sockmap.c +@@ -1273,6 +1273,16 @@ static char *test_to_str(int test) + return "unknown"; + } + ++static void append_str(char *dst, const char *src, size_t dst_cap) ++{ ++ size_t avail = dst_cap - strlen(dst); ++ ++ if (avail <= 1) /* just 
zero byte could be written */ ++ return; ++ ++ strncat(dst, src, avail - 1); /* strncat() adds + 1 for zero byte */ ++} ++ + #define OPTSTRING 60 + static void test_options(char *options) + { +@@ -1281,42 +1291,42 @@ static void test_options(char *options) + memset(options, 0, OPTSTRING); + + if (txmsg_pass) +- strncat(options, "pass,", OPTSTRING); ++ append_str(options, "pass,", OPTSTRING); + if (txmsg_redir) +- strncat(options, "redir,", OPTSTRING); ++ append_str(options, "redir,", OPTSTRING); + if (txmsg_drop) +- strncat(options, "drop,", OPTSTRING); ++ append_str(options, "drop,", OPTSTRING); + if (txmsg_apply) { + snprintf(tstr, OPTSTRING, "apply %d,", txmsg_apply); +- strncat(options, tstr, OPTSTRING); ++ append_str(options, tstr, OPTSTRING); + } + if (txmsg_cork) { + snprintf(tstr, OPTSTRING, "cork %d,", txmsg_cork); +- strncat(options, tstr, OPTSTRING); ++ append_str(options, tstr, OPTSTRING); + } + if (txmsg_start) { + snprintf(tstr, OPTSTRING, "start %d,", txmsg_start); +- strncat(options, tstr, OPTSTRING); ++ append_str(options, tstr, OPTSTRING); + } + if (txmsg_end) { + snprintf(tstr, OPTSTRING, "end %d,", txmsg_end); +- strncat(options, tstr, OPTSTRING); ++ append_str(options, tstr, OPTSTRING); + } + if (txmsg_start_pop) { + snprintf(tstr, OPTSTRING, "pop (%d,%d),", + txmsg_start_pop, txmsg_start_pop + txmsg_pop); +- strncat(options, tstr, OPTSTRING); ++ append_str(options, tstr, OPTSTRING); + } + if (txmsg_ingress) +- strncat(options, "ingress,", OPTSTRING); ++ append_str(options, "ingress,", OPTSTRING); + if (txmsg_redir_skb) +- strncat(options, "redir_skb,", OPTSTRING); ++ append_str(options, "redir_skb,", OPTSTRING); + if (txmsg_ktls_skb) +- strncat(options, "ktls_skb,", OPTSTRING); ++ append_str(options, "ktls_skb,", OPTSTRING); + if (ktls) +- strncat(options, "ktls,", OPTSTRING); ++ append_str(options, "ktls,", OPTSTRING); + if (peek_flag) +- strncat(options, "peek,", OPTSTRING); ++ append_str(options, "peek,", OPTSTRING); + } + + static int 
__test_exec(int cgrp, int test, struct sockmap_options *opt) +diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh +index bd12ec97a44df..1ccbe804e8e1c 100755 +--- a/tools/testing/selftests/bpf/test_tunnel.sh ++++ b/tools/testing/selftests/bpf/test_tunnel.sh +@@ -24,12 +24,12 @@ + # Root namespace with metadata-mode tunnel + BPF + # Device names and addresses: + # veth1 IP: 172.16.1.200, IPv6: 00::22 (underlay) +-# tunnel dev <type>11, ex: gre11, IPv4: 10.1.1.200 (overlay) ++# tunnel dev <type>11, ex: gre11, IPv4: 10.1.1.200, IPv6: 1::22 (overlay) + # + # Namespace at_ns0 with native tunnel + # Device names and addresses: + # veth0 IPv4: 172.16.1.100, IPv6: 00::11 (underlay) +-# tunnel dev <type>00, ex: gre00, IPv4: 10.1.1.100 (overlay) ++# tunnel dev <type>00, ex: gre00, IPv4: 10.1.1.100, IPv6: 1::11 (overlay) + # + # + # End-to-end ping packet flow +@@ -250,7 +250,7 @@ add_ipip_tunnel() + ip addr add dev $DEV 10.1.1.200/24 + } + +-add_ipip6tnl_tunnel() ++add_ip6tnl_tunnel() + { + ip netns exec at_ns0 ip addr add ::11/96 dev veth0 + ip netns exec at_ns0 ip link set dev veth0 up +@@ -262,11 +262,13 @@ add_ipip6tnl_tunnel() + ip link add dev $DEV_NS type $TYPE \ + local ::11 remote ::22 + ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24 ++ ip netns exec at_ns0 ip addr add dev $DEV_NS 1::11/96 + ip netns exec at_ns0 ip link set dev $DEV_NS up + + # root namespace + ip link add dev $DEV type $TYPE external + ip addr add dev $DEV 10.1.1.200/24 ++ ip addr add dev $DEV 1::22/96 + ip link set dev $DEV up + } + +@@ -534,7 +536,7 @@ test_ipip6() + + check $TYPE + config_device +- add_ipip6tnl_tunnel ++ add_ip6tnl_tunnel + ip link set dev veth1 mtu 1500 + attach_bpf $DEV ipip6_set_tunnel ipip6_get_tunnel + # underlay +@@ -553,6 +555,34 @@ test_ipip6() + echo -e ${GREEN}"PASS: $TYPE"${NC} + } + ++test_ip6ip6() ++{ ++ TYPE=ip6tnl ++ DEV_NS=ip6ip6tnl00 ++ DEV=ip6ip6tnl11 ++ ret=0 ++ ++ check $TYPE ++ config_device ++ 
add_ip6tnl_tunnel ++ ip link set dev veth1 mtu 1500 ++ attach_bpf $DEV ip6ip6_set_tunnel ip6ip6_get_tunnel ++ # underlay ++ ping6 $PING_ARG ::11 ++ # ip6 over ip6 ++ ping6 $PING_ARG 1::11 ++ check_err $? ++ ip netns exec at_ns0 ping6 $PING_ARG 1::22 ++ check_err $? ++ cleanup ++ ++ if [ $ret -ne 0 ]; then ++ echo -e ${RED}"FAIL: ip6$TYPE"${NC} ++ return 1 ++ fi ++ echo -e ${GREEN}"PASS: ip6$TYPE"${NC} ++} ++ + setup_xfrm_tunnel() + { + auth=0x$(printf '1%.0s' {1..40}) +@@ -646,6 +676,7 @@ cleanup() + ip link del veth1 2> /dev/null + ip link del ipip11 2> /dev/null + ip link del ipip6tnl11 2> /dev/null ++ ip link del ip6ip6tnl11 2> /dev/null + ip link del gretap11 2> /dev/null + ip link del ip6gre11 2> /dev/null + ip link del ip6gretap11 2> /dev/null +@@ -742,6 +773,10 @@ bpf_tunnel_test() + test_ipip6 + errors=$(( $errors + $? )) + ++ echo "Testing IP6IP6 tunnel..." ++ test_ip6ip6 ++ errors=$(( $errors + $? )) ++ + echo "Testing IPSec tunnel..." + test_xfrm_tunnel + errors=$(( $errors + $? )) +diff --git a/tools/testing/selftests/run_kselftest.sh b/tools/testing/selftests/run_kselftest.sh +index 609a4ef9300e3..97165a83df632 100755 +--- a/tools/testing/selftests/run_kselftest.sh ++++ b/tools/testing/selftests/run_kselftest.sh +@@ -48,7 +48,7 @@ while true; do + -l | --list) + echo "$available" + exit 0 ;; +- -n | --dry-run) ++ -d | --dry-run) + dryrun="echo" + shift ;; + -h | --help) +diff --git a/tools/testing/selftests/seccomp/config b/tools/testing/selftests/seccomp/config +index 64c19d8eba795..ad431a5178fbe 100644 +--- a/tools/testing/selftests/seccomp/config ++++ b/tools/testing/selftests/seccomp/config +@@ -1,3 +1,4 @@ ++CONFIG_PID_NS=y + CONFIG_SECCOMP=y + CONFIG_SECCOMP_FILTER=y + CONFIG_USER_NS=y |