author     Mike Pagano <mpagano@gentoo.org>    2023-08-11 10:50:50 -0400
committer  Mike Pagano <mpagano@gentoo.org>    2023-08-11 10:50:50 -0400
commit     696d4e722f7a578a34399089bad819ebb7496c09 (patch)
tree       33ed78c9f34f5eb84a0c76ebe683810a22900f03
parent     Linux patch 5.15.125 (diff)
download   linux-patches-696d4e722f7a578a34399089bad819ebb7496c09.tar.gz
           linux-patches-696d4e722f7a578a34399089bad819ebb7496c09.tar.bz2
           linux-patches-696d4e722f7a578a34399089bad819ebb7496c09.zip
Linux patch 5.15.126 (5.15-132)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README               |     4
-rw-r--r--  1125_linux-5.15.126.patch | 10417
2 files changed, 10421 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 25b6fa31..15791807 100644
--- a/0000_README
+++ b/0000_README
@@ -543,6 +543,10 @@ Patch: 1124_linux-5.15.125.patch
From: https://www.kernel.org
Desc: Linux 5.15.125
+Patch: 1125_linux-5.15.126.patch
+From: https://www.kernel.org
+Desc: Linux 5.15.126
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1125_linux-5.15.126.patch b/1125_linux-5.15.126.patch
new file mode 100644
index 00000000..67e4ca02
--- /dev/null
+++ b/1125_linux-5.15.126.patch
@@ -0,0 +1,10417 @@
+diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
+index 076861b0f5ac1..83a75e16e54de 100644
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -104,6 +104,10 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-A710 | #2224489 | ARM64_ERRATUM_2224489 |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Neoverse-N1 | #1349291 | N/A |
+@@ -112,8 +116,16 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Neoverse-N2 | #2139208 | ARM64_ERRATUM_2139208 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | Neoverse-N2 | #2067961 | ARM64_ERRATUM_2067961 |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Neoverse-N2 | #2253138 | ARM64_ERRATUM_2253138 |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM | MMU-500 | #841119,826419 | N/A |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | MMU-600 | #1076982,1209401| N/A |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | MMU-700 | #2268618,2812531| N/A |
+++----------------+-----------------+-----------------+-----------------------------+
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Broadcom | Brahma-B53 | N/A | ARM64_ERRATUM_845719 |
+ +----------------+-----------------+-----------------+-----------------------------+
+diff --git a/Makefile b/Makefile
+index a90f955e14ab3..42993220a57a3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 125
++SUBLEVEL = 126
+ EXTRAVERSION =
+ NAME = Trick or Treat
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index e5e35470647b6..5ab4b0520eabb 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -774,6 +774,80 @@ config ARM64_ERRATUM_2139208
+
+ If unsure, say Y.
+
++config ARM64_WORKAROUND_TSB_FLUSH_FAILURE
++ bool
++
++config ARM64_ERRATUM_2054223
++ bool "Cortex-A710: 2054223: workaround TSB instruction failing to flush trace"
++ default y
++ select ARM64_WORKAROUND_TSB_FLUSH_FAILURE
++ help
++ Enable workaround for ARM Cortex-A710 erratum 2054223
++
++ Affected cores may fail to flush the trace data on a TSB instruction, when
++ the PE is in trace prohibited state. This will cause losing a few bytes
++ of the trace cached.
++
++ Workaround is to issue two TSB consecutively on affected cores.
++
++ If unsure, say Y.
++
++config ARM64_ERRATUM_2067961
++ bool "Neoverse-N2: 2067961: workaround TSB instruction failing to flush trace"
++ default y
++ select ARM64_WORKAROUND_TSB_FLUSH_FAILURE
++ help
++ Enable workaround for ARM Neoverse-N2 erratum 2067961
++
++ Affected cores may fail to flush the trace data on a TSB instruction, when
++ the PE is in trace prohibited state. This will cause losing a few bytes
++ of the trace cached.
++
++ Workaround is to issue two TSB consecutively on affected cores.
++
++ If unsure, say Y.
++
++config ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
++ bool
++
++config ARM64_ERRATUM_2253138
++ bool "Neoverse-N2: 2253138: workaround TRBE writing to address out-of-range"
++ depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
++ depends on CORESIGHT_TRBE
++ default y
++ select ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
++ help
++ This option adds the workaround for ARM Neoverse-N2 erratum 2253138.
++
++ Affected Neoverse-N2 cores might write to an out-of-range address, not reserved
++ for TRBE. Under some conditions, the TRBE might generate a write to the next
++ virtually addressed page following the last page of the TRBE address space
++ (i.e., the TRBLIMITR_EL1.LIMIT), instead of wrapping around to the base.
++
++ Work around this in the driver by always making sure that there is a
++ page beyond the TRBLIMITR_EL1.LIMIT, within the space allowed for the TRBE.
++
++ If unsure, say Y.
++
++config ARM64_ERRATUM_2224489
++ bool "Cortex-A710: 2224489: workaround TRBE writing to address out-of-range"
++ depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
++ depends on CORESIGHT_TRBE
++ default y
++ select ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
++ help
++ This option adds the workaround for ARM Cortex-A710 erratum 2224489.
++
++ Affected Cortex-A710 cores might write to an out-of-range address, not reserved
++ for TRBE. Under some conditions, the TRBE might generate a write to the next
++ virtually addressed page following the last page of the TRBE address space
++ (i.e., the TRBLIMITR_EL1.LIMIT), instead of wrapping around to the base.
++
++ Work around this in the driver by always making sure that there is a
++ page beyond the TRBLIMITR_EL1.LIMIT, within the space allowed for the TRBE.
++
++ If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+ bool "Cavium erratum 22375, 24313"
+ default y
+diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
+index 46e558ab7729b..f0e8af12442a4 100644
+--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
++++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
+@@ -129,7 +129,7 @@
+ status = "okay";
+ clock-frequency = <100000>;
+ i2c-sda-falling-time-ns = <890>; /* hcnt */
+- i2c-sdl-falling-time-ns = <890>; /* lcnt */
++ i2c-scl-falling-time-ns = <890>; /* lcnt */
+
+ adc@14 {
+ compatible = "lltc,ltc2497";
+diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
+index f9b4a39683cf4..92ac3c86ebd56 100644
+--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
++++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
+@@ -162,7 +162,7 @@
+ status = "okay";
+ clock-frequency = <100000>;
+ i2c-sda-falling-time-ns = <890>; /* hcnt */
+- i2c-sdl-falling-time-ns = <890>; /* lcnt */
++ i2c-scl-falling-time-ns = <890>; /* lcnt */
+
+ adc@14 {
+ compatible = "lltc,ltc2497";
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+index d053ef302fb82..faafefe562e4b 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+@@ -351,7 +351,7 @@
+ MX8MN_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
+ MX8MN_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
+ MX8MN_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
+- MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x19
++ MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x159
+ >;
+ };
+
+diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
+index 451e11e5fd23b..1c5a005984582 100644
+--- a/arch/arm64/include/asm/barrier.h
++++ b/arch/arm64/include/asm/barrier.h
+@@ -23,7 +23,7 @@
+ #define dsb(opt) asm volatile("dsb " #opt : : : "memory")
+
+ #define psb_csync() asm volatile("hint #17" : : : "memory")
+-#define tsb_csync() asm volatile("hint #18" : : : "memory")
++#define __tsb_csync() asm volatile("hint #18" : : : "memory")
+ #define csdb() asm volatile("hint #20" : : : "memory")
+
+ #ifdef CONFIG_ARM64_PSEUDO_NMI
+@@ -46,6 +46,20 @@
+ #define dma_rmb() dmb(oshld)
+ #define dma_wmb() dmb(oshst)
+
++
++#define tsb_csync() \
++ do { \
++ /* \
++ * CPUs affected by Arm Erratum 2054223 or 2067961 needs \
++ * another TSB to ensure the trace is flushed. The barriers \
++ * don't have to be strictly back to back, as long as the \
++ * CPU is in trace prohibited state. \
++ */ \
++ if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE)) \
++ __tsb_csync(); \
++ __tsb_csync(); \
++ } while (0)
++
+ /*
+ * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz
+ * and 0 otherwise.
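The hunk above hides the erratum behind the tsb_csync() wrapper itself. As a point of reference, here is a minimal sketch (not part of the patch; the function name is made up) of how a self-hosted trace driver might drain buffered trace data after this change, staying erratum-agnostic because the wrapper issues the extra TSB CSYNC on affected cores:

#include <asm/barrier.h>

/* Hedged sketch: flush buffered trace to memory before inspecting it. */
static void trace_drain_buffer(void)
{
	tsb_csync();	/* expands to two TSB CSYNC on cores with the workaround cap */
	dsb(nsh);	/* make the resulting memory writes visible */
}
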
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index d810d4b7b438c..bf69a20bc27f9 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -375,6 +375,30 @@ static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
+ };
+ #endif /* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */
+
++#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
++static const struct midr_range tsb_flush_fail_cpus[] = {
++#ifdef CONFIG_ARM64_ERRATUM_2067961
++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++#endif
++#ifdef CONFIG_ARM64_ERRATUM_2054223
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
++#endif
++ {},
++};
++#endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */
++
++#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
++static struct midr_range trbe_write_out_of_range_cpus[] = {
++#ifdef CONFIG_ARM64_ERRATUM_2253138
++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++#endif
++#ifdef CONFIG_ARM64_ERRATUM_2224489
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
++#endif
++ {},
++};
++#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */
++
+ const struct arm64_cpu_capabilities arm64_errata[] = {
+ #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+ {
+@@ -606,6 +630,21 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+ CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
+ },
++#endif
++#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
++ {
++ .desc = "ARM erratum 2067961 or 2054223",
++ .capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
++ ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
++ },
++#endif
++#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
++ {
++ .desc = "ARM erratum 2253138 or 2224489",
++ .capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
++ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
++ CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
++ },
+ #endif
+ {
+ }
+diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
+index 32fe50a3a26c0..fcaeec5a51258 100644
+--- a/arch/arm64/tools/cpucaps
++++ b/arch/arm64/tools/cpucaps
+@@ -57,6 +57,8 @@ WORKAROUND_1542419
+ WORKAROUND_1742098
+ WORKAROUND_2457168
+ WORKAROUND_TRBE_OVERWRITE_FILL_MODE
++WORKAROUND_TSB_FLUSH_FAILURE
++WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
+ WORKAROUND_CAVIUM_23154
+ WORKAROUND_CAVIUM_27456
+ WORKAROUND_CAVIUM_30115
+diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
+index f3f4710d4ff52..99129b0cd8b8a 100644
+--- a/arch/powerpc/include/asm/word-at-a-time.h
++++ b/arch/powerpc/include/asm/word-at-a-time.h
+@@ -34,7 +34,7 @@ static inline long find_zero(unsigned long mask)
+ return leading_zero_bits >> 3;
+ }
+
+-static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
++static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+ {
+ unsigned long rhs = val | c->low_bits;
+ *data = rhs;
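The only change here is the return type of has_zero(), aligning powerpc with the generic helpers whose callers treat the result as a word-sized value. A hedged sketch of the canonical caller pattern (modeled on the generic word-at-a-time string helpers; the function name is illustrative, not from the patch):

#include <asm/word-at-a-time.h>

/* Return the byte index of the first NUL in @word, or sizeof(word) if none. */
static size_t first_zero_byte(unsigned long word)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	unsigned long data;

	if (!has_zero(word, &data, &constants))
		return sizeof(word);		/* no NUL byte in this word */

	data = prep_zero_mask(word, data, &constants);
	data = create_zero_mask(data);
	return find_zero(data);			/* index of the first NUL byte */
}
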
+diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
+index b76cd49d521b9..db040f34c0046 100644
+--- a/arch/powerpc/mm/init_64.c
++++ b/arch/powerpc/mm/init_64.c
+@@ -313,8 +313,7 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
+ start = ALIGN_DOWN(start, page_size);
+ if (altmap) {
+ alt_start = altmap->base_pfn;
+- alt_end = altmap->base_pfn + altmap->reserve +
+- altmap->free + altmap->alloc + altmap->align;
++ alt_end = altmap->base_pfn + altmap->reserve + altmap->free;
+ }
+
+ pr_debug("vmemmap_free %lx...%lx\n", start, end);
+diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c
+index 4d141e2c132e5..2ea7f208f0e73 100644
+--- a/arch/s390/kernel/sthyi.c
++++ b/arch/s390/kernel/sthyi.c
+@@ -459,9 +459,9 @@ static int sthyi_update_cache(u64 *rc)
+ *
+ * Fills the destination with system information returned by the STHYI
+ * instruction. The data is generated by emulation or execution of STHYI,
+- * if available. The return value is the condition code that would be
+- * returned, the rc parameter is the return code which is passed in
+- * register R2 + 1.
++ * if available. The return value is either a negative error value or
++ * the condition code that would be returned, the rc parameter is the
++ * return code which is passed in register R2 + 1.
+ */
+ int sthyi_fill(void *dst, u64 *rc)
+ {
+diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
+index aeb0e0865e890..458b42b50b8cb 100644
+--- a/arch/s390/kvm/intercept.c
++++ b/arch/s390/kvm/intercept.c
+@@ -389,8 +389,8 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
+ */
+ int handle_sthyi(struct kvm_vcpu *vcpu)
+ {
+- int reg1, reg2, r = 0;
+- u64 code, addr, cc = 0, rc = 0;
++ int reg1, reg2, cc = 0, r = 0;
++ u64 code, addr, rc = 0;
+ struct sthyi_sctns *sctns = NULL;
+
+ if (!test_kvm_facility(vcpu->kvm, 74))
+@@ -421,7 +421,10 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
+ return -ENOMEM;
+
+ cc = sthyi_fill(sctns, &rc);
+-
++ if (cc < 0) {
++ free_page((unsigned long)sctns);
++ return cc;
++ }
+ out:
+ if (!cc) {
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 608ffc45fc0e1..d6089072ee41f 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -465,4 +465,5 @@
+
+ /* BUG word 2 */
+ #define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
++#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 747ccc2ae383f..aec714ea82302 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -804,10 +804,12 @@ extern u16 get_llc_id(unsigned int cpu);
+ extern u32 amd_get_nodes_per_socket(void);
+ extern u32 amd_get_highest_perf(void);
+ extern bool cpu_has_ibpb_brtype_microcode(void);
++extern void amd_clear_divider(void);
+ #else
+ static inline u32 amd_get_nodes_per_socket(void) { return 0; }
+ static inline u32 amd_get_highest_perf(void) { return 0; }
+ static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; }
++static inline void amd_clear_divider(void) { }
+ #endif
+
+ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 1b90eb6ea5030..77f4dfb0662eb 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -75,6 +75,10 @@ static const int amd_zenbleed[] =
+ AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
+ AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
+
++static const int amd_div0[] =
++ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
++ AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
++
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+ {
+ int osvw_id = *erratum++;
+@@ -1140,6 +1144,11 @@ static void init_amd(struct cpuinfo_x86 *c)
+ check_null_seg_clears_base(c);
+
+ zenbleed_check(c);
++
++ if (cpu_has_amd_erratum(c, amd_div0)) {
++ pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
++ setup_force_cpu_bug(X86_BUG_DIV0);
++ }
+ }
+
+ #ifdef CONFIG_X86_32
+@@ -1300,3 +1309,13 @@ void amd_check_microcode(void)
+ {
+ on_each_cpu(zenbleed_check_cpu, NULL, 1);
+ }
++
++/*
++ * Issue a DIV 0/1 insn to clear any division data from previous DIV
++ * operations.
++ */
++void noinstr amd_clear_divider(void)
++{
++ asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
++ :: "a" (0), "d" (0), "r" (1));
++}
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index ca47080e37741..3361d32d090f8 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -202,6 +202,8 @@ DEFINE_IDTENTRY(exc_divide_error)
+ {
+ do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
+ FPE_INTDIV, error_get_trap_addr(regs));
++
++ amd_clear_divider();
+ }
+
+ DEFINE_IDTENTRY(exc_overflow)
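For readers unfamiliar with ALTERNATIVE, here is a hedged illustration (not part of the patch; the function name and inline asm are an assumed expansion) of what amd_clear_divider() boils down to once X86_BUG_DIV0 is set: a harmless 0/1 division that overwrites any stale divider state before the #DE handler returns:

/* Hedged sketch of the patched-in instruction, written as plain inline asm. */
static inline void clear_divider_sketch(void)
{
	unsigned long lo = 0, hi = 0;

	/* DIV r64: divide RDX:RAX (= 0) by 1; quotient and remainder land in RAX/RDX. */
	asm volatile("div %2" : "+a" (lo), "+d" (hi) : "r" (1UL));
}
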
+diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
+index 54292cdd7808b..922ed457db191 100644
+--- a/drivers/base/power/power.h
++++ b/drivers/base/power/power.h
+@@ -25,8 +25,11 @@ extern u64 pm_runtime_active_time(struct device *dev);
+
+ #define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
+ #define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
++#define WAKE_IRQ_DEDICATED_REVERSE BIT(2)
+ #define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
+- WAKE_IRQ_DEDICATED_MANAGED)
++ WAKE_IRQ_DEDICATED_MANAGED | \
++ WAKE_IRQ_DEDICATED_REVERSE)
++#define WAKE_IRQ_DEDICATED_ENABLED BIT(3)
+
+ struct wake_irq {
+ struct device *dev;
+@@ -39,7 +42,8 @@ extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
+ extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
+ extern void dev_pm_enable_wake_irq_check(struct device *dev,
+ bool can_change_status);
+-extern void dev_pm_disable_wake_irq_check(struct device *dev);
++extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable);
++extern void dev_pm_enable_wake_irq_complete(struct device *dev);
+
+ #ifdef CONFIG_PM_SLEEP
+
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index c1142a7a4fe65..5824d41a0b745 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -673,6 +673,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+ if (retval)
+ goto fail;
+
++ dev_pm_enable_wake_irq_complete(dev);
++
+ no_callback:
+ __update_runtime_status(dev, RPM_SUSPENDED);
+ pm_runtime_deactivate_timer(dev);
+@@ -718,7 +720,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+ return retval;
+
+ fail:
+- dev_pm_disable_wake_irq_check(dev);
++ dev_pm_disable_wake_irq_check(dev, true);
+ __update_runtime_status(dev, RPM_ACTIVE);
+ dev->power.deferred_resume = false;
+ wake_up_all(&dev->power.wait_queue);
+@@ -901,7 +903,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
+
+ callback = RPM_GET_CALLBACK(dev, runtime_resume);
+
+- dev_pm_disable_wake_irq_check(dev);
++ dev_pm_disable_wake_irq_check(dev, false);
+ retval = rpm_callback(callback, dev);
+ if (retval) {
+ __update_runtime_status(dev, RPM_SUSPENDED);
+diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
+index b91a3a9bf9f6d..6f2cdd8643afa 100644
+--- a/drivers/base/power/wakeirq.c
++++ b/drivers/base/power/wakeirq.c
+@@ -142,24 +142,7 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
+ return IRQ_HANDLED;
+ }
+
+-/**
+- * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
+- * @dev: Device entry
+- * @irq: Device wake-up interrupt
+- *
+- * Unless your hardware has separate wake-up interrupts in addition
+- * to the device IO interrupts, you don't need this.
+- *
+- * Sets up a threaded interrupt handler for a device that has
+- * a dedicated wake-up interrupt in addition to the device IO
+- * interrupt.
+- *
+- * The interrupt starts disabled, and needs to be managed for
+- * the device by the bus code or the device driver using
+- * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
+- * functions.
+- */
+-int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
++static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
+ {
+ struct wake_irq *wirq;
+ int err;
+@@ -197,7 +180,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+ if (err)
+ goto err_free_irq;
+
+- wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
++ wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;
+
+ return err;
+
+@@ -210,8 +193,57 @@ err_free:
+
+ return err;
+ }
++
++
++/**
++ * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
++ * @dev: Device entry
++ * @irq: Device wake-up interrupt
++ *
++ * Unless your hardware has separate wake-up interrupts in addition
++ * to the device IO interrupts, you don't need this.
++ *
++ * Sets up a threaded interrupt handler for a device that has
++ * a dedicated wake-up interrupt in addition to the device IO
++ * interrupt.
++ *
++ * The interrupt starts disabled, and needs to be managed for
++ * the device by the bus code or the device driver using
++ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
++ * functions.
++ */
++int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
++{
++ return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
++}
+ EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
+
++/**
++ * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
++ * with reverse enable ordering
++ * @dev: Device entry
++ * @irq: Device wake-up interrupt
++ *
++ * Unless your hardware has separate wake-up interrupts in addition
++ * to the device IO interrupts, you don't need this.
++ *
++ * Sets up a threaded interrupt handler for a device that has a dedicated
++ * wake-up interrupt in addition to the device IO interrupt. It sets
++ * the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend()
++ * to enable dedicated wake-up interrupt after running the runtime suspend
++ * callback for @dev.
++ *
++ * The interrupt starts disabled, and needs to be managed for
++ * the device by the bus code or the device driver using
++ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
++ * functions.
++ */
++int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
++{
++ return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
++}
++EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
++
+ /**
+ * dev_pm_enable_wake_irq - Enable device wake-up interrupt
+ * @dev: Device
+@@ -282,25 +314,56 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
+ return;
+
+ enable:
+- enable_irq(wirq->irq);
++ if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
++ enable_irq(wirq->irq);
++ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
++ }
+ }
+
+ /**
+ * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
+ * @dev: Device
++ * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
+ *
+ * Disables wake-up interrupt conditionally based on status.
+ * Should be only called from rpm_suspend() and rpm_resume() path.
+ */
+-void dev_pm_disable_wake_irq_check(struct device *dev)
++void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
+ {
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
+ return;
+
+- if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
++ if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
++ return;
++
++ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
++ wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
+ disable_irq_nosync(wirq->irq);
++ }
++}
++
++/**
++ * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
++ * @dev: Device using the wake IRQ
++ *
++ * Enable wake IRQ conditionally based on status, mainly used if want to
++ * enable wake IRQ after running ->runtime_suspend() which depends on
++ * WAKE_IRQ_DEDICATED_REVERSE.
++ *
++ * Should be only called from rpm_suspend() path.
++ */
++void dev_pm_enable_wake_irq_complete(struct device *dev)
++{
++ struct wake_irq *wirq = dev->power.wakeirq;
++
++ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
++ return;
++
++ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
++ wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
++ enable_irq(wirq->irq);
+ }
+
+ /**
+@@ -317,7 +380,7 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
+
+ if (device_may_wakeup(wirq->dev)) {
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+- !pm_runtime_status_suspended(wirq->dev))
++ !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
+ enable_irq(wirq->irq);
+
+ enable_irq_wake(wirq->irq);
+@@ -340,7 +403,7 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
+ disable_irq_wake(wirq->irq);
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+- !pm_runtime_status_suspended(wirq->dev))
++ !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
+ disable_irq_nosync(wirq->irq);
+ }
+ }
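Because dev_pm_set_dedicated_wake_irq_reverse() is only introduced and exported here, a hedged usage sketch may help: a consumer driver requesting a dedicated wake IRQ that should be enabled only after its ->runtime_suspend() callback has run. The driver, device, and IRQ index below are placeholders, not taken from the patch:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int wakeirq, ret;

	/* Hypothetical second interrupt line acting as the dedicated wake IRQ. */
	wakeirq = platform_get_irq_optional(pdev, 1);
	if (wakeirq > 0) {
		/*
		 * The _reverse variant sets WAKE_IRQ_DEDICATED_REVERSE, so
		 * rpm_suspend() enables this IRQ via
		 * dev_pm_enable_wake_irq_complete() only after the
		 * runtime-suspend callback has succeeded.
		 */
		ret = dev_pm_set_dedicated_wake_irq_reverse(dev, wakeirq);
		if (ret)
			return ret;
	}

	pm_runtime_enable(dev);
	return 0;
}
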
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index c269c552a43aa..fe8bdbf4616bc 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3677,7 +3677,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
+ ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+ RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
+ RBD_LOCK_TAG, "", 0);
+- if (ret)
++ if (ret && ret != -EEXIST)
+ return ret;
+
+ __rbd_lock(rbd_dev, cookie);
+@@ -3880,7 +3880,7 @@ static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
+ &rbd_dev->header_oloc, RBD_LOCK_NAME,
+ &lock_type, &lock_tag, &lockers, &num_lockers);
+ if (ret) {
+- rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret);
++ rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
+ return ERR_PTR(ret);
+ }
+
+@@ -3942,8 +3942,10 @@ static int find_watcher(struct rbd_device *rbd_dev,
+ ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
+ &rbd_dev->header_oloc, &watchers,
+ &num_watchers);
+- if (ret)
++ if (ret) {
++ rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
+ return ret;
++ }
+
+ sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
+ for (i = 0; i < num_watchers; i++) {
+@@ -3987,8 +3989,12 @@ static int rbd_try_lock(struct rbd_device *rbd_dev)
+ locker = refreshed_locker = NULL;
+
+ ret = rbd_lock(rbd_dev);
+- if (ret != -EBUSY)
++ if (!ret)
++ goto out;
++ if (ret != -EBUSY) {
++ rbd_warn(rbd_dev, "failed to lock header: %d", ret);
+ goto out;
++ }
+
+ /* determine if the current lock holder is still alive */
+ locker = get_lock_owner_info(rbd_dev);
+@@ -4091,11 +4097,8 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
+
+ ret = rbd_try_lock(rbd_dev);
+ if (ret < 0) {
+- rbd_warn(rbd_dev, "failed to lock header: %d", ret);
+- if (ret == -EBLOCKLISTED)
+- goto out;
+-
+- ret = 1; /* request lock anyway */
++ rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
++ goto out;
+ }
+ if (ret > 0) {
+ up_write(&rbd_dev->lock_rwsem);
+@@ -6631,12 +6634,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
+ cancel_delayed_work_sync(&rbd_dev->lock_dwork);
+ if (!ret)
+ ret = -ETIMEDOUT;
+- }
+
+- if (ret) {
+- rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
+- return ret;
++ rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
+ }
++ if (ret)
++ return ret;
+
+ /*
+ * The lock may have been released by now, unless automatic lock
+diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
+index ed9b83aee8bd7..d1400de17eca7 100644
+--- a/drivers/firmware/arm_scmi/mailbox.c
++++ b/drivers/firmware/arm_scmi/mailbox.c
+@@ -106,8 +106,10 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ return -ENOMEM;
+
+ shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
+- if (!of_device_is_compatible(shmem, "arm,scmi-shmem"))
++ if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) {
++ of_node_put(shmem);
+ return -ENXIO;
++ }
+
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
+index 4effecc3bb463..ea1caf70e8df9 100644
+--- a/drivers/firmware/arm_scmi/smc.c
++++ b/drivers/firmware/arm_scmi/smc.c
+@@ -76,8 +76,10 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ return -ENOMEM;
+
+ np = of_parse_phandle(cdev->of_node, "shmem", 0);
+- if (!of_device_is_compatible(np, "arm,scmi-shmem"))
++ if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
++ of_node_put(np);
+ return -ENXIO;
++ }
+
+ ret = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
+diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
+index f7863d6dea804..ba5b16618c237 100644
+--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
++++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
+@@ -311,7 +311,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
+ dev_warn(ipu_crtc->dev, "8-pixel align hactive %d -> %d\n",
+ sig_cfg.mode.hactive, new_hactive);
+
+- sig_cfg.mode.hfront_porch = new_hactive - sig_cfg.mode.hactive;
++ sig_cfg.mode.hfront_porch -= new_hactive - sig_cfg.mode.hactive;
+ sig_cfg.mode.hactive = new_hactive;
+ }
+
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 4d0ef5ab25319..391ed462f7fbb 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -606,7 +606,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
+
+ if (bo->pin_count) {
+ *locked = false;
+- *busy = false;
++ if (busy)
++ *busy = false;
+ return false;
+ }
+
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index bcdb2cbdda971..340ef116d574a 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -897,6 +897,12 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_batch *cmds,
+ struct arm_smmu_cmdq_ent *cmd)
+ {
++ if (cmds->num == CMDQ_BATCH_ENTRIES - 1 &&
++ (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) {
++ arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
++ cmds->num = 0;
++ }
++
+ if (cmds->num == CMDQ_BATCH_ENTRIES) {
+ arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
+ cmds->num = 0;
+@@ -3459,6 +3465,44 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
+ return 0;
+ }
+
++#define IIDR_IMPLEMENTER_ARM 0x43b
++#define IIDR_PRODUCTID_ARM_MMU_600 0x483
++#define IIDR_PRODUCTID_ARM_MMU_700 0x487
++
++static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu)
++{
++ u32 reg;
++ unsigned int implementer, productid, variant, revision;
++
++ reg = readl_relaxed(smmu->base + ARM_SMMU_IIDR);
++ implementer = FIELD_GET(IIDR_IMPLEMENTER, reg);
++ productid = FIELD_GET(IIDR_PRODUCTID, reg);
++ variant = FIELD_GET(IIDR_VARIANT, reg);
++ revision = FIELD_GET(IIDR_REVISION, reg);
++
++ switch (implementer) {
++ case IIDR_IMPLEMENTER_ARM:
++ switch (productid) {
++ case IIDR_PRODUCTID_ARM_MMU_600:
++ /* Arm erratum 1076982 */
++ if (variant == 0 && revision <= 2)
++ smmu->features &= ~ARM_SMMU_FEAT_SEV;
++ /* Arm erratum 1209401 */
++ if (variant < 2)
++ smmu->features &= ~ARM_SMMU_FEAT_NESTING;
++ break;
++ case IIDR_PRODUCTID_ARM_MMU_700:
++ /* Arm erratum 2812531 */
++ smmu->features &= ~ARM_SMMU_FEAT_BTM;
++ smmu->options |= ARM_SMMU_OPT_CMDQ_FORCE_SYNC;
++ /* Arm errata 2268618, 2812531 */
++ smmu->features &= ~ARM_SMMU_FEAT_NESTING;
++ break;
++ }
++ break;
++ }
++}
++
+ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
+ {
+ u32 reg;
+@@ -3664,6 +3708,12 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
+
+ smmu->ias = max(smmu->ias, smmu->oas);
+
++ if ((smmu->features & ARM_SMMU_FEAT_TRANS_S1) &&
++ (smmu->features & ARM_SMMU_FEAT_TRANS_S2))
++ smmu->features |= ARM_SMMU_FEAT_NESTING;
++
++ arm_smmu_device_iidr_probe(smmu);
++
+ if (arm_smmu_sva_supported(smmu))
+ smmu->features |= ARM_SMMU_FEAT_SVA;
+
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+index 4cb136f07914e..c594a9b469995 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+@@ -69,6 +69,12 @@
+ #define IDR5_VAX GENMASK(11, 10)
+ #define IDR5_VAX_52_BIT 1
+
++#define ARM_SMMU_IIDR 0x18
++#define IIDR_PRODUCTID GENMASK(31, 20)
++#define IIDR_VARIANT GENMASK(19, 16)
++#define IIDR_REVISION GENMASK(15, 12)
++#define IIDR_IMPLEMENTER GENMASK(11, 0)
++
+ #define ARM_SMMU_CR0 0x20
+ #define CR0_ATSCHK (1 << 4)
+ #define CR0_CMDQEN (1 << 3)
+@@ -640,11 +646,13 @@ struct arm_smmu_device {
+ #define ARM_SMMU_FEAT_BTM (1 << 16)
+ #define ARM_SMMU_FEAT_SVA (1 << 17)
+ #define ARM_SMMU_FEAT_E2H (1 << 18)
++#define ARM_SMMU_FEAT_NESTING (1 << 19)
+ u32 features;
+
+ #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
+ #define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1)
+ #define ARM_SMMU_OPT_MSIPOLL (1 << 2)
++#define ARM_SMMU_OPT_CMDQ_FORCE_SYNC (1 << 3)
+ u32 options;
+
+ struct arm_smmu_cmdq cmdq;
+diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
+index eba58b99cd29d..d6cf01c32a33d 100644
+--- a/drivers/isdn/hardware/mISDN/hfcpci.c
++++ b/drivers/isdn/hardware/mISDN/hfcpci.c
+@@ -839,7 +839,7 @@ hfcpci_fill_fifo(struct bchannel *bch)
+ *z1t = cpu_to_le16(new_z1); /* now send data */
+ if (bch->tx_idx < bch->tx_skb->len)
+ return;
+- dev_kfree_skb(bch->tx_skb);
++ dev_kfree_skb_any(bch->tx_skb);
+ if (get_next_bframe(bch))
+ goto next_t_frame;
+ return;
+@@ -895,7 +895,7 @@ hfcpci_fill_fifo(struct bchannel *bch)
+ }
+ bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */
+ bz->f1 = new_f1; /* next frame */
+- dev_kfree_skb(bch->tx_skb);
++ dev_kfree_skb_any(bch->tx_skb);
+ get_next_bframe(bch);
+ }
+
+@@ -1119,7 +1119,7 @@ tx_birq(struct bchannel *bch)
+ if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
+ hfcpci_fill_fifo(bch);
+ else {
+- dev_kfree_skb(bch->tx_skb);
++ dev_kfree_skb_any(bch->tx_skb);
+ if (get_next_bframe(bch))
+ hfcpci_fill_fifo(bch);
+ }
+@@ -2277,7 +2277,7 @@ _hfcpci_softirq(struct device *dev, void *unused)
+ return 0;
+
+ if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
+- spin_lock(&hc->lock);
++ spin_lock_irq(&hc->lock);
+ bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
+ if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
+ main_rec_hfcpci(bch);
+@@ -2288,7 +2288,7 @@ _hfcpci_softirq(struct device *dev, void *unused)
+ main_rec_hfcpci(bch);
+ tx_birq(bch);
+ }
+- spin_unlock(&hc->lock);
++ spin_unlock_irq(&hc->lock);
+ }
+ return 0;
+ }
+diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
+index b3cc427100a22..636e65328bb32 100644
+--- a/drivers/mtd/nand/raw/fsl_upm.c
++++ b/drivers/mtd/nand/raw/fsl_upm.c
+@@ -135,7 +135,7 @@ static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+ unsigned int i;
+ int ret;
+
+- if (op->cs > NAND_MAX_CHIPS)
++ if (op->cs >= NAND_MAX_CHIPS)
+ return -EINVAL;
+
+ if (check_only)
+diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
+index bb256a3bb9beb..9d441965321ad 100644
+--- a/drivers/mtd/nand/raw/meson_nand.c
++++ b/drivers/mtd/nand/raw/meson_nand.c
+@@ -1180,7 +1180,6 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct mtd_info *mtd = nand_to_mtd(nand);
+- int nsectors = mtd->writesize / 1024;
+ int ret;
+
+ if (!mtd->name) {
+@@ -1198,7 +1197,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+
+ ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps,
+- mtd->oobsize - 2 * nsectors);
++ mtd->oobsize - 2);
+ if (ret) {
+ dev_err(nfc->dev, "failed to ECC init\n");
+ return -EINVAL;
+diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
+index 2b21ce04b3ec6..1a48347be3fe4 100644
+--- a/drivers/mtd/nand/raw/omap_elm.c
++++ b/drivers/mtd/nand/raw/omap_elm.c
+@@ -177,17 +177,17 @@ static void elm_load_syndrome(struct elm_info *info,
+ switch (info->bch_type) {
+ case BCH8_ECC:
+ /* syndrome fragment 0 = ecc[9-12B] */
+- val = cpu_to_be32(*(u32 *) &ecc[9]);
++ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[5-8B] */
+ offset += 4;
+- val = cpu_to_be32(*(u32 *) &ecc[5]);
++ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[5]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 2 = ecc[1-4B] */
+ offset += 4;
+- val = cpu_to_be32(*(u32 *) &ecc[1]);
++ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[1]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 3 = ecc[0B] */
+@@ -197,35 +197,35 @@ static void elm_load_syndrome(struct elm_info *info,
+ break;
+ case BCH4_ECC:
+ /* syndrome fragment 0 = ecc[20-52b] bits */
+- val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
++ val = ((__force u32)cpu_to_be32(*(u32 *)&ecc[3]) >> 4) |
+ ((ecc[2] & 0xf) << 28);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[0-20b] bits */
+ offset += 4;
+- val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
++ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 12;
+ elm_write_reg(info, offset, val);
+ break;
+ case BCH16_ECC:
+- val = cpu_to_be32(*(u32 *) &ecc[22]);
++ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[22]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+- val = cpu_to_be32(*(u32 *) &ecc[18]);
++ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[18]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+- val = cpu_to_be32(*(u32 *) &ecc[14]);
++ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[14]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+- val = cpu_to_be32(*(u32 *) &ecc[10]);
++ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[10]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+- val = cpu_to_be32(*(u32 *) &ecc[6]);
++ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[6]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+- val = cpu_to_be32(*(u32 *) &ecc[2]);
++ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[2]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+- val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16;
++ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 16;
+ elm_write_reg(info, offset, val);
+ break;
+ default:
+diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+index b5405bc7ca3a3..99242bd684375 100644
+--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
++++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+@@ -562,9 +562,10 @@ static int rk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3
+ *
+ * The rk_nfc_ooblayout_free() function already has reserved
+- * these 4 bytes with:
++ * these 4 bytes together with 2 bytes for BBM
++ * by reducing it's length:
+ *
+- * oob_region->offset = NFC_SYS_DATA_SIZE + 2;
++ * oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2;
+ */
+ if (!i)
+ memcpy(rk_nfc_oob_ptr(chip, i),
+@@ -597,7 +598,7 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ int pages_per_blk = mtd->erasesize / mtd->writesize;
+ int ret = 0, i, boot_rom_mode = 0;
+ dma_addr_t dma_data, dma_oob;
+- u32 reg;
++ u32 tmp;
+ u8 *oob;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+@@ -624,6 +625,13 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ *
+ * 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ...
+ *
++ * The code here just swaps the first 4 bytes with the last
++ * 4 bytes without losing any data.
++ *
++ * The chip->oob_poi data layout:
++ *
++ * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3
++ *
+ * Configure the ECC algorithm supported by the boot ROM.
+ */
+ if ((page < (pages_per_blk * rknand->boot_blks)) &&
+@@ -634,21 +642,17 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ }
+
+ for (i = 0; i < ecc->steps; i++) {
+- if (!i) {
+- reg = 0xFFFFFFFF;
+- } else {
++ if (!i)
++ oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE;
++ else
+ oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
+- reg = oob[0] | oob[1] << 8 | oob[2] << 16 |
+- oob[3] << 24;
+- }
+
+- if (!i && boot_rom_mode)
+- reg = (page & (pages_per_blk - 1)) * 4;
++ tmp = oob[0] | oob[1] << 8 | oob[2] << 16 | oob[3] << 24;
+
+ if (nfc->cfg->type == NFC_V9)
+- nfc->oob_buf[i] = reg;
++ nfc->oob_buf[i] = tmp;
+ else
+- nfc->oob_buf[i * (oob_step / 4)] = reg;
++ nfc->oob_buf[i * (oob_step / 4)] = tmp;
+ }
+
+ dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf,
+@@ -811,12 +815,17 @@ static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on,
+ goto timeout_err;
+ }
+
+- for (i = 1; i < ecc->steps; i++) {
+- oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
++ for (i = 0; i < ecc->steps; i++) {
++ if (!i)
++ oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE;
++ else
++ oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
++
+ if (nfc->cfg->type == NFC_V9)
+ tmp = nfc->oob_buf[i];
+ else
+ tmp = nfc->oob_buf[i * (oob_step / 4)];
++
+ *oob++ = (u8)tmp;
+ *oob++ = (u8)(tmp >> 8);
+ *oob++ = (u8)(tmp >> 16);
+@@ -935,12 +944,8 @@ static int rk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+ if (section)
+ return -ERANGE;
+
+- /*
+- * The beginning of the OOB area stores the reserved data for the NFC,
+- * the size of the reserved data is NFC_SYS_DATA_SIZE bytes.
+- */
+ oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2;
+- oob_region->offset = NFC_SYS_DATA_SIZE + 2;
++ oob_region->offset = 2;
+
+ return 0;
+ }
+diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
+index 7380b1ebaccd5..a80427c131216 100644
+--- a/drivers/mtd/nand/spi/toshiba.c
++++ b/drivers/mtd/nand/spi/toshiba.c
+@@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
+ {
+ struct nand_device *nand = spinand_to_nand(spinand);
+ u8 mbf = 0;
+- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf);
++ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+@@ -92,7 +92,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
+ if (spi_mem_exec_op(spinand->spimem, &op))
+ return nanddev_get_ecc_conf(nand)->strength;
+
+- mbf >>= 4;
++ mbf = *(spinand->scratchbuf) >> 4;
+
+ if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf))
+ return nanddev_get_ecc_conf(nand)->strength;
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index d76b2377d66ef..773d751ef169f 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -1422,7 +1422,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+- clk_prepare_enable(priv->clk);
++ ret = clk_prepare_enable(priv->clk);
++ if (ret)
++ return ret;
+
+ priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
+ if (IS_ERR(priv->clk_mdiv)) {
+@@ -1430,7 +1432,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
+ goto out_clk;
+ }
+
+- clk_prepare_enable(priv->clk_mdiv);
++ ret = clk_prepare_enable(priv->clk_mdiv);
++ if (ret)
++ goto out_clk;
+
+ ret = bcm_sf2_sw_rst(priv);
+ if (ret) {
+diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
+index df9a8eefa007a..916e85039b610 100644
+--- a/drivers/net/ethernet/korina.c
++++ b/drivers/net/ethernet/korina.c
+@@ -1301,11 +1301,10 @@ static int korina_probe(struct platform_device *pdev)
+ else if (of_get_ethdev_address(pdev->dev.of_node, dev) < 0)
+ eth_hw_addr_random(dev);
+
+- clk = devm_clk_get_optional(&pdev->dev, "mdioclk");
++ clk = devm_clk_get_optional_enabled(&pdev->dev, "mdioclk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ if (clk) {
+- clk_prepare_enable(clk);
+ lp->mii_clock_freq = clk_get_rate(clk);
+ } else {
+ lp->mii_clock_freq = 200000000; /* max possible input clk */
+diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+index a8d7b889ebeee..6bef633aa6330 100644
+--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
++++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+@@ -645,7 +645,8 @@ pick_fw_ver:
+
+ err = request_firmware_direct(&fw->bin, fw_path, fw->dev.dev);
+ if (err) {
+- if (ver_maj == PRESTERA_SUPP_FW_MAJ_VER) {
++ if (ver_maj != PRESTERA_PREV_FW_MAJ_VER ||
++ ver_min != PRESTERA_PREV_FW_MIN_VER) {
+ ver_maj = PRESTERA_PREV_FW_MAJ_VER;
+ ver_min = PRESTERA_PREV_FW_MIN_VER;
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+index b56fea142c246..4590d19c25cf7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+@@ -121,7 +121,9 @@ static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
+
+ trailer_len = alen + plen + 2;
+
+- pskb_trim(skb, skb->len - trailer_len);
++ ret = pskb_trim(skb, skb->len - trailer_len);
++ if (unlikely(ret))
++ return ret;
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
+ ip_send_check(ipv4hdr);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+index 605c8ecc3610f..ccccbac044287 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+@@ -981,7 +981,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
+ mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
+ if (!mlx5_core_is_sf(dev))
+ clear_rmap(dev);
+- mlx5_irq_table_destroy(dev);
++ mlx5_irq_table_free_irqs(dev);
+ mutex_unlock(&table->lock);
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index cb3f9de3d00ba..161ad2ae40196 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -802,7 +802,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
+ struct fs_node *iter = list_entry(start, struct fs_node, list);
+ struct mlx5_flow_table *ft = NULL;
+
+- if (!root || root->type == FS_TYPE_PRIO_CHAINS)
++ if (!root)
+ return NULL;
+
+ list_for_each_advance_continue(iter, &root->children, reverse) {
+@@ -818,20 +818,42 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
+ return ft;
+ }
+
+-/* If reverse is false then return the first flow table in next priority of
+- * prio in the tree, else return the last flow table in the previous priority
+- * of prio in the tree.
++static struct fs_node *find_prio_chains_parent(struct fs_node *parent,
++ struct fs_node **child)
++{
++ struct fs_node *node = NULL;
++
++ while (parent && parent->type != FS_TYPE_PRIO_CHAINS) {
++ node = parent;
++ parent = parent->parent;
++ }
++
++ if (child)
++ *child = node;
++
++ return parent;
++}
++
++/* If reverse is false then return the first flow table next to the passed node
++ * in the tree, else return the last flow table before the node in the tree.
++ * If skip is true, skip the flow tables in the same prio_chains prio.
+ */
+-static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
++static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool reverse,
++ bool skip)
+ {
++ struct fs_node *prio_chains_parent = NULL;
+ struct mlx5_flow_table *ft = NULL;
+ struct fs_node *curr_node;
+ struct fs_node *parent;
+
+- parent = prio->node.parent;
+- curr_node = &prio->node;
++ if (skip)
++ prio_chains_parent = find_prio_chains_parent(node, NULL);
++ parent = node->parent;
++ curr_node = node;
+ while (!ft && parent) {
+- ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
++ if (parent != prio_chains_parent)
++ ft = find_closest_ft_recursive(parent, &curr_node->list,
++ reverse);
+ curr_node = parent;
+ parent = curr_node->parent;
+ }
+@@ -839,15 +861,15 @@ static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool revers
+ }
+
+ /* Assuming all the tree is locked by mutex chain lock */
+-static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
++static struct mlx5_flow_table *find_next_chained_ft(struct fs_node *node)
+ {
+- return find_closest_ft(prio, false);
++ return find_closest_ft(node, false, true);
+ }
+
+ /* Assuming all the tree is locked by mutex chain lock */
+-static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
++static struct mlx5_flow_table *find_prev_chained_ft(struct fs_node *node)
+ {
+- return find_closest_ft(prio, true);
++ return find_closest_ft(node, true, true);
+ }
+
+ static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
+@@ -859,7 +881,7 @@ static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
+ next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
+ fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
+
+- return find_next_chained_ft(prio);
++ return find_next_chained_ft(&prio->node);
+ }
+
+ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
+@@ -883,21 +905,55 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
+ return 0;
+ }
+
++static struct mlx5_flow_table *find_closet_ft_prio_chains(struct fs_node *node,
++ struct fs_node *parent,
++ struct fs_node **child,
++ bool reverse)
++{
++ struct mlx5_flow_table *ft;
++
++ ft = find_closest_ft(node, reverse, false);
++
++ if (ft && parent == find_prio_chains_parent(&ft->node, child))
++ return ft;
++
++ return NULL;
++}
++
+ /* Connect flow tables from previous priority of prio to ft */
+ static int connect_prev_fts(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct fs_prio *prio)
+ {
++ struct fs_node *prio_parent, *parent = NULL, *child, *node;
+ struct mlx5_flow_table *prev_ft;
++ int err = 0;
++
++ prio_parent = find_prio_chains_parent(&prio->node, &child);
++
++ /* return directly if not under the first sub ns of prio_chains prio */
++ if (prio_parent && !list_is_first(&child->list, &prio_parent->children))
++ return 0;
+
+- prev_ft = find_prev_chained_ft(prio);
+- if (prev_ft) {
++ prev_ft = find_prev_chained_ft(&prio->node);
++ while (prev_ft) {
+ struct fs_prio *prev_prio;
+
+ fs_get_obj(prev_prio, prev_ft->node.parent);
+- return connect_fts_in_prio(dev, prev_prio, ft);
++ err = connect_fts_in_prio(dev, prev_prio, ft);
++ if (err)
++ break;
++
++ if (!parent) {
++ parent = find_prio_chains_parent(&prev_prio->node, &child);
++ if (!parent)
++ break;
++ }
++
++ node = child;
++ prev_ft = find_closet_ft_prio_chains(node, parent, &child, true);
+ }
+- return 0;
++ return err;
+ }
+
+ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
+@@ -1036,7 +1092,7 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table
+ if (err)
+ return err;
+
+- next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
++ next_ft = first_ft ? first_ft : find_next_chained_ft(&prio->node);
+ err = connect_fwd_rules(dev, ft, next_ft);
+ if (err)
+ return err;
+@@ -1111,7 +1167,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
+
+ tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
+ next_ft = unmanaged ? ft_attr->next_ft :
+- find_next_chained_ft(fs_prio);
++ find_next_chained_ft(&fs_prio->node);
+ ft->def_miss_action = ns->def_miss_action;
+ ft->ns = ns;
+ err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
+@@ -2080,13 +2136,20 @@ EXPORT_SYMBOL(mlx5_del_flow_rules);
+ /* Assuming prio->node.children(flow tables) is sorted by level */
+ static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
+ {
++ struct fs_node *prio_parent, *child;
+ struct fs_prio *prio;
+
+ fs_get_obj(prio, ft->node.parent);
+
+ if (!list_is_last(&ft->node.list, &prio->node.children))
+ return list_next_entry(ft, node.list);
+- return find_next_chained_ft(prio);
++
++ prio_parent = find_prio_chains_parent(&prio->node, &child);
++
++ if (prio_parent && list_is_first(&child->list, &prio_parent->children))
++ return find_closest_ft(&prio->node, false, false);
++
++ return find_next_chained_ft(&prio->node);
+ }
+
+ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+index abd024173c42e..8cf40a3658d99 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+@@ -16,6 +16,7 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev);
+ void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
+ int mlx5_irq_table_create(struct mlx5_core_dev *dev);
+ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
++void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
+ int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
+ int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
+ struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+index 11f3649fdaab1..df16dc35bb04c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+@@ -550,6 +550,24 @@ static void irq_pools_destroy(struct mlx5_irq_table *table)
+ irq_pool_free(table->pf_pool);
+ }
+
++static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool)
++{
++ struct mlx5_irq *irq;
++ unsigned long index;
++
++ xa_for_each(&pool->irqs, index, irq)
++ free_irq(irq->irqn, &irq->nh);
++}
++
++static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)
++{
++ if (table->sf_ctrl_pool) {
++ mlx5_irq_pool_free_irqs(table->sf_comp_pool);
++ mlx5_irq_pool_free_irqs(table->sf_ctrl_pool);
++ }
++ mlx5_irq_pool_free_irqs(table->pf_pool);
++}
++
+ /* irq_table API */
+
+ int mlx5_irq_table_init(struct mlx5_core_dev *dev)
+@@ -630,6 +648,17 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
+ pci_free_irq_vectors(dev->pdev);
+ }
+
++void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev)
++{
++ struct mlx5_irq_table *table = dev->priv.irq_table;
++
++ if (mlx5_core_is_sf(dev))
++ return;
++
++ mlx5_irq_pools_free_irqs(table);
++ pci_free_irq_vectors(dev->pdev);
++}
++
+ int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
+ {
+ if (table->sf_comp_pool)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+index fcf705ce421f3..aa003a75946bb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+@@ -528,11 +528,12 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
+
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ if (err)
+- return err;
++ goto err_free_in;
+
+ *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
+- kvfree(in);
+
++err_free_in:
++ kvfree(in);
+ return err;
+ }
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
+index d58e021614cd0..b656408b9d700 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed.h
++++ b/drivers/net/ethernet/qlogic/qed/qed.h
+@@ -877,12 +877,13 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
+
+
+ /**
+- * @brief qed_concrete_to_sw_fid - get the sw function id from
+- * the concrete value.
++ * qed_concrete_to_sw_fid(): Get the sw function id from
++ * the concrete value.
+ *
+- * @param concrete_fid
++ * @cdev: Qed dev pointer.
++ * @concrete_fid: Concrete fid.
+ *
+- * @return inline u8
++ * Return: inline u8.
+ */
+ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
+ u32 concrete_fid)
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+index 8adb7ed0c12db..d31196db7bdde 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+@@ -28,24 +28,23 @@ struct qed_tid_mem {
+ };
+
+ /**
+- * @brief qedo_cid_get_cxt_info - Returns the context info for a specific cid
++ * qed_cxt_get_cid_info(): Returns the context info for a specific cid.
+ *
++ * @p_hwfn: HW device data.
++ * @p_info: In/out.
+ *
+- * @param p_hwfn
+- * @param p_info in/out
+- *
+- * @return int
++ * Return: Int.
+ */
+ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+ struct qed_cxt_info *p_info);
+
+ /**
+- * @brief qed_cxt_get_tid_mem_info
++ * qed_cxt_get_tid_mem_info(): Returns the tid mem info.
+ *
+- * @param p_hwfn
+- * @param p_info
++ * @p_hwfn: HW device data.
++ * @p_info: in/out.
+ *
+- * @return int
++ * Return: int.
+ */
+ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
+ struct qed_tid_mem *p_info);
+@@ -64,142 +63,155 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *vf_cid);
+
+ /**
+- * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
++ * qed_cxt_set_pf_params(): Set the PF params for cxt init.
++ *
++ * @p_hwfn: HW device data.
++ * @rdma_tasks: Requested maximum.
+ *
+- * @param p_hwfn
+- * @param rdma_tasks - requested maximum
+- * @return int
++ * Return: int.
+ */
+ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks);
+
+ /**
+- * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
++ * qed_cxt_cfg_ilt_compute(): Compute ILT init parameters.
+ *
+- * @param p_hwfn
+- * @param last_line
++ * @p_hwfn: HW device data.
++ * @last_line: Last ILT line.
+ *
+- * @return int
++ * Return: Int
+ */
+ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line);
+
+ /**
+- * @brief qed_cxt_cfg_ilt_compute_excess - how many lines can be decreased
++ * qed_cxt_cfg_ilt_compute_excess(): How many lines can be decreased.
++ *
++ * @p_hwfn: HW device data.
++ * @used_lines: Used lines.
+ *
+- * @param p_hwfn
+- * @param used_lines
++ * Return: Int.
+ */
+ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines);
+
+ /**
+- * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
++ * qed_cxt_mngr_alloc(): Allocate and init the context manager struct.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_cxt_mngr_free
++ * qed_cxt_mngr_free() - Context manager free.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
++ *
++ * Return: Void.
+ */
+ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
++ * qed_cxt_tables_alloc(): Allocate ILT shadow, Searcher T2, acquired map.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_cxt_mngr_setup - Reset the acquired CIDs
++ * qed_cxt_mngr_setup(): Reset the acquired CIDs.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ */
+ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_cxt_hw_init_common - Initailze ILT and DQ, common phase, per path.
+- *
++ * qed_cxt_hw_init_common(): Initialize ILT and DQ, common phase, per path.
+ *
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
++ * Return: Void.
+ */
+ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path.
++ * qed_cxt_hw_init_pf(): Initialize ILT and DQ, PF phase, per path.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ *
++ * Return: Void.
+ */
+ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+ /**
+- * @brief qed_qm_init_pf - Initailze the QM PF phase, per path
++ * qed_qm_init_pf(): Initialize the QM PF phase, per path.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @is_pf_loading: Indicates whether the PF is currently loading.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param is_pf_loading
++ * Return: Void.
+ */
+ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool is_pf_loading);
+
+ /**
+- * @brief Reconfigures QM pf on the fly
++ * qed_qm_reconf(): Reconfigures QM pf on the fly.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+ #define QED_CXT_PF_CID (0xff)
+
+ /**
+- * @brief qed_cxt_release - Release a cid
++ * qed_cxt_release_cid(): Release a cid.
+ *
+- * @param p_hwfn
+- * @param cid
++ * @p_hwfn: HW device data.
++ * @cid: Cid.
++ *
++ * Return: Void.
+ */
+ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid);
+
+ /**
+- * @brief qed_cxt_release - Release a cid belonging to a vf-queue
++ * _qed_cxt_release_cid(): Release a cid belonging to a vf-queue.
++ *
++ * @p_hwfn: HW device data.
++ * @cid: Cid.
++ * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF.
+ *
+- * @param p_hwfn
+- * @param cid
+- * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
++ * Return: Void.
+ */
+ void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid);
+
+ /**
+- * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
++ * qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type.
+ *
+- * @param p_hwfn
+- * @param type
+- * @param p_cid
++ * @p_hwfn: HW device data.
++ * @type: Type.
++ * @p_cid: Pointer cid.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *p_cid);
+
+ /**
+- * @brief _qed_cxt_acquire - Acquire a new cid of a specific protocol type
+- * for a vf-queue
++ * _qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type
++ * for a vf-queue.
+ *
+- * @param p_hwfn
+- * @param type
+- * @param p_cid
+- * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
++ * @p_hwfn: HW device data.
++ * @type: Type.
++ * @p_cid: Pointer cid.
++ * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *p_cid, u8 vfid);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+index d3c1f3879be87..a0a766a1723cc 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+@@ -15,44 +15,52 @@
+ #include "qed_int.h"
+
+ /**
+- * @brief qed_init_dp - initialize the debug level
++ * qed_init_dp(): Initialize the debug level.
+ *
+- * @param cdev
+- * @param dp_module
+- * @param dp_level
++ * @cdev: Qed dev pointer.
++ * @dp_module: Module debug parameter.
++ * @dp_level: Module debug level.
++ *
++ * Return: Void.
+ */
+ void qed_init_dp(struct qed_dev *cdev,
+ u32 dp_module,
+ u8 dp_level);
+
+ /**
+- * @brief qed_init_struct - initialize the device structure to
+- * its defaults
++ * qed_init_struct(): Initialize the device structure to
++ * its defaults.
++ *
++ * @cdev: Qed dev pointer.
+ *
+- * @param cdev
++ * Return: Void.
+ */
+ void qed_init_struct(struct qed_dev *cdev);
+
+ /**
+- * @brief qed_resc_free -
++ * qed_resc_free(): Free device resources.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
++ *
++ * Return: Void.
+ */
+ void qed_resc_free(struct qed_dev *cdev);
+
+ /**
+- * @brief qed_resc_alloc -
++ * qed_resc_alloc(): Alloc device resources.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_resc_alloc(struct qed_dev *cdev);
+
+ /**
+- * @brief qed_resc_setup -
++ * qed_resc_setup(): Setup device resources.
++ *
++ * @cdev: Qed dev pointer.
+ *
+- * @param cdev
++ * Return: Void.
+ */
+ void qed_resc_setup(struct qed_dev *cdev);
+
+@@ -105,94 +113,113 @@ struct qed_hw_init_params {
+ };
+
+ /**
+- * @brief qed_hw_init -
++ * qed_hw_init(): Init Qed hardware.
+ *
+- * @param cdev
+- * @param p_params
++ * @cdev: Qed dev pointer.
++ * @p_params: Pointer to the init params struct.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params);
+
+ /**
+- * @brief qed_hw_timers_stop_all - stop the timers HW block
++ * qed_hw_timers_stop_all(): Stop the timers HW block.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return void
++ * Return: void.
+ */
+ void qed_hw_timers_stop_all(struct qed_dev *cdev);
+
+ /**
+- * @brief qed_hw_stop -
++ * qed_hw_stop(): Stop Qed hardware.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return int
++ * Return: int.
+ */
+ int qed_hw_stop(struct qed_dev *cdev);
+
+ /**
+- * @brief qed_hw_stop_fastpath -should be called incase
+- * slowpath is still required for the device,
+- * but fastpath is not.
++ * qed_hw_stop_fastpath(): Should be called in case
++ * slowpath is still required for the device,
++ * but fastpath is not.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_hw_stop_fastpath(struct qed_dev *cdev);
+
+ /**
+- * @brief qed_hw_start_fastpath -restart fastpath traffic,
+- * only if hw_stop_fastpath was called
++ * qed_hw_start_fastpath(): Restart fastpath traffic,
++ * only if hw_stop_fastpath was called.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
+
+
+ /**
+- * @brief qed_hw_prepare -
++ * qed_hw_prepare(): Prepare Qed hardware.
+ *
+- * @param cdev
+- * @param personality - personality to initialize
++ * @cdev: Qed dev pointer.
++ * @personality: Personality to initialize.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_hw_prepare(struct qed_dev *cdev,
+ int personality);
+
+ /**
+- * @brief qed_hw_remove -
++ * qed_hw_remove(): Remove Qed hardware.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
++ *
++ * Return: Void.
+ */
+ void qed_hw_remove(struct qed_dev *cdev);
+
+ /**
+- * @brief qed_ptt_acquire - Allocate a PTT window
++ * qed_ptt_acquire(): Allocate a PTT window.
+ *
+- * Should be called at the entry point to the driver (at the beginning of an
+- * exported function)
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
++ * Return: struct qed_ptt.
+ *
+- * @return struct qed_ptt
++ * Should be called at the entry point to the driver (at the beginning of an
++ * exported function).
+ */
+ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_ptt_release - Release PTT Window
++ * qed_ptt_acquire_context(): Allocate a PTT window honoring the context
++ * atomicity.
+ *
+- * Should be called at the end of a flow - at the end of the function that
+- * acquired the PTT.
++ * @p_hwfn: HW device data.
++ * @is_atomic: Hint from the caller - true if the function must not sleep.
++ *
++ * Context: The function should not sleep in case is_atomic == true.
++ * Return: struct qed_ptt.
++ *
++ * Should be called at the entry point to the driver
++ * (at the beginning of an exported function).
++ */
++struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn,
++ bool is_atomic);
++
++/**
++ * qed_ptt_release(): Release PTT Window.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
++ * Return: Void.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * Should be called at the end of a flow - at the end of the function that
++ * acquired the PTT.
+ */
+ void qed_ptt_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+@@ -205,15 +232,17 @@ enum qed_dmae_address_type_t {
+ };
+
+ /**
+- * @brief qed_dmae_host2grc - copy data from source addr to
+- * dmae registers using the given ptt
++ * qed_dmae_host2grc(): Copy data from source addr to
++ * dmae registers using the given ptt.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param source_addr
+- * @param grc_addr (dmae_data_offset)
+- * @param size_in_dwords
+- * @param p_params (default parameters will be used in case of NULL)
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @source_addr: Source address.
++ * @grc_addr: GRC address (dmae_data_offset).
++ * @size_in_dwords: Size.
++ * @p_params: (default parameters will be used in case of NULL).
++ *
++ * Return: Int.
+ */
+ int
+ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+@@ -224,29 +253,34 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+ struct qed_dmae_params *p_params);
+
+ /**
+- * @brief qed_dmae_grc2host - Read data from dmae data offset
+- * to source address using the given ptt
++ * qed_dmae_grc2host(): Read data from dmae data offset
++ * to source address using the given ptt.
++ *
++ * @p_ptt: P_ptt.
++ * @grc_addr: GRC address (dmae_data_offset).
++ * @dest_addr: Destination Address.
++ * @size_in_dwords: Size.
++ * @p_params: (default parameters will be used in case of NULL).
+ *
+- * @param p_ptt
+- * @param grc_addr (dmae_data_offset)
+- * @param dest_addr
+- * @param size_in_dwords
+- * @param p_params (default parameters will be used in case of NULL)
++ * Return: Int.
+ */
+ int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
+ struct qed_dmae_params *p_params);
+
+ /**
+- * @brief qed_dmae_host2host - copy data from to source address
+- * to a destination adress (for SRIOV) using the given ptt
++ * qed_dmae_host2host(): Copy data from a source address
++ * to a destination address (for SRIOV) using the given
++ * ptt.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @source_addr: Source address.
++ * @dest_addr: Destination address.
++ * @size_in_dwords: Size.
++ * @p_params: (default parameters will be used in case of NULL).
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param source_addr
+- * @param dest_addr
+- * @param size_in_dwords
+- * @param p_params (default parameters will be used in case of NULL)
++ * Return: Int.
+ */
+ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -259,51 +293,51 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
+ void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain);
+
+ /**
+- * @@brief qed_fw_l2_queue - Get absolute L2 queue ID
++ * qed_fw_l2_queue(): Get absolute L2 queue ID.
+ *
+- * @param p_hwfn
+- * @param src_id - relative to p_hwfn
+- * @param dst_id - absolute per engine
++ * @p_hwfn: HW device data.
++ * @src_id: Relative to p_hwfn.
++ * @dst_id: Absolute per engine.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+ u16 src_id,
+ u16 *dst_id);
+
+ /**
+- * @@brief qed_fw_vport - Get absolute vport ID
++ * qed_fw_vport(): Get absolute vport ID.
+ *
+- * @param p_hwfn
+- * @param src_id - relative to p_hwfn
+- * @param dst_id - absolute per engine
++ * @p_hwfn: HW device data.
++ * @src_id: Relative to p_hwfn.
++ * @dst_id: Absolute per engine.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_fw_vport(struct qed_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
+
+ /**
+- * @@brief qed_fw_rss_eng - Get absolute RSS engine ID
++ * qed_fw_rss_eng(): Get absolute RSS engine ID.
+ *
+- * @param p_hwfn
+- * @param src_id - relative to p_hwfn
+- * @param dst_id - absolute per engine
++ * @p_hwfn: HW device data.
++ * @src_id: Relative to p_hwfn.
++ * @dst_id: Absolute per engine.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
+
+ /**
+- * @brief qed_llh_get_num_ppfid - Return the allocated number of LLH filter
+- * banks that are allocated to the PF.
++ * qed_llh_get_num_ppfid(): Return the allocated number of LLH filter
++ * banks that are allocated to the PF.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return u8 - Number of LLH filter banks
++ * Return: u8 Number of LLH filter banks.
+ */
+ u8 qed_llh_get_num_ppfid(struct qed_dev *cdev);
+
+@@ -314,45 +348,50 @@ enum qed_eng {
+ };
+
+ /**
+- * @brief qed_llh_set_ppfid_affinity - Set the engine affinity for the given
+- * LLH filter bank.
++ * qed_llh_set_ppfid_affinity(): Set the engine affinity for the given
++ * LLH filter bank.
+ *
+- * @param cdev
+- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+- * @param eng
++ * @cdev: Qed dev pointer.
++ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
++ * @eng: Engine.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_llh_set_ppfid_affinity(struct qed_dev *cdev,
+ u8 ppfid, enum qed_eng eng);
+
+ /**
+- * @brief qed_llh_set_roce_affinity - Set the RoCE engine affinity
++ * qed_llh_set_roce_affinity(): Set the RoCE engine affinity.
+ *
+- * @param cdev
+- * @param eng
++ * @cdev: Qed dev pointer.
++ * @eng: Engine.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng);
+
+ /**
+- * @brief qed_llh_add_mac_filter - Add a LLH MAC filter into the given filter
+- * bank.
++ * qed_llh_add_mac_filter(): Add a LLH MAC filter into the given filter
++ * bank.
+ *
+- * @param cdev
+- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+- * @param mac_addr - MAC to add
++ * @cdev: Qed dev pointer.
++ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
++ * @mac_addr: MAC to add.
++ *
++ * Return: Int.
+ */
+ int qed_llh_add_mac_filter(struct qed_dev *cdev,
+ u8 ppfid, u8 mac_addr[ETH_ALEN]);
+
+ /**
+- * @brief qed_llh_remove_mac_filter - Remove a LLH MAC filter from the given
+- * filter bank.
++ * qed_llh_remove_mac_filter(): Remove a LLH MAC filter from the given
++ * filter bank.
++ *
++ * @cdev: Qed dev pointer.
++ * @ppfid: Ppfid.
++ * @mac_addr: MAC to remove.
+ *
+- * @param p_ptt
+- * @param p_filter - MAC to remove
++ * Return: Void.
+ */
+ void qed_llh_remove_mac_filter(struct qed_dev *cdev,
+ u8 ppfid, u8 mac_addr[ETH_ALEN]);
+@@ -368,15 +407,16 @@ enum qed_llh_prot_filter_type_t {
+ };
+
+ /**
+- * @brief qed_llh_add_protocol_filter - Add a LLH protocol filter into the
+- * given filter bank.
++ * qed_llh_add_protocol_filter(): Add a LLH protocol filter into the
++ * given filter bank.
++ *
++ * @cdev: Qed dev pointer.
++ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
++ * @type: Type of filters and comparing.
++ * @source_port_or_eth_type: Source port or ethertype to add.
++ * @dest_port: Destination port to add.
+ *
+- * @param cdev
+- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+- * @param type - type of filters and comparing
+- * @param source_port_or_eth_type - source port or ethertype to add
+- * @param dest_port - destination port to add
+- * @param type - type of filters and comparing
++ * Return: Int.
+ */
+ int
+ qed_llh_add_protocol_filter(struct qed_dev *cdev,
+@@ -385,14 +425,14 @@ qed_llh_add_protocol_filter(struct qed_dev *cdev,
+ u16 source_port_or_eth_type, u16 dest_port);
+
+ /**
+- * @brief qed_llh_remove_protocol_filter - Remove a LLH protocol filter from
+- * the given filter bank.
++ * qed_llh_remove_protocol_filter(): Remove a LLH protocol filter from
++ * the given filter bank.
+ *
+- * @param cdev
+- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+- * @param type - type of filters and comparing
+- * @param source_port_or_eth_type - source port or ethertype to add
+- * @param dest_port - destination port to add
++ * @cdev: Qed dev pointer.
++ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
++ * @type: Type of filters and comparing.
++ * @source_port_or_eth_type: Source port or ethertype to add.
++ * @dest_port: Destination port to add.
+ */
+ void
+ qed_llh_remove_protocol_filter(struct qed_dev *cdev,
+@@ -401,31 +441,31 @@ qed_llh_remove_protocol_filter(struct qed_dev *cdev,
+ u16 source_port_or_eth_type, u16 dest_port);
+
+ /**
+- * *@brief Cleanup of previous driver remains prior to load
++ * qed_final_cleanup(): Cleanup of previous driver remains prior to load.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param id - For PF, engine-relative. For VF, PF-relative.
+- * @param is_vf - true iff cleanup is made for a VF.
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @id: For PF, engine-relative. For VF, PF-relative.
++ * @is_vf: True iff cleanup is made for a VF.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 id, bool is_vf);
+
+ /**
+- * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue.
++ * qed_get_queue_coalesce(): Retrieve coalesce value for a given queue.
+ *
+- * @param p_hwfn
+- * @param p_coal - store coalesce value read from the hardware.
+- * @param p_handle
++ * @p_hwfn: HW device data.
++ * @coal: Store coalesce value read from the hardware.
++ * @handle: P_handle.
+ *
+- * @return int
++ * Return: Int.
+ **/
+ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
+
+ /**
+- * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and
++ * qed_set_queue_coalesce(): Configure coalesce parameters for Rx and
+ * Tx queue. The fact that we can configure coalescing to up to 511, but on
+ * varying accuracy [the bigger the value the less accurate] up to a mistake
+ * of 3usec for the highest values.
+@@ -433,37 +473,38 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
+ * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
+ * otherwise configuration would break.
+ *
++ * @rx_coal: Rx Coalesce value in micro seconds.
++ * @tx_coal: TX Coalesce value in micro seconds.
++ * @p_handle: P_handle.
+ *
+- * @param rx_coal - Rx Coalesce value in micro seconds.
+- * @param tx_coal - TX Coalesce value in micro seconds.
+- * @param p_handle
+- *
+- * @return int
++ * Return: Int.
+ **/
+ int
+ qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle);
+
+ /**
+- * @brief qed_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
++ * qed_pglueb_set_pfid_enable(): Enable or disable PCI BUS MASTER.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param b_enable - true/false
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @b_enable: True/False.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool b_enable);
+
+ /**
+- * @brief db_recovery_add - add doorbell information to the doorbell
+- * recovery mechanism.
++ * qed_db_recovery_add(): Add doorbell information to the doorbell
++ * recovery mechanism.
++ *
++ * @cdev: Qed dev pointer.
++ * @db_addr: Doorbell address.
++ * @db_data: Address of where db_data is stored.
++ * @db_width: Doorbell is 32b or 64b.
++ * @db_space: Doorbell recovery addresses are user or kernel space.
+ *
+- * @param cdev
+- * @param db_addr - doorbell address
+- * @param db_data - address of where db_data is stored
+- * @param db_width - doorbell is 32b pr 64b
+- * @param db_space - doorbell recovery addresses are user or kernel space
++ * Return: Int.
+ */
+ int qed_db_recovery_add(struct qed_dev *cdev,
+ void __iomem *db_addr,
+@@ -472,13 +513,15 @@ int qed_db_recovery_add(struct qed_dev *cdev,
+ enum qed_db_rec_space db_space);
+
+ /**
+- * @brief db_recovery_del - remove doorbell information from the doorbell
++ * qed_db_recovery_del(): Remove doorbell information from the doorbell
+ * recovery mechanism. db_data serves as key (db_addr is not unique).
+ *
+- * @param cdev
+- * @param db_addr - doorbell address
+- * @param db_data - address where db_data is stored. Serves as key for the
++ * @cdev: Qed dev pointer.
++ * @db_addr: doorbell address.
++ * @db_data: address where db_data is stored. Serves as key for the
+ * entry to delete.
++ *
++ * Return: Int.
+ */
+ int qed_db_recovery_del(struct qed_dev *cdev,
+ void __iomem *db_addr, void *db_data);
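qed_ptt_acquire_context() lets callers that may run in atomic context obtain a PTT window without sleeping. A hedged usage sketch, assuming the qed driver environment (demo_read_stats is hypothetical; the real caller appears in the qed_fcoe change that follows, and the error code mirrors qed_fcoe_get_stats() there):

static int demo_read_stats(struct qed_hwfn *p_hwfn, bool is_atomic)
{
	struct qed_ptt *p_ptt;

	/* is_atomic == true: the acquire path must not sleep. */
	p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic);
	if (!p_ptt)
		return -EINVAL;

	/* ...read the HW counters through p_ptt here... */

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;
}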
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+index b768f0698170e..0c55249b3a358 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+@@ -694,13 +694,14 @@ static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
+ }
+
+ static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
+- struct qed_fcoe_stats *p_stats)
++ struct qed_fcoe_stats *p_stats,
++ bool is_atomic)
+ {
+ struct qed_ptt *p_ptt;
+
+ memset(p_stats, 0, sizeof(*p_stats));
+
+- p_ptt = qed_ptt_acquire(p_hwfn);
++ p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic);
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+@@ -974,19 +975,27 @@ static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ }
+
++static int qed_fcoe_stats_context(struct qed_dev *cdev,
++ struct qed_fcoe_stats *stats,
++ bool is_atomic)
++{
++ return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic);
++}
++
+ static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
+ {
+- return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats);
++ return qed_fcoe_stats_context(cdev, stats, false);
+ }
+
+ void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
+- struct qed_mcp_fcoe_stats *stats)
++ struct qed_mcp_fcoe_stats *stats,
++ bool is_atomic)
+ {
+ struct qed_fcoe_stats proto_stats;
+
+ /* Retrieve FW statistics */
+ memset(&proto_stats, 0, sizeof(proto_stats));
+- if (qed_fcoe_stats(cdev, &proto_stats)) {
++ if (qed_fcoe_stats_context(cdev, &proto_stats, is_atomic)) {
+ DP_VERBOSE(cdev, QED_MSG_STORAGE,
+ "Failed to collect FCoE statistics\n");
+ return;
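With the extra parameter threaded through, a caller that cannot sleep simply passes true; a hypothetical non-sleeping caller might look like this (the function name is illustrative only):

/* Hypothetical caller running in a context that must not sleep. */
static void demo_collect_fcoe_stats(struct qed_dev *cdev,
				    struct qed_mcp_fcoe_stats *stats)
{
	qed_get_protocol_stats_fcoe(cdev, stats, true);	/* true == atomic */
}

Sleeping callers keep the old behaviour through qed_fcoe_stats(), which passes false.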
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
+index 19c85adf4ceb1..214e8299ecb4e 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
+@@ -28,8 +28,20 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn);
+ void qed_fcoe_setup(struct qed_hwfn *p_hwfn);
+
+ void qed_fcoe_free(struct qed_hwfn *p_hwfn);
++/**
++ * qed_get_protocol_stats_fcoe(): Fills provided statistics
++ * struct with statistics.
++ *
++ * @cdev: Qed dev pointer.
++ * @stats: Points to struct that will be filled with statistics.
++ * @is_atomic: Hint from the caller - true if the function must not sleep.
++ *
++ * Context: The function should not sleep in case is_atomic == true.
++ * Return: Void.
++ */
+ void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
+- struct qed_mcp_fcoe_stats *stats);
++ struct qed_mcp_fcoe_stats *stats,
++ bool is_atomic);
+ #else /* CONFIG_QED_FCOE */
+ static inline int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+ {
+@@ -40,7 +52,8 @@ static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn) {}
+ static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn) {}
+
+ static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
+- struct qed_mcp_fcoe_stats *stats)
++ struct qed_mcp_fcoe_stats *stats,
++ bool is_atomic)
+ {
+ }
+ #endif /* CONFIG_QED_FCOE */
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+index fb1baa2da2d0d..744c82a108754 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+@@ -3012,96 +3012,102 @@ struct iro {
+ /***************************** Public Functions *******************************/
+
+ /**
+- * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
+- * arrays.
++ * qed_dbg_set_bin_ptr(): Sets a pointer to the binary data with debug
++ * arrays.
+ *
+- * @param p_hwfn - HW device data
+- * @param bin_ptr - a pointer to the binary data with debug arrays.
++ * @p_hwfn: HW device data.
++ * @bin_ptr: A pointer to the binary data with debug arrays.
++ *
++ * Return: enum dbg status.
+ */
+ enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
+
+ /**
+- * @brief qed_read_regs - Reads registers into a buffer (using GRC).
++ * qed_read_regs(): Reads registers into a buffer (using GRC).
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @buf: Destination buffer.
++ * @addr: Source GRC address in dwords.
++ * @len: Number of registers to read.
+ *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param buf - Destination buffer.
+- * @param addr - Source GRC address in dwords.
+- * @param len - Number of registers to read.
++ * Return: Void.
+ */
+ void qed_read_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
+
+ /**
+- * @brief qed_read_fw_info - Reads FW info from the chip.
++ * qed_read_fw_info(): Reads FW info from the chip.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @fw_info: (Out) a pointer to write the FW info into.
++ *
++ * Return: True if the FW info was read successfully from one of the Storms,
++ * or false if all Storms are in reset.
+ *
+ * The FW info contains FW-related information, such as the FW version,
+ * FW image (main/L2B/kuku), FW timestamp, etc.
+ * The FW info is read from the internal RAM of the first Storm that is not in
+ * reset.
+- *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param fw_info - Out: a pointer to write the FW info into.
+- *
+- * @return true if the FW info was read successfully from one of the Storms,
+- * or false if all Storms are in reset.
+ */
+ bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct fw_info *fw_info);
+ /**
+- * @brief qed_dbg_grc_config - Sets the value of a GRC parameter.
++ * qed_dbg_grc_config(): Sets the value of a GRC parameter.
+ *
+- * @param p_hwfn - HW device data
+- * @param grc_param - GRC parameter
+- * @param val - Value to set.
++ * @p_hwfn: HW device data.
++ * @grc_param: GRC parameter.
++ * @val: Value to set.
+ *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * - grc_param is invalid
+- * - val is outside the allowed boundaries
++ * Return: Error if one of the following holds:
++ * - The version wasn't set.
++ * - Grc_param is invalid.
++ * - Val is outside the allowed boundaries.
+ */
+ enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param, u32 val);
+
+ /**
+- * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
+- * default value.
++ * qed_dbg_grc_set_params_default(): Reverts all GRC parameters to their
++ * default value.
++ *
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn - HW device data
++ * Return: Void.
+ */
+ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
+ /**
+- * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
+- * GRC Dump.
++ * qed_dbg_grc_get_dump_buf_size(): Returns the required buffer size for
++ * GRC Dump.
+ *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump
+- * data.
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @buf_size: (OUT) required buffer size (in dwords) for the GRC Dump
++ * data.
+ *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * Otherwise, returns ok.
++ * Return: Error if one of the following holds:
++ * - The version wasn't set
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+ /**
+- * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer.
+- *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param dump_buf - Pointer to write the collected GRC data into.
+- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+- * @param num_dumped_dwords - OUT: number of dumped dwords.
+- *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * - the specified dump buffer is too small
+- * Otherwise, returns ok.
++ * qed_dbg_grc_dump(): Dumps GRC data into the specified buffer.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @dump_buf: Pointer to write the collected GRC data into.
++ * @buf_size_in_dwords: Size of the specified buffer in dwords.
++ * @num_dumped_dwords: (OUT) number of dumped dwords.
++ *
++ * Return: Error if one of the following holds:
++ * - The version wasn't set.
++ * - The specified dump buffer is too small.
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
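The GRC dump API, like the other dump helpers documented below, is a two-step size-then-fill interface. A hedged sketch of a caller, assuming kernel context (demo_grc_dump and the vzalloc()-based buffer handling are illustrative, not taken from this patch):

#include <linux/vmalloc.h>

static int demo_grc_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 size_dwords, dumped_dwords;
	enum dbg_status rc;
	u32 *buf;

	rc = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_dwords);
	if (rc != DBG_STATUS_OK)
		return -EINVAL;

	buf = vzalloc(size_dwords * sizeof(u32));
	if (!buf)
		return -ENOMEM;

	rc = qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dwords, &dumped_dwords);
	if (rc == DBG_STATUS_OK) {
		/* ...consume 'dumped_dwords' dwords of 'buf'... */
	}

	vfree(buf);
	return rc == DBG_STATUS_OK ? 0 : -EINVAL;
}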
+@@ -3110,36 +3116,36 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ u32 *num_dumped_dwords);
+
+ /**
+- * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size
+- * for idle check results.
++ * qed_dbg_idle_chk_get_dump_buf_size(): Returns the required buffer size
++ * for idle check results.
+ *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param buf_size - OUT: required buffer size (in dwords) for the idle check
+- * data.
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @buf_size: (OUT) required buffer size (in dwords) for the idle check
++ * data.
+ *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * Otherwise, returns ok.
++ * Return: Error if one of the following holds:
++ * - The version wasn't set.
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+ /**
+- * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results
+- * into the specified buffer.
+- *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param dump_buf - Pointer to write the idle check data into.
+- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+- * @param num_dumped_dwords - OUT: number of dumped dwords.
+- *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * - the specified buffer is too small
+- * Otherwise, returns ok.
++ * qed_dbg_idle_chk_dump(): Performs idle check and writes the results
++ * into the specified buffer.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @dump_buf: Pointer to write the idle check data into.
++ * @buf_size_in_dwords: Size of the specified buffer in dwords.
++ * @num_dumped_dwords: (OUT) number of dumped dwords.
++ *
++ * Return: Error if one of the following holds:
++ * - The version wasn't set.
++ * - The specified buffer is too small.
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -3148,42 +3154,42 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ u32 *num_dumped_dwords);
+
+ /**
+- * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size
+- * for mcp trace results.
+- *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data.
+- *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * - the trace data in MCP scratchpad contain an invalid signature
+- * - the bundle ID in NVRAM is invalid
+- * - the trace meta data cannot be found (in NVRAM or image file)
+- * Otherwise, returns ok.
++ * qed_dbg_mcp_trace_get_dump_buf_size(): Returns the required buffer size
++ * for mcp trace results.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @buf_size: (OUT) Required buffer size (in dwords) for mcp trace data.
++ *
++ * Return: Error if one of the following holds:
++ * - The version wasn't set.
++ * - The trace data in MCP scratchpad contain an invalid signature.
++ * - The bundle ID in NVRAM is invalid.
++ * - The trace meta data cannot be found (in NVRAM or image file).
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+ /**
+- * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results
+- * into the specified buffer.
+- *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param dump_buf - Pointer to write the mcp trace data into.
+- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+- * @param num_dumped_dwords - OUT: number of dumped dwords.
+- *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * - the specified buffer is too small
+- * - the trace data in MCP scratchpad contain an invalid signature
+- * - the bundle ID in NVRAM is invalid
+- * - the trace meta data cannot be found (in NVRAM or image file)
+- * - the trace meta data cannot be read (from NVRAM or image file)
+- * Otherwise, returns ok.
++ * qed_dbg_mcp_trace_dump(): Performs mcp trace and writes the results
++ * into the specified buffer.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @dump_buf: Pointer to write the mcp trace data into.
++ * @buf_size_in_dwords: Size of the specified buffer in dwords.
++ * @num_dumped_dwords: (OUT) number of dumped dwords.
++ *
++ * Return: Error if one of the following holds:
++ * - The version wasn't set.
++ * - The specified buffer is too small.
++ * - The trace data in MCP scratchpad contain an invalid signature.
++ * - The bundle ID in NVRAM is invalid.
++ * - The trace meta data cannot be found (in NVRAM or image file).
++ * - The trace meta data cannot be read (from NVRAM or image file).
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -3192,36 +3198,36 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ u32 *num_dumped_dwords);
+
+ /**
+- * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size
+- * for grc trace fifo results.
++ * qed_dbg_reg_fifo_get_dump_buf_size(): Returns the required buffer size
++ * for grc trace fifo results.
+ *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data.
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @buf_size: (OUT) Required buffer size (in dwords) for reg fifo data.
+ *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * Otherwise, returns ok.
++ * Return: Error if one of the following holds:
++ * - The version wasn't set
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+ /**
+- * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into
+- * the specified buffer.
+- *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param dump_buf - Pointer to write the reg fifo data into.
+- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+- * @param num_dumped_dwords - OUT: number of dumped dwords.
+- *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * - the specified buffer is too small
+- * - DMAE transaction failed
+- * Otherwise, returns ok.
++ * qed_dbg_reg_fifo_dump(): Reads the reg fifo and writes the results into
++ * the specified buffer.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @dump_buf: Pointer to write the reg fifo data into.
++ * @buf_size_in_dwords: Size of the specified buffer in dwords.
++ * @num_dumped_dwords: (OUT) number of dumped dwords.
++ *
++ * Return: Error if one of the following holds:
++ * - The version wasn't set.
++ * - The specified buffer is too small.
++ * - DMAE transaction failed.
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -3230,37 +3236,37 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ u32 *num_dumped_dwords);
+
+ /**
+- * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size
+- * for the IGU fifo results.
++ * qed_dbg_igu_fifo_get_dump_buf_size(): Returns the required buffer size
++ * for the IGU fifo results.
+ *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo
+- * data.
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @buf_size: (OUT) Required buffer size (in dwords) for the IGU fifo
++ * data.
+ *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * Otherwise, returns ok.
++ * Return: Error if one of the following holds:
++ * - The version wasn't set.
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+ /**
+- * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into
+- * the specified buffer.
+- *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param dump_buf - Pointer to write the IGU fifo data into.
+- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+- * @param num_dumped_dwords - OUT: number of dumped dwords.
+- *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * - the specified buffer is too small
+- * - DMAE transaction failed
+- * Otherwise, returns ok.
++ * qed_dbg_igu_fifo_dump(): Reads the IGU fifo and writes the results into
++ * the specified buffer.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @dump_buf: Pointer to write the IGU fifo data into.
++ * @buf_size_in_dwords: Size of the specified buffer in dwords.
++ * @num_dumped_dwords: (OUT) number of dumped dwords.
++ *
++ * Return: Error if one of the following holds:
++ * - The version wasn't set
++ * - The specified buffer is too small
++ * - DMAE transaction failed
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -3269,37 +3275,37 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ u32 *num_dumped_dwords);
+
+ /**
+- * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required
+- * buffer size for protection override window results.
++ * qed_dbg_protection_override_get_dump_buf_size(): Returns the required
++ * buffer size for protection override window results.
+ *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param buf_size - OUT: required buffer size (in dwords) for protection
+- * override data.
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @buf_size: (OUT) Required buffer size (in dwords) for protection
++ * override data.
+ *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * Otherwise, returns ok.
++ * Return: Error if one of the following holds:
++ * - The version wasn't set
++ * Otherwise, returns ok.
+ */
+ enum dbg_status
+ qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+ /**
+- * @brief qed_dbg_protection_override_dump - Reads protection override window
+- * entries and writes the results into the specified buffer.
+- *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param dump_buf - Pointer to write the protection override data into.
+- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+- * @param num_dumped_dwords - OUT: number of dumped dwords.
+- *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * - the specified buffer is too small
+- * - DMAE transaction failed
+- * Otherwise, returns ok.
++ * qed_dbg_protection_override_dump(): Reads protection override window
++ * entries and writes the results into the specified buffer.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @dump_buf: Pointer to write the protection override data into.
++ * @buf_size_in_dwords: Size of the specified buffer in dwords.
++ * @num_dumped_dwords: (OUT) number of dumped dwords.
++ *
++ * Return: Error if one of the following holds:
++ * - The version wasn't set.
++ * - The specified buffer is too small.
++ * - DMAE transaction failed.
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -3307,34 +3313,34 @@ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+ /**
+- * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer
+- * size for FW Asserts results.
++ * qed_dbg_fw_asserts_get_dump_buf_size(): Returns the required buffer
++ * size for FW Asserts results.
+ *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data.
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @buf_size: (OUT) Required buffer size (in dwords) for FW Asserts data.
+ *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * Otherwise, returns ok.
++ * Return: Error if one of the following holds:
++ * - The version wasn't set.
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+ /**
+- * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results
+- * into the specified buffer.
+- *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param dump_buf - Pointer to write the FW Asserts data into.
+- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+- * @param num_dumped_dwords - OUT: number of dumped dwords.
+- *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * - the specified buffer is too small
+- * Otherwise, returns ok.
++ * qed_dbg_fw_asserts_dump(): Reads the FW Asserts and writes the results
++ * into the specified buffer.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @dump_buf: Pointer to write the FW Asserts data into.
++ * @buf_size_in_dwords: Size of the specified buffer in dwords.
++ * @num_dumped_dwords: (OUT) number of dumped dwords.
++ *
++ * Return: Error if one of the following holds:
++ * - The version wasn't set.
++ * - The specified buffer is too small.
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -3343,19 +3349,19 @@ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ u32 *num_dumped_dwords);
+
+ /**
+- * @brief qed_dbg_read_attn - Reads the attention registers of the specified
++ * qed_dbg_read_attn(): Reads the attention registers of the specified
+ * block and type, and writes the results into the specified buffer.
+ *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - Ptt window used for writing the registers.
+- * @param block - Block ID.
+- * @param attn_type - Attention type.
+- * @param clear_status - Indicates if the attention status should be cleared.
+- * @param results - OUT: Pointer to write the read results into
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @block: Block ID.
++ * @attn_type: Attention type.
++ * @clear_status: Indicates if the attention status should be cleared.
++ * @results: (OUT) Pointer to write the read results into.
+ *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * Otherwise, returns ok.
++ * Return: Error if one of the following holds:
++ * - The version wasn't set
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -3365,15 +3371,15 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
+
+ /**
+- * @brief qed_dbg_print_attn - Prints attention registers values in the
+- * specified results struct.
++ * qed_dbg_print_attn(): Prints attention registers values in the
++ * specified results struct.
+ *
+- * @param p_hwfn
+- * @param results - Pointer to the attention read results
++ * @p_hwfn: HW device data.
++ * @results: Pointer to the attention read results
+ *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * Otherwise, returns ok.
++ * Return: Error if one of the following holds:
++ * - The version wasn't set
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
+@@ -3420,60 +3426,64 @@ struct dbg_tools_user_data {
+ /***************************** Public Functions *******************************/
+
+ /**
+- * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
+- * debug arrays.
++ * qed_dbg_user_set_bin_ptr(): Sets a pointer to the binary data with
++ * debug arrays.
+ *
+- * @param p_hwfn - HW device data
+- * @param bin_ptr - a pointer to the binary data with debug arrays.
++ * @p_hwfn: HW device data.
++ * @bin_ptr: a pointer to the binary data with debug arrays.
++ *
++ * Return: dbg_status.
+ */
+ enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
+
+ /**
+- * @brief qed_dbg_alloc_user_data - Allocates user debug data.
++ * qed_dbg_alloc_user_data(): Allocates user debug data.
++ *
++ * @p_hwfn: HW device data.
++ * @user_data_ptr: (OUT) a pointer to the allocated memory.
+ *
+- * @param p_hwfn - HW device data
+- * @param user_data_ptr - OUT: a pointer to the allocated memory.
++ * Return: dbg_status.
+ */
+ enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
+ void **user_data_ptr);
+
+ /**
+- * @brief qed_dbg_get_status_str - Returns a string for the specified status.
++ * qed_dbg_get_status_str(): Returns a string for the specified status.
+ *
+- * @param status - a debug status code.
++ * @status: A debug status code.
+ *
+- * @return a string for the specified status
++ * Return: A string for the specified status.
+ */
+ const char *qed_dbg_get_status_str(enum dbg_status status);
+
+ /**
+- * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size
+- * for idle check results (in bytes).
++ * qed_get_idle_chk_results_buf_size(): Returns the required buffer size
++ * for idle check results (in bytes).
+ *
+- * @param p_hwfn - HW device data
+- * @param dump_buf - idle check dump buffer.
+- * @param num_dumped_dwords - number of dwords that were dumped.
+- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+- * results.
++ * @p_hwfn: HW device data.
++ * @dump_buf: idle check dump buffer.
++ * @num_dumped_dwords: number of dwords that were dumped.
++ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
++ * results.
+ *
+- * @return error if the parsing fails, ok otherwise.
++ * Return: Error if the parsing fails, ok otherwise.
+ */
+ enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+ /**
+- * @brief qed_print_idle_chk_results - Prints idle check results
++ * qed_print_idle_chk_results(): Prints idle check results
+ *
+- * @param p_hwfn - HW device data
+- * @param dump_buf - idle check dump buffer.
+- * @param num_dumped_dwords - number of dwords that were dumped.
+- * @param results_buf - buffer for printing the idle check results.
+- * @param num_errors - OUT: number of errors found in idle check.
+- * @param num_warnings - OUT: number of warnings found in idle check.
++ * @p_hwfn: HW device data.
++ * @dump_buf: idle check dump buffer.
++ * @num_dumped_dwords: number of dwords that were dumped.
++ * @results_buf: buffer for printing the idle check results.
++ * @num_errors: (OUT) number of errors found in idle check.
++ * @num_warnings: (OUT) number of warnings found in idle check.
+ *
+- * @return error if the parsing fails, ok otherwise.
++ * Return: Error if the parsing fails, ok otherwise.
+ */
+ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+@@ -3483,28 +3493,30 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *num_warnings);
+
+ /**
+- * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data.
++ * qed_dbg_mcp_trace_set_meta_data(): Sets the MCP Trace meta data.
++ *
++ * @p_hwfn: HW device data.
++ * @meta_buf: Meta buffer.
++ *
++ * Return: Void.
+ *
+ * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
+ * no NVRAM access).
+- *
+- * @param data - pointer to MCP Trace meta data
+- * @param size - size of MCP Trace meta data in dwords
+ */
+ void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
+ const u32 *meta_buf);
+
+ /**
+- * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
+- * for MCP Trace results (in bytes).
++ * qed_get_mcp_trace_results_buf_size(): Returns the required buffer size
++ * for MCP Trace results (in bytes).
+ *
+- * @param p_hwfn - HW device data
+- * @param dump_buf - MCP Trace dump buffer.
+- * @param num_dumped_dwords - number of dwords that were dumped.
+- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+- * results.
++ * @p_hwfn: HW device data.
++ * @dump_buf: MCP Trace dump buffer.
++ * @num_dumped_dwords: number of dwords that were dumped.
++ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
++ * results.
+ *
+- * @return error if the parsing fails, ok otherwise.
++ * Return: Error if the parsing fails, ok otherwise.
+ */
+ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+@@ -3512,14 +3524,14 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *results_buf_size);
+
+ /**
+- * @brief qed_print_mcp_trace_results - Prints MCP Trace results
++ * qed_print_mcp_trace_results(): Prints MCP Trace results
+ *
+- * @param p_hwfn - HW device data
+- * @param dump_buf - mcp trace dump buffer, starting from the header.
+- * @param num_dumped_dwords - number of dwords that were dumped.
+- * @param results_buf - buffer for printing the mcp trace results.
++ * @p_hwfn: HW device data.
++ * @dump_buf: MCP trace dump buffer, starting from the header.
++ * @num_dumped_dwords: Number of dwords that were dumped.
++ * @results_buf: Buffer for printing the mcp trace results.
+ *
+- * @return error if the parsing fails, ok otherwise.
++ * Return: Error if the parsing fails, ok otherwise.
+ */
+ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+@@ -3527,30 +3539,30 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ char *results_buf);
+
+ /**
+- * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and
++ * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and
+ * keeps the MCP trace meta data allocated, to support continuous MCP Trace
+ * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should
+ * be called to free the meta data.
+ *
+- * @param p_hwfn - HW device data
+- * @param dump_buf - mcp trace dump buffer, starting from the header.
+- * @param results_buf - buffer for printing the mcp trace results.
++ * @p_hwfn: HW device data.
++ * @dump_buf: MCP trace dump buffer, starting from the header.
++ * @results_buf: Buffer for printing the mcp trace results.
+ *
+- * @return error if the parsing fails, ok otherwise.
++ * Return: Error if the parsing fails, ok otherwise.
+ */
+ enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ char *results_buf);
+
+ /**
+- * @brief print_mcp_trace_line - Prints MCP Trace results for a single line
++ * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line
+ *
+- * @param p_hwfn - HW device data
+- * @param dump_buf - mcp trace dump buffer, starting from the header.
+- * @param num_dumped_bytes - number of bytes that were dumped.
+- * @param results_buf - buffer for printing the mcp trace results.
++ * @p_hwfn: HW device data.
++ * @dump_buf: MCP trace dump buffer, starting from the header.
++ * @num_dumped_bytes: Number of bytes that were dumped.
++ * @results_buf: Buffer for printing the mcp trace results.
+ *
+- * @return error if the parsing fails, ok otherwise.
++ * Return: Error if the parsing fails, ok otherwise.
+ */
+ enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
+ u8 *dump_buf,
+@@ -3558,24 +3570,26 @@ enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
+ char *results_buf);
+
+ /**
+- * @brief mcp_trace_free_meta_data - Frees the MCP Trace meta data.
++ * qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data.
+ * Should be called after continuous MCP Trace parsing.
+ *
+- * @param p_hwfn - HW device data
++ * @p_hwfn: HW device data.
++ *
++ * Return: Void.
+ */
+ void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
+- * for reg_fifo results (in bytes).
++ * qed_get_reg_fifo_results_buf_size(): Returns the required buffer size
++ * for reg_fifo results (in bytes).
+ *
+- * @param p_hwfn - HW device data
+- * @param dump_buf - reg fifo dump buffer.
+- * @param num_dumped_dwords - number of dwords that were dumped.
+- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+- * results.
++ * @p_hwfn: HW device data.
++ * @dump_buf: Reg fifo dump buffer.
++ * @num_dumped_dwords: Number of dwords that were dumped.
++ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
++ * results.
+ *
+- * @return error if the parsing fails, ok otherwise.
++ * Return: Error if the parsing fails, ok otherwise.
+ */
+ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+@@ -3583,14 +3597,14 @@ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *results_buf_size);
+
+ /**
+- * @brief qed_print_reg_fifo_results - Prints reg fifo results
++ * qed_print_reg_fifo_results(): Prints reg fifo results.
+ *
+- * @param p_hwfn - HW device data
+- * @param dump_buf - reg fifo dump buffer, starting from the header.
+- * @param num_dumped_dwords - number of dwords that were dumped.
+- * @param results_buf - buffer for printing the reg fifo results.
++ * @p_hwfn: HW device data.
++ * @dump_buf: Reg fifo dump buffer, starting from the header.
++ * @num_dumped_dwords: Number of dwords that were dumped.
++ * @results_buf: Buffer for printing the reg fifo results.
+ *
+- * @return error if the parsing fails, ok otherwise.
++ * Return: Error if the parsing fails, ok otherwise.
+ */
+ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+@@ -3598,16 +3612,16 @@ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ char *results_buf);
+
+ /**
+- * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size
+- * for igu_fifo results (in bytes).
++ * qed_get_igu_fifo_results_buf_size(): Returns the required buffer size
++ * for igu_fifo results (in bytes).
+ *
+- * @param p_hwfn - HW device data
+- * @param dump_buf - IGU fifo dump buffer.
+- * @param num_dumped_dwords - number of dwords that were dumped.
+- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+- * results.
++ * @p_hwfn: HW device data.
++ * @dump_buf: IGU fifo dump buffer.
++ * @num_dumped_dwords: number of dwords that were dumped.
++ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
++ * results.
+ *
+- * @return error if the parsing fails, ok otherwise.
++ * Return: Error if the parsing fails, ok otherwise.
+ */
+ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+@@ -3615,14 +3629,14 @@ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *results_buf_size);
+
+ /**
+- * @brief qed_print_igu_fifo_results - Prints IGU fifo results
++ * qed_print_igu_fifo_results(): Prints IGU fifo results
+ *
+- * @param p_hwfn - HW device data
+- * @param dump_buf - IGU fifo dump buffer, starting from the header.
+- * @param num_dumped_dwords - number of dwords that were dumped.
+- * @param results_buf - buffer for printing the IGU fifo results.
++ * @p_hwfn: HW device data.
++ * @dump_buf: IGU fifo dump buffer, starting from the header.
++ * @num_dumped_dwords: Number of dwords that were dumped.
++ * @results_buf: Buffer for printing the IGU fifo results.
+ *
+- * @return error if the parsing fails, ok otherwise.
++ * Return: Error if the parsing fails, ok otherwise.
+ */
+ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+@@ -3630,16 +3644,16 @@ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ char *results_buf);
+
+ /**
+- * @brief qed_get_protection_override_results_buf_size - Returns the required
+- * buffer size for protection override results (in bytes).
++ * qed_get_protection_override_results_buf_size(): Returns the required
++ * buffer size for protection override results (in bytes).
+ *
+- * @param p_hwfn - HW device data
+- * @param dump_buf - protection override dump buffer.
+- * @param num_dumped_dwords - number of dwords that were dumped.
+- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+- * results.
++ * @p_hwfn: HW device data.
++ * @dump_buf: Protection override dump buffer.
++ * @num_dumped_dwords: Number of dwords that were dumped.
++ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
++ * results.
+ *
+- * @return error if the parsing fails, ok otherwise.
++ * Return: Error if the parsing fails, ok otherwise.
+ */
+ enum dbg_status
+ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+@@ -3648,15 +3662,15 @@ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *results_buf_size);
+
+ /**
+- * @brief qed_print_protection_override_results - Prints protection override
+- * results.
++ * qed_print_protection_override_results(): Prints protection override
++ * results.
+ *
+- * @param p_hwfn - HW device data
+- * @param dump_buf - protection override dump buffer, starting from the header.
+- * @param num_dumped_dwords - number of dwords that were dumped.
+- * @param results_buf - buffer for printing the reg fifo results.
++ * @p_hwfn: HW device data.
++ * @dump_buf: Protection override dump buffer, starting from the header.
++ * @num_dumped_dwords: Number of dwords that were dumped.
++ * @results_buf: Buffer for printing the protection override results.
+ *
+- * @return error if the parsing fails, ok otherwise.
++ * Return: Error if the parsing fails, ok otherwise.
+ */
+ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+@@ -3664,16 +3678,16 @@ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ char *results_buf);
+
+ /**
+- * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size
+- * for FW Asserts results (in bytes).
++ * qed_get_fw_asserts_results_buf_size(): Returns the required buffer size
++ * for FW Asserts results (in bytes).
+ *
+- * @param p_hwfn - HW device data
+- * @param dump_buf - FW Asserts dump buffer.
+- * @param num_dumped_dwords - number of dwords that were dumped.
+- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+- * results.
++ * @p_hwfn: HW device data.
++ * @dump_buf: FW Asserts dump buffer.
++ * @num_dumped_dwords: number of dwords that were dumped.
++ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
++ * results.
+ *
+- * @return error if the parsing fails, ok otherwise.
++ * Return: Error if the parsing fails, ok otherwise.
+ */
+ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+@@ -3681,14 +3695,14 @@ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *results_buf_size);
+
+ /**
+- * @brief qed_print_fw_asserts_results - Prints FW Asserts results
++ * qed_print_fw_asserts_results(): Prints FW Asserts results.
+ *
+- * @param p_hwfn - HW device data
+- * @param dump_buf - FW Asserts dump buffer, starting from the header.
+- * @param num_dumped_dwords - number of dwords that were dumped.
+- * @param results_buf - buffer for printing the FW Asserts results.
++ * @p_hwfn: HW device data.
++ * @dump_buf: FW Asserts dump buffer, starting from the header.
++ * @num_dumped_dwords: number of dwords that were dumped.
++ * @results_buf: buffer for printing the FW Asserts results.
+ *
+- * @return error if the parsing fails, ok otherwise.
++ * Return: Error if the parsing fails, ok otherwise.
+ */
+ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+@@ -3696,15 +3710,15 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ char *results_buf);
+
+ /**
+- * @brief qed_dbg_parse_attn - Parses and prints attention registers values in
+- * the specified results struct.
++ * qed_dbg_parse_attn(): Parses and prints attention registers values in
++ * the specified results struct.
+ *
+- * @param p_hwfn - HW device data
+- * @param results - Pointer to the attention read results
++ * @p_hwfn: HW device data.
++ * @results: Pointer to the attention read results
+ *
+- * @return error if one of the following holds:
+- * - the version wasn't set
+- * Otherwise, returns ok.
++ * Return: Error if one of the following holds:
++ * - The version wasn't set.
++ * Otherwise, returns ok.
+ */
+ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
+@@ -3746,18 +3760,18 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
+ #define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL
+
+ /**
+- * @brief qed_qm_pf_mem_size - prepare QM ILT sizes
++ * qed_qm_pf_mem_size(): Prepare QM ILT sizes.
+ *
+- * Returns the required host memory size in 4KB units.
+- * Must be called before all QM init HSI functions.
++ * @num_pf_cids: Number of connections used by this PF.
++ * @num_vf_cids: Number of connections used by VFs of this PF.
++ * @num_tids: Number of tasks used by this PF.
++ * @num_pf_pqs: Number of PQs used by this PF.
++ * @num_vf_pqs: Number of PQs used by VFs of this PF.
+ *
+- * @param num_pf_cids - number of connections used by this PF
+- * @param num_vf_cids - number of connections used by VFs of this PF
+- * @param num_tids - number of tasks used by this PF
+- * @param num_pf_pqs - number of PQs used by this PF
+- * @param num_vf_pqs - number of PQs used by VFs of this PF
++ * Return: The required host memory size in 4KB units.
+ *
+- * @return The required host memory size in 4KB units.
++ * Returns the required host memory size in 4KB units.
++ * Must be called before all QM init HSI functions.
+ */
+ u32 qed_qm_pf_mem_size(u32 num_pf_cids,
+ u32 num_vf_cids,
+@@ -3800,74 +3814,74 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_pf_rt_init_params *p_params);
+
+ /**
+- * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF
++ * qed_init_pf_wfq(): Initializes the WFQ weight of the specified PF.
+ *
+- * @param p_hwfn
+- * @param p_ptt - ptt window used for writing the registers
+- * @param pf_id - PF ID
+- * @param pf_wfq - WFQ weight. Must be non-zero.
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers
++ * @pf_id: PF ID
++ * @pf_wfq: WFQ weight. Must be non-zero.
+ *
+- * @return 0 on success, -1 on error.
++ * Return: 0 on success, -1 on error.
+ */
+ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
+
+ /**
+- * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
++ * qed_init_pf_rl(): Initializes the rate limit of the specified PF
+ *
+- * @param p_hwfn
+- * @param p_ptt - ptt window used for writing the registers
+- * @param pf_id - PF ID
+- * @param pf_rl - rate limit in Mb/sec units
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @pf_id: PF ID.
++ * @pf_rl: rate limit in Mb/sec units
+ *
+- * @return 0 on success, -1 on error.
++ * Return: 0 on success, -1 on error.
+ */
+ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl);
+
+ /**
+- * @brief qed_init_vport_wfq Initializes the WFQ weight of the specified VPORT
++ * qed_init_vport_wfq(): Initializes the WFQ weight of the specified VPORT
+ *
+- * @param p_hwfn
+- * @param p_ptt - ptt window used for writing the registers
+- * @param first_tx_pq_id- An array containing the first Tx PQ ID associated
+- * with the VPORT for each TC. This array is filled by
+- * qed_qm_pf_rt_init
+- * @param vport_wfq - WFQ weight. Must be non-zero.
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers
++ * @first_tx_pq_id: An array containing the first Tx PQ ID associated
++ * with the VPORT for each TC. This array is filled by
++ * qed_qm_pf_rt_init
++ * @wfq: WFQ weight. Must be non-zero.
+ *
+- * @return 0 on success, -1 on error.
++ * Return: 0 on success, -1 on error.
+ */
+ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq);
+
+ /**
+- * @brief qed_init_global_rl - Initializes the rate limit of the specified
+- * rate limiter
++ * qed_init_global_rl(): Initializes the rate limit of the specified
++ * rate limiter.
+ *
+- * @param p_hwfn
+- * @param p_ptt - ptt window used for writing the registers
+- * @param rl_id - RL ID
+- * @param rate_limit - rate limit in Mb/sec units
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @rl_id: RL ID.
++ * @rate_limit: Rate limit in Mb/sec units
+ *
+- * @return 0 on success, -1 on error.
++ * Return: 0 on success, -1 on error.
+ */
+ int qed_init_global_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 rl_id, u32 rate_limit);
+
+ /**
+- * @brief qed_send_qm_stop_cmd Sends a stop command to the QM
++ * qed_send_qm_stop_cmd(): Sends a stop command to the QM.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param is_release_cmd - true for release, false for stop.
+- * @param is_tx_pq - true for Tx PQs, false for Other PQs.
+- * @param start_pq - first PQ ID to stop
+- * @param num_pqs - Number of PQs to stop, starting from start_pq.
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @is_release_cmd: true for release, false for stop.
++ * @is_tx_pq: true for Tx PQs, false for Other PQs.
++ * @start_pq: first PQ ID to stop
++ * @num_pqs: Number of PQs to stop, starting from start_pq.
+ *
+- * @return bool, true if successful, false if timeout occurred while waiting for
+- * QM command done.
++ * Return: Bool, true if successful, false if timeout occurred while waiting
++ * for QM command done.
+ */
+ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -3875,53 +3889,64 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ bool is_tx_pq, u16 start_pq, u16 num_pqs);
+
+ /**
+- * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
++ * qed_set_vxlan_dest_port(): Initializes vxlan tunnel destination udp port.
+ *
+- * @param p_hwfn
+- * @param p_ptt - ptt window used for writing the registers.
+- * @param dest_port - vxlan destination udp port.
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @dest_port: vxlan destination udp port.
++ *
++ * Return: Void.
+ */
+ void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port);
+
+ /**
+- * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
++ * qed_set_vxlan_enable(): Enable or disable VXLAN tunnel in HW.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @vxlan_enable: vxlan enable flag.
+ *
+- * @param p_hwfn
+- * @param p_ptt - ptt window used for writing the registers.
+- * @param vxlan_enable - vxlan enable flag.
++ * Return: Void.
+ */
+ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool vxlan_enable);
+
+ /**
+- * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
++ * qed_set_gre_enable(): Enable or disable GRE tunnel in HW.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @eth_gre_enable: Eth GRE enable flag.
++ * @ip_gre_enable: IP GRE enable flag.
+ *
+- * @param p_hwfn
+- * @param p_ptt - ptt window used for writing the registers.
+- * @param eth_gre_enable - eth GRE enable enable flag.
+- * @param ip_gre_enable - IP GRE enable enable flag.
++ * Return: Void.
+ */
+ void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable);
+
+ /**
+- * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
++ * qed_set_geneve_dest_port(): Initializes geneve tunnel destination udp port
+ *
+- * @param p_hwfn
+- * @param p_ptt - ptt window used for writing the registers.
+- * @param dest_port - geneve destination udp port.
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @dest_port: Geneve destination udp port.
++ *
++ * Return: Void.
+ */
+ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port);
+
+ /**
+- * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
++ * qed_set_geneve_enable(): Enable or disable GENEVE tunnel in HW.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @eth_geneve_enable: Eth GENEVE enable flag.
++ * @ip_geneve_enable: IP GENEVE enable flag.
+ *
+- * @param p_ptt - ptt window used for writing the registers.
+- * @param eth_geneve_enable - eth GENEVE enable enable flag.
+- * @param ip_geneve_enable - IP GENEVE enable enable flag.
++ * Return: Void.
+ */
+ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -3931,25 +3956,29 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool enable);
+
+ /**
+- * @brief qed_gft_disable - Disable GFT
++ * qed_gft_disable(): Disable GFT.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @pf_id: PF on which to disable GFT.
+ *
+- * @param p_hwfn
+- * @param p_ptt - ptt window used for writing the registers.
+- * @param pf_id - pf on which to disable GFT.
++ * Return: Void.
+ */
+ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
+
+ /**
+- * @brief qed_gft_config - Enable and configure HW for GFT
+- *
+- * @param p_hwfn - HW device data
+- * @param p_ptt - ptt window used for writing the registers.
+- * @param pf_id - pf on which to enable GFT.
+- * @param tcp - set profile tcp packets.
+- * @param udp - set profile udp packet.
+- * @param ipv4 - set profile ipv4 packet.
+- * @param ipv6 - set profile ipv6 packet.
+- * @param profile_type - define packet same fields. Use enum gft_profile_type.
++ * qed_gft_config(): Enable and configure HW for GFT.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @pf_id: PF on which to enable GFT.
++ * @tcp: Set profile tcp packets.
++ * @udp: Set profile udp packet.
++ * @ipv4: Set profile ipv4 packet.
++ * @ipv6: Set profile ipv6 packet.
++ * @profile_type: Which packet fields must match. Use enum gft_profile_type.
++ *
++ * Return: Void.
+ */
+ void qed_gft_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -3959,107 +3988,120 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
+ bool ipv4, bool ipv6, enum gft_profile_type profile_type);
+
+ /**
+- * @brief qed_enable_context_validation - Enable and configure context
+- * validation.
++ * qed_enable_context_validation(): Enable and configure context
++ * validation.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
+ *
+- * @param p_hwfn
+- * @param p_ptt - ptt window used for writing the registers.
++ * Return: Void.
+ */
+ void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+ /**
+- * @brief qed_calc_session_ctx_validation - Calcualte validation byte for
+- * session context.
++ * qed_calc_session_ctx_validation(): Calculate validation byte for
++ * session context.
+ *
+- * @param p_ctx_mem - pointer to context memory.
+- * @param ctx_size - context size.
+- * @param ctx_type - context type.
+- * @param cid - context cid.
++ * @p_ctx_mem: Pointer to context memory.
++ * @ctx_size: Context size.
++ * @ctx_type: Context type.
++ * @cid: Context cid.
++ *
++ * Return: Void.
+ */
+ void qed_calc_session_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 cid);
+
+ /**
+- * @brief qed_calc_task_ctx_validation - Calcualte validation byte for task
+- * context.
++ * qed_calc_task_ctx_validation(): Calculate validation byte for task
++ * context.
++ *
++ * @p_ctx_mem: Pointer to context memory.
++ * @ctx_size: Context size.
++ * @ctx_type: Context type.
++ * @tid: Context tid.
+ *
+- * @param p_ctx_mem - pointer to context memory.
+- * @param ctx_size - context size.
+- * @param ctx_type - context type.
+- * @param tid - context tid.
++ * Return: Void.
+ */
+ void qed_calc_task_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 tid);
+
+ /**
+- * @brief qed_memset_session_ctx - Memset session context to 0 while
+- * preserving validation bytes.
++ * qed_memset_session_ctx(): Memset session context to 0 while
++ * preserving validation bytes.
++ *
++ * @p_ctx_mem: Pointer to context memory.
++ * @ctx_size: Size to initialize.
++ * @ctx_type: Context type.
+ *
+- * @param p_hwfn -
+- * @param p_ctx_mem - pointer to context memory.
+- * @param ctx_size - size to initialzie.
+- * @param ctx_type - context type.
++ * Return: Void.
+ */
+ void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+
+ /**
+- * @brief qed_memset_task_ctx - Memset task context to 0 while preserving
+- * validation bytes.
++ * qed_memset_task_ctx(): Memset task context to 0 while preserving
++ * validation bytes.
+ *
+- * @param p_ctx_mem - pointer to context memory.
+- * @param ctx_size - size to initialzie.
+- * @param ctx_type - context type.
++ * @p_ctx_mem: Pointer to context memory.
++ * @ctx_size: Size to initialize.
++ * @ctx_type: Context type.
++ *
++ * Return: Void.
+ */
+ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+
+ #define NUM_STORMS 6
+
+ /**
+- * @brief qed_set_rdma_error_level - Sets the RDMA assert level.
+- * If the severity of the error will be
+- * above the level, the FW will assert.
+- * @param p_hwfn - HW device data
+- * @param p_ptt - ptt window used for writing the registers
+- * @param assert_level - An array of assert levels for each storm.
++ * qed_set_rdma_error_level(): Sets the RDMA assert level.
++ *                             If the severity of the error exceeds
++ *                             this level, the FW will assert.
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @assert_level: An array of assert levels for each storm.
+ *
++ * Return: Void.
+ */
+ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 assert_level[NUM_STORMS]);
+ /**
+- * @brief qed_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory.
++ * qed_fw_overlay_mem_alloc(): Allocates and fills the FW overlay memory.
+ *
+- * @param p_hwfn - HW device data
+- * @param fw_overlay_in_buf - the input FW overlay buffer.
+- * @param buf_size - the size of the input FW overlay buffer in bytes.
+- * must be aligned to dwords.
+- * @param fw_overlay_out_mem - OUT: a pointer to the allocated overlays memory.
++ * @p_hwfn: HW device data.
++ * @fw_overlay_in_buf: The input FW overlay buffer.
++ * @buf_size_in_bytes: The size of the input FW overlay buffer in bytes.
++ *                     Must be aligned to dwords.
+ *
+- * @return a pointer to the allocated overlays memory,
++ * Return: A pointer to the allocated overlays memory,
+ * or NULL in case of failures.
+ */
+ struct phys_mem_desc *
+ qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
+- const u32 * const fw_overlay_in_buf,
++ const u32 *const fw_overlay_in_buf,
+ u32 buf_size_in_bytes);
+
+ /**
+- * @brief qed_fw_overlay_init_ram - Initializes the FW overlay RAM.
++ * qed_fw_overlay_init_ram(): Initializes the FW overlay RAM.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: Ptt window used for writing the registers.
++ * @fw_overlay_mem: the allocated FW overlay memory.
+ *
+- * @param p_hwfn - HW device data.
+- * @param p_ptt - ptt window used for writing the registers.
+- * @param fw_overlay_mem - the allocated FW overlay memory.
++ * Return: Void.
+ */
+ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct phys_mem_desc *fw_overlay_mem);
+
+ /**
+- * @brief qed_fw_overlay_mem_free - Frees the FW overlay memory.
++ * qed_fw_overlay_mem_free(): Frees the FW overlay memory.
++ *
++ * @p_hwfn: HW device data.
++ * @fw_overlay_mem: The allocated FW overlay memory to free.
+ *
+- * @param p_hwfn - HW device data.
+- * @param fw_overlay_mem - the allocated FW overlay memory to free.
++ * Return: Void.
+ */
+ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
+ struct phys_mem_desc *fw_overlay_mem);
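As a rough illustration of the overlay API documented above, a sketch of the alloc / init-RAM / free lifecycle (outside the patch hunks, not driver code): it assumes the hwfn's fw_overlay_mem field and treats fw_buf / fw_buf_bytes as hypothetical, dword-aligned inputs.

static int example_fw_overlay_load(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   const u32 *fw_buf, u32 fw_buf_bytes)
{
        /* Allocate host memory holding the overlays; the input buffer
         * size must be dword-aligned per the kernel-doc above.
         */
        p_hwfn->fw_overlay_mem = qed_fw_overlay_mem_alloc(p_hwfn, fw_buf,
                                                          fw_buf_bytes);
        if (!p_hwfn->fw_overlay_mem)
                return -ENOMEM;

        /* Publish the overlay addresses to the FW overlay RAM */
        qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem);

        return 0;
}

/* On teardown (or on a failed load) the memory is released again, e.g.:
 *      qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
 */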
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
+index 554f30b0cfd5e..6263f847b6b92 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
+@@ -23,7 +23,10 @@
+ #include "qed_reg_addr.h"
+ #include "qed_sriov.h"
+
+-#define QED_BAR_ACQUIRE_TIMEOUT 1000
++#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT 1000
++#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP 1000
++#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT 100000
++#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY 10
+
+ /* Invalid values */
+ #define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1))
+@@ -84,12 +87,22 @@ void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
+ }
+
+ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
++{
++ return qed_ptt_acquire_context(p_hwfn, false);
++}
++
++struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, bool is_atomic)
+ {
+ struct qed_ptt *p_ptt;
+- unsigned int i;
++ unsigned int i, count;
++
++ if (is_atomic)
++ count = QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT;
++ else
++ count = QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT;
+
+ /* Take the free PTT from the list */
+- for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
++ for (i = 0; i < count; i++) {
+ spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+
+ if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
+@@ -105,7 +118,12 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
+ }
+
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+- usleep_range(1000, 2000);
++
++ if (is_atomic)
++ udelay(QED_BAR_ACQUIRE_TIMEOUT_UDELAY);
++ else
++ usleep_range(QED_BAR_ACQUIRE_TIMEOUT_USLEEP,
++ QED_BAR_ACQUIRE_TIMEOUT_USLEEP * 2);
+ }
+
+ DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
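For illustration (outside the patch hunks), a sketch of how a caller that cannot sleep might use the new context-aware acquire added above; qed_ptt_release() is assumed to be the driver's existing release counterpart and reg_addr is a hypothetical GRC address.

static u32 example_read_reg_atomic(struct qed_hwfn *p_hwfn, u32 reg_addr)
{
        struct qed_ptt *p_ptt;
        u32 val = 0;

        /* is_atomic == true selects the udelay()-based wait loop above,
         * so the acquire never sleeps.
         */
        p_ptt = qed_ptt_acquire_context(p_hwfn, true);
        if (!p_ptt)
                return val;

        val = qed_rd(p_hwfn, p_ptt, reg_addr);
        qed_ptt_release(p_hwfn, p_ptt);

        return val;
}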
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
+index 2734f49956f76..e535983ce21bb 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
+@@ -53,85 +53,94 @@ enum _dmae_cmd_crc_mask {
+ #define DMAE_MAX_CLIENTS 32
+
+ /**
+- * @brief qed_gtt_init - Initialize GTT windows
++ * qed_gtt_init(): Initialize GTT windows.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
++ *
++ * Return: Void.
+ */
+ void qed_gtt_init(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured
++ * qed_ptt_invalidate(): Forces all ptt entries to be re-configured
++ *
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
++ * Return: Void.
+ */
+ void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool
++ * qed_ptt_pool_alloc(): Allocate and initialize PTT pool.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return struct _qed_status - success (0), negative - error.
++ * Return: struct _qed_status - success (0), negative - error.
+ */
+ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_ptt_pool_free -
++ * qed_ptt_pool_free(): Free PTT pool.
++ *
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
++ * Return: Void.
+ */
+ void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address
++ * qed_ptt_get_hw_addr(): Get PTT's GRC/HW address.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt
+ *
+- * @return u32
++ * Return: u32.
+ */
+ u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+ /**
+- * @brief qed_ptt_get_bar_addr - Get PPT's external BAR address
++ * qed_ptt_get_bar_addr(): Get PTT's external BAR address.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_ptt: P_ptt
+ *
+- * @return u32
++ * Return: u32.
+ */
+ u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt);
+
+ /**
+- * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address
++ * qed_ptt_set_win(): Set PTT Window's GRC BAR address
+ *
+- * @param p_hwfn
+- * @param new_hw_addr
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @new_hw_addr: New HW address.
++ * @p_ptt: P_Ptt
++ *
++ * Return: Void.
+ */
+ void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 new_hw_addr);
+
+ /**
+- * @brief qed_get_reserved_ptt - Get a specific reserved PTT
++ * qed_get_reserved_ptt(): Get a specific reserved PTT.
+ *
+- * @param p_hwfn
+- * @param ptt_idx
++ * @p_hwfn: HW device data.
++ * @ptt_idx: Ptt Index.
+ *
+- * @return struct qed_ptt *
++ * Return: struct qed_ptt *.
+ */
+ struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx);
+
+ /**
+- * @brief qed_wr - Write value to BAR using the given ptt
++ * qed_wr(): Write value to BAR using the given ptt.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @val: Val.
++ * @hw_addr: HW address
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param val
+- * @param hw_addr
++ * Return: Void.
+ */
+ void qed_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -139,26 +148,28 @@ void qed_wr(struct qed_hwfn *p_hwfn,
+ u32 val);
+
+ /**
+- * @brief qed_rd - Read value from BAR using the given ptt
++ * qed_rd(): Read value from BAR using the given ptt.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @hw_addr: HW address
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param val
+- * @param hw_addr
++ * Return: u32 value read.
+ */
+ u32 qed_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr);
+
+ /**
+- * @brief qed_memcpy_from - copy n bytes from BAR using the given
+- * ptt
+- *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param dest
+- * @param hw_addr
+- * @param n
++ * qed_memcpy_from(): Copy n bytes from BAR using the given ptt.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @dest: Destination.
++ * @hw_addr: HW address.
++ * @n: N
++ *
++ * Return: Void.
+ */
+ void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -167,14 +178,15 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+ size_t n);
+
+ /**
+- * @brief qed_memcpy_to - copy n bytes to BAR using the given
+- * ptt
+- *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param hw_addr
+- * @param src
+- * @param n
++ * qed_memcpy_to(): Copy n bytes to BAR using the given ptt
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @hw_addr: HW address.
++ * @src: Source.
++ * @n: N
++ *
++ * Return: Void.
+ */
+ void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -182,83 +194,97 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+ void *src,
+ size_t n);
+ /**
+- * @brief qed_fid_pretend - pretend to another function when
+- * accessing the ptt window. There is no way to unpretend
+- * a function. The only way to cancel a pretend is to
+- * pretend back to the original function.
+- *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param fid - fid field of pxp_pretend structure. Can contain
+- * either pf / vf, port/path fields are don't care.
++ * qed_fid_pretend(): pretend to another function when
++ * accessing the ptt window. There is no way to unpretend
++ * a function. The only way to cancel a pretend is to
++ * pretend back to the original function.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @fid: fid field of pxp_pretend structure. Can contain
++ * either pf / vf, port/path fields are don't care.
++ *
++ * Return: Void.
+ */
+ void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 fid);
+
+ /**
+- * @brief qed_port_pretend - pretend to another port when
+- * accessing the ptt window
++ * qed_port_pretend(): Pretend to another port when accessing the ptt window
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param port_id - the port to pretend to
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @port_id: The port to pretend to
++ *
++ * Return: Void.
+ */
+ void qed_port_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 port_id);
+
+ /**
+- * @brief qed_port_unpretend - cancel any previously set port
+- * pretend
++ * qed_port_unpretend(): Cancel any previously set port pretend
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * Return: Void.
+ */
+ void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+ /**
+- * @brief qed_port_fid_pretend - pretend to another port and another function
+- * when accessing the ptt window
++ * qed_port_fid_pretend(): Pretend to another port and another function
++ * when accessing the ptt window
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @port_id: The port to pretend to
++ * @fid: fid field of pxp_pretend structure. Can contain either pf / vf.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param port_id - the port to pretend to
+- * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf.
++ * Return: Void.
+ */
+ void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 port_id, u16 fid);
+
+ /**
+- * @brief qed_vfid_to_concrete - build a concrete FID for a
+- * given VF ID
++ * qed_vfid_to_concrete(): Build a concrete FID for a given VF ID
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param vfid
++ * @p_hwfn: HW device data.
++ * @vfid: VFID.
++ *
++ * Return: u32 concrete FID.
+ */
+ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid);
+
+ /**
+- * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
+- * this is declared here since other files will require it.
+- * @param idx
++ * qed_dmae_idx_to_go_cmd(): Map the idx to dmae cmd
++ * this is declared here since other files will require it.
++ *
++ * @idx: Index
++ *
++ * Return: u32.
+ */
+ u32 qed_dmae_idx_to_go_cmd(u8 idx);
+
+ /**
+- * @brief qed_dmae_info_alloc - Init the dmae_info structure
+- * which is part of p_hwfn.
+- * @param p_hwfn
++ * qed_dmae_info_alloc(): Init the dmae_info structure
++ * which is part of p_hwfn.
++ *
++ * @p_hwfn: HW device data.
++ *
++ * Return: Int.
+ */
+ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_dmae_info_free - Free the dmae_info structure
+- * which is part of p_hwfn
++ * qed_dmae_info_free(): Free the dmae_info structure
++ * which is part of p_hwfn.
++ *
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
++ * Return: Void.
+ */
+ void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
+
+@@ -292,14 +318,16 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+ #define QED_HW_ERR_MAX_STR_SIZE 256
+
+ /**
+- * @brief qed_hw_err_notify - Notify upper layer driver and management FW
+- * about a HW error.
+- *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param err_type
+- * @param fmt - debug data buffer to send to the MFW
+- * @param ... - buffer format args
++ * qed_hw_err_notify(): Notify upper layer driver and management FW
++ * about a HW error.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @err_type: Err Type.
++ * @fmt: Debug data buffer to send to the MFW
++ * @...: buffer format args
++ *
++ * Return: Void.
+ */
+ void __printf(4, 5) __cold qed_hw_err_notify(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
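For illustration (outside the patch hunks), a sketch of copying a block out of the BAR with the helpers documented above; RESERVED_PTT_MAIN is assumed to be one of the driver's reserved_ptts values and grc_addr is a hypothetical GRC address.

static void example_copy_from_bar(struct qed_hwfn *p_hwfn, u32 grc_addr,
                                  u32 *dest, size_t len)
{
        /* Reserved PTTs are pre-configured, so no acquire/release cycle
         * is needed here.
         */
        struct qed_ptt *p_ptt = qed_get_reserved_ptt(p_hwfn,
                                                     RESERVED_PTT_MAIN);

        qed_memcpy_from(p_hwfn, p_ptt, dest, grc_addr, len);
}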
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
+index a573c89219820..1dbc460c9eec8 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
+@@ -12,23 +12,24 @@
+ #include "qed.h"
+
+ /**
+- * @brief qed_init_iro_array - init iro_arr.
++ * qed_init_iro_array(): init iro_arr.
+ *
++ * @cdev: Qed dev pointer.
+ *
+- * @param cdev
++ * Return: Void.
+ */
+ void qed_init_iro_array(struct qed_dev *cdev);
+
+ /**
+- * @brief qed_init_run - Run the init-sequence.
++ * qed_init_run(): Run the init-sequence.
+ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @phase: Phase.
++ * @phase_id: Phase ID.
++ * @modes: Mode.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param phase
+- * @param phase_id
+- * @param modes
+- * @return _qed_status_t
++ * Return: _qed_status_t
+ */
+ int qed_init_run(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -37,30 +38,31 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
+ int modes);
+
+ /**
+- * @brief qed_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs.
++ * qed_init_alloc(): Allocate RT array, Store 'values' ptrs.
+ *
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
+- *
+- * @return _qed_status_t
++ * Return: _qed_status_t.
+ */
+ int qed_init_alloc(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_init_hwfn_deallocate
++ * qed_init_free(): Free the HW function's init resources.
+ *
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
++ * Return: Void.
+ */
+ void qed_init_free(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
++ * qed_init_store_rt_reg(): Store a configuration value in the RT array.
+ *
++ * @p_hwfn: HW device data.
++ * @rt_offset: RT offset.
++ * @val: Val.
+ *
+- * @param p_hwfn
+- * @param rt_offset
+- * @param val
++ * Return: Void.
+ */
+ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+@@ -72,15 +74,6 @@ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+ #define OVERWRITE_RT_REG(hwfn, offset, val) \
+ qed_init_store_rt_reg(hwfn, offset, val)
+
+-/**
+- * @brief
+- *
+- *
+- * @param p_hwfn
+- * @param rt_offset
+- * @param val
+- * @param size
+- */
+ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 *val,
+@@ -90,11 +83,12 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+ qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+
+ /**
+- * @brief
+- * Initialize GTT global windows and set admin window
+- * related params of GTT/PTT to default values.
++ * qed_gtt_init(): Initialize GTT global windows and set admin window
++ * related params of GTT/PTT to default values.
++ *
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
++ * Return: Void.
+ */
+ void qed_gtt_init(struct qed_hwfn *p_hwfn);
+ #endif
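For illustration (outside the patch hunks), a sketch of staging a value in the RT array so that a later qed_init_run() phase writes it to the chip; EXAMPLE_RT_OFFSET stands in for one of the generated *_RT_OFFSET constants and is purely hypothetical.

static void example_stage_rt_value(struct qed_hwfn *p_hwfn, u32 val)
{
        /* The value is latched in the hwfn's RT data (allocated by
         * qed_init_alloc()) and applied during the init sequence.
         */
        qed_init_store_rt_reg(p_hwfn, EXAMPLE_RT_OFFSET, val);

        /* OVERWRITE_RT_REG() above is a thin wrapper around the same call */
}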
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
+index c5550e96bbe1f..eb8e0f4242d79 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
+@@ -53,51 +53,54 @@ enum qed_coalescing_fsm {
+ };
+
+ /**
+- * @brief qed_int_igu_enable_int - enable device interrupts
++ * qed_int_igu_enable_int(): Enable device interrupts.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param int_mode - interrupt mode to use
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @int_mode: Interrupt mode to use.
++ *
++ * Return: Void.
+ */
+ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode);
+
+ /**
+- * @brief qed_int_igu_disable_int - disable device interrupts
++ * qed_int_igu_disable_int(): Disable device interrupts.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * Return: Void.
+ */
+ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+ /**
+- * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc
+- * register from igu.
++ * qed_int_igu_read_sisr_reg(): Reads the single isr multiple dpc
++ * register from igu.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return u64
++ * Return: u64.
+ */
+ u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
+
+ #define QED_SP_SB_ID 0xffff
+ /**
+- * @brief qed_int_sb_init - Initializes the sb_info structure.
++ * qed_int_sb_init(): Initializes the sb_info structure.
+ *
+- * once the structure is initialized it can be passed to sb related functions.
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @sb_info: points to an uninitialized (but allocated) sb_info structure
++ * @sb_virt_addr: SB Virtual address.
++ * @sb_phy_addr: SB Physical address.
++ * @sb_id: the sb_id to be used (zero based in driver)
++ * should use QED_SP_SB_ID for SP Status block
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param sb_info points to an uninitialized (but
+- * allocated) sb_info structure
+- * @param sb_virt_addr
+- * @param sb_phy_addr
+- * @param sb_id the sb_id to be used (zero based in driver)
+- * should use QED_SP_SB_ID for SP Status block
++ * Return: int.
+ *
+- * @return int
++ * Once the structure is initialized it can be passed to sb related functions.
+ */
+ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -106,82 +109,91 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id);
+ /**
+- * @brief qed_int_sb_setup - Setup the sb.
++ * qed_int_sb_setup(): Setup the sb.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @sb_info: Initialized sb_info structure.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param sb_info initialized sb_info structure
++ * Return: Void.
+ */
+ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info);
+
+ /**
+- * @brief qed_int_sb_release - releases the sb_info structure.
++ * qed_int_sb_release(): Releases the sb_info structure.
+ *
+- * once the structure is released, it's memory can be freed
++ * @p_hwfn: HW device data.
++ * @sb_info: Points to an allocated sb_info structure.
++ * @sb_id: The sb_id to be used (zero based in driver)
++ * should never be equal to QED_SP_SB_ID
++ * (SP Status block).
+ *
+- * @param p_hwfn
+- * @param sb_info points to an allocated sb_info structure
+- * @param sb_id the sb_id to be used (zero based in driver)
+- * should never be equal to QED_SP_SB_ID
+- * (SP Status block)
++ * Return: int.
+ *
+- * @return int
++ * Once the structure is released, its memory can be freed.
+ */
+ int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+ struct qed_sb_info *sb_info,
+ u16 sb_id);
+
+ /**
+- * @brief qed_int_sp_dpc - To be called when an interrupt is received on the
+- * default status block.
++ * qed_int_sp_dpc(): To be called when an interrupt is received on the
++ * default status block.
+ *
+- * @param p_hwfn - pointer to hwfn
++ * @t: Tasklet.
++ *
++ * Return: Void.
+ *
+ */
+ void qed_int_sp_dpc(struct tasklet_struct *t);
+
+ /**
+- * @brief qed_int_get_num_sbs - get the number of status
+- * blocks configured for this funciton in the igu.
++ * qed_int_get_num_sbs(): Get the number of status blocks configured
++ * for this function in the IGU.
+ *
+- * @param p_hwfn
+- * @param p_sb_cnt_info
++ * @p_hwfn: HW device data.
++ * @p_sb_cnt_info: Pointer to SB count info.
+ *
+- * @return int - number of status blocks configured
++ * Return: Void.
+ */
+ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_sb_cnt_info *p_sb_cnt_info);
+
+ /**
+- * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
++ * qed_int_disable_post_isr_release(): Performs the cleanup post ISR
+ * release. The API need to be called after releasing all slowpath IRQs
+ * of the device.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
++ * Return: Void.
+ */
+ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
+
+ /**
+- * @brief qed_int_attn_clr_enable - sets whether the general behavior is
++ * qed_int_attn_clr_enable(): Sets whether the general behavior is
+ * preventing attentions from being reasserted, or following the
+ * attributes of the specific attention.
+ *
+- * @param cdev
+- * @param clr_enable
++ * @cdev: Qed dev pointer.
++ * @clr_enable: Clear enable
++ *
++ * Return: Void.
+ *
+ */
+ void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable);
+
+ /**
+- * @brief - Doorbell Recovery handler.
++ * qed_db_rec_handler(): Doorbell Recovery handler.
+ * Run doorbell recovery in case of PF overflow (and flush DORQ if
+ * needed).
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ *
++ * Return: Int.
+ */
+ int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+@@ -223,30 +235,34 @@ struct qed_igu_info {
+ };
+
+ /**
+- * @brief - Make sure the IGU CAM reflects the resources provided by MFW
++ * qed_int_igu_reset_cam(): Make sure the IGU CAM reflects the resources
++ * provided by MFW.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * Return: Int.
+ */
+ int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Translate the weakly-defined client sb-id into an IGU sb-id
++ * qed_get_igu_sb_id(): Translate the weakly-defined client sb-id into
++ * an IGU sb-id
+ *
+- * @param p_hwfn
+- * @param sb_id - user provided sb_id
++ * @p_hwfn: HW device data.
++ * @sb_id: user provided sb_id.
+ *
+- * @return an index inside IGU CAM where the SB resides
++ * Return: An index inside IGU CAM where the SB resides.
+ */
+ u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+ /**
+- * @brief return a pointer to an unused valid SB
++ * qed_get_igu_free_sb(): Return a pointer to an unused valid SB
+ *
+- * @param p_hwfn
+- * @param b_is_pf - true iff we want a SB belonging to a PF
++ * @p_hwfn: HW device data.
++ * @b_is_pf: True iff we want a SB belonging to a PF.
+ *
+- * @return point to an igu_block, NULL if none is available
++ * Return: Pointer to an igu_block, NULL if none is available.
+ */
+ struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn,
+ bool b_is_pf);
+@@ -259,15 +275,15 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+ void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_int_igu_read_cam - Reads the IGU CAM.
++ * qed_int_igu_read_cam(): Reads the IGU CAM.
+ * This function needs to be called during hardware
+ * prepare. It reads the info from igu cam to know which
+ * status block is the default / base status block etc.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+@@ -275,24 +291,22 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+ typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn,
+ void *cookie);
+ /**
+- * @brief qed_int_register_cb - Register callback func for
+- * slowhwfn statusblock.
+- *
+- * Every protocol that uses the slowhwfn status block
+- * should register a callback function that will be called
+- * once there is an update of the sp status block.
+- *
+- * @param p_hwfn
+- * @param comp_cb - function to be called when there is an
+- * interrupt on the sp sb
+- *
+- * @param cookie - passed to the callback function
+- * @param sb_idx - OUT parameter which gives the chosen index
+- * for this protocol.
+- * @param p_fw_cons - pointer to the actual address of the
+- * consumer for this protocol.
+- *
+- * @return int
++ * qed_int_register_cb(): Register callback func for slowhwfn statusblock.
++ *
++ * @p_hwfn: HW device data.
++ * @comp_cb: Function to be called when there is an
++ * interrupt on the sp sb
++ * @cookie: Passed to the callback function
++ * @sb_idx: (OUT) parameter which gives the chosen index
++ * for this protocol.
++ * @p_fw_cons: Pointer to the actual address of the
++ * consumer for this protocol.
++ *
++ * Return: Int.
++ *
++ * Every protocol that uses the slowhwfn status block
++ * should register a callback function that will be called
++ * once there is an update of the sp status block.
+ */
+ int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+ qed_int_comp_cb_t comp_cb,
+@@ -301,37 +315,40 @@ int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+ __le16 **p_fw_cons);
+
+ /**
+- * @brief qed_int_unregister_cb - Unregisters callback
+- * function from sp sb.
+- * Partner of qed_int_register_cb -> should be called
+- * when no longer required.
++ * qed_int_unregister_cb(): Unregisters callback function from sp sb.
++ *
++ * @p_hwfn: HW device data.
++ * @pi: Producer Index.
+ *
+- * @param p_hwfn
+- * @param pi
++ * Return: Int.
+ *
+- * @return int
++ * Partner of qed_int_register_cb -> should be called
++ * when no longer required.
+ */
+ int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
+ u8 pi);
+
+ /**
+- * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id.
++ * qed_int_get_sp_sb_id(): Get the slowhwfn sb id.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return u16
++ * Return: u16.
+ */
+ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief Status block cleanup. Should be called for each status
+- * block that will be used -> both PF / VF
+- *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param igu_sb_id - igu status block id
+- * @param opaque - opaque fid of the sb owner.
+- * @param b_set - set(1) / clear(0)
++ * qed_int_igu_init_pure_rt_single(): Status block cleanup.
++ * Should be called for each status
++ * block that will be used -> both PF / VF.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @igu_sb_id: IGU status block id.
++ * @opaque: Opaque fid of the sb owner.
++ * @b_set: Set(1) / Clear(0).
++ *
++ * Return: Void.
+ */
+ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -340,15 +357,16 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+ bool b_set);
+
+ /**
+- * @brief qed_int_cau_conf - configure cau for a given status
+- * block
+- *
+- * @param p_hwfn
+- * @param ptt
+- * @param sb_phys
+- * @param igu_sb_id
+- * @param vf_number
+- * @param vf_valid
++ * qed_int_cau_conf_sb(): Configure cau for a given status block.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @sb_phys: SB physical address.
++ * @igu_sb_id: IGU status block id.
++ * @vf_number: VF number
++ * @vf_valid: VF valid or not.
++ *
++ * Return: Void.
+ */
+ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -358,52 +376,58 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+ u8 vf_valid);
+
+ /**
+- * @brief qed_int_alloc
++ * qed_int_alloc(): QED interrupt alloc.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_int_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+ /**
+- * @brief qed_int_free
++ * qed_int_free(): QED interrupt free.
++ *
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
++ * Return: Void.
+ */
+ void qed_int_free(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_int_setup
++ * qed_int_setup(): QED interrupt setup.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ *
++ * Return: Void.
+ */
+ void qed_int_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+ /**
+- * @brief - Enable Interrupt & Attention for hw function
++ * qed_int_igu_enable(): Enable Interrupt & Attention for hw function.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param int_mode
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @int_mode: Interrupt mode.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode);
+
+ /**
+- * @brief - Initialize CAU status block entry
++ * qed_init_cau_sb_entry(): Initialize CAU status block entry.
++ *
++ * @p_hwfn: HW device data.
++ * @p_sb_entry: Pointer to the SB entry.
++ * @pf_id: PF number
++ * @vf_number: VF number
++ * @vf_valid: VF valid or not.
+ *
+- * @param p_hwfn
+- * @param p_sb_entry
+- * @param pf_id
+- * @param vf_number
+- * @param vf_valid
++ * Return: Void.
+ */
+ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry,
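For illustration (outside the patch hunks), a sketch of how a protocol might hook the slowpath status block per the kernel-doc above; struct example_ctx and its fields are hypothetical.

struct example_ctx {
        __le16 *fw_cons;
        u8 sb_idx;
};

static int example_sp_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
        struct example_ctx *ctx = cookie;

        /* Consume slowpath events up to the consumer value in *ctx->fw_cons */
        return 0;
}

static int example_register_sp_cb(struct qed_hwfn *p_hwfn,
                                  struct example_ctx *ctx)
{
        /* sb_idx receives the producer index chosen for this protocol;
         * it is passed to qed_int_unregister_cb() when no longer needed.
         */
        return qed_int_register_cb(p_hwfn, example_sp_completion, ctx,
                                   &ctx->sb_idx, &ctx->fw_cons);
}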
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+index db926d8b30334..f111391772778 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+@@ -1000,13 +1000,14 @@ static void _qed_iscsi_get_pstats(struct qed_hwfn *p_hwfn,
+ }
+
+ static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn,
+- struct qed_iscsi_stats *stats)
++ struct qed_iscsi_stats *stats,
++ bool is_atomic)
+ {
+ struct qed_ptt *p_ptt;
+
+ memset(stats, 0, sizeof(*stats));
+
+- p_ptt = qed_ptt_acquire(p_hwfn);
++ p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ return -EAGAIN;
+@@ -1337,9 +1338,16 @@ static int qed_iscsi_destroy_conn(struct qed_dev *cdev,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ }
+
++static int qed_iscsi_stats_context(struct qed_dev *cdev,
++ struct qed_iscsi_stats *stats,
++ bool is_atomic)
++{
++ return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic);
++}
++
+ static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
+ {
+- return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats);
++ return qed_iscsi_stats_context(cdev, stats, false);
+ }
+
+ static int qed_iscsi_change_mac(struct qed_dev *cdev,
+@@ -1359,13 +1367,14 @@ static int qed_iscsi_change_mac(struct qed_dev *cdev,
+ }
+
+ void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+- struct qed_mcp_iscsi_stats *stats)
++ struct qed_mcp_iscsi_stats *stats,
++ bool is_atomic)
+ {
+ struct qed_iscsi_stats proto_stats;
+
+ /* Retrieve FW statistics */
+ memset(&proto_stats, 0, sizeof(proto_stats));
+- if (qed_iscsi_stats(cdev, &proto_stats)) {
++ if (qed_iscsi_stats_context(cdev, &proto_stats, is_atomic)) {
+ DP_VERBOSE(cdev, QED_MSG_STORAGE,
+ "Failed to collect ISCSI statistics\n");
+ return;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
+index dab7a5d09f874..974cb8d26608c 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
+@@ -34,13 +34,19 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn);
+ void qed_iscsi_free(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief - Fills provided statistics struct with statistics.
++ * qed_get_protocol_stats_iscsi(): Fills the provided struct
++ * with iSCSI statistics.
+ *
+- * @param cdev
+- * @param stats - points to struct that will be filled with statistics.
++ * @cdev: Qed dev pointer.
++ * @stats: Points to struct that will be filled with statistics.
++ * @is_atomic: Hint from the caller - if the func can sleep or not.
++ *
++ * Context: The function should not sleep in case is_atomic == true.
++ * Return: Void.
+ */
+ void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+- struct qed_mcp_iscsi_stats *stats);
++ struct qed_mcp_iscsi_stats *stats,
++ bool is_atomic);
+ #else /* IS_ENABLED(CONFIG_QED_ISCSI) */
+ static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
+ {
+@@ -53,7 +59,8 @@ static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn) {}
+
+ static inline void
+ qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+- struct qed_mcp_iscsi_stats *stats) {}
++ struct qed_mcp_iscsi_stats *stats,
++ bool is_atomic) {}
+ #endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
+
+ #endif
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+index bc17bc36d346e..6ffa6425a75a5 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+@@ -1863,7 +1863,8 @@ static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
+ }
+
+ static void _qed_get_vport_stats(struct qed_dev *cdev,
+- struct qed_eth_stats *stats)
++ struct qed_eth_stats *stats,
++ bool is_atomic)
+ {
+ u8 fw_vport = 0;
+ int i;
+@@ -1872,10 +1873,11 @@ static void _qed_get_vport_stats(struct qed_dev *cdev,
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+- struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+- : NULL;
++ struct qed_ptt *p_ptt;
+ bool b_get_port_stats;
+
++ p_ptt = IS_PF(cdev) ? qed_ptt_acquire_context(p_hwfn, is_atomic)
++ : NULL;
+ if (IS_PF(cdev)) {
+ /* The main vport index is relative first */
+ if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
+@@ -1900,6 +1902,13 @@ out:
+ }
+
+ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
++{
++ qed_get_vport_stats_context(cdev, stats, false);
++}
++
++void qed_get_vport_stats_context(struct qed_dev *cdev,
++ struct qed_eth_stats *stats,
++ bool is_atomic)
+ {
+ u32 i;
+
+@@ -1908,7 +1917,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
+ return;
+ }
+
+- _qed_get_vport_stats(cdev, stats);
++ _qed_get_vport_stats(cdev, stats, is_atomic);
+
+ if (!cdev->reset_stats)
+ return;
+@@ -1960,7 +1969,7 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
+ if (!cdev->reset_stats) {
+ DP_INFO(cdev, "Reset stats not allocated\n");
+ } else {
+- _qed_get_vport_stats(cdev, cdev->reset_stats);
++ _qed_get_vport_stats(cdev, cdev->reset_stats, false);
+ cdev->reset_stats->common.link_change_count = 0;
+ }
+ }
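
With the split above, qed_get_vport_stats() keeps its original, sleepable behaviour by delegating to qed_get_vport_stats_context() with is_atomic == false, so existing callers are unchanged, while callers that cannot sleep opt in explicitly. A rough usage sketch (illustrative only, assuming a valid struct qed_dev *cdev):

            struct qed_eth_stats stats;

            /* Process context: acquiring the PTT may sleep. */
            qed_get_vport_stats(cdev, &stats);

            /* Atomic context (e.g. under a spinlock or in a bottom half):
             * the PTT acquisition must not sleep.
             */
            qed_get_vport_stats_context(cdev, &stats, true);
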
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
+index 8eceeebb1a7be..602a12a348b2e 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
+@@ -92,18 +92,18 @@ struct qed_filter_mcast {
+ };
+
+ /**
+- * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue
++ * qed_eth_rx_queue_stop(): This ramrod closes an Rx queue.
+ *
+- * @param p_hwfn
+- * @param p_rxq Handler of queue to close
+- * @param eq_completion_only If True completion will be on
+- * EQe, if False completion will be
+- * on EQe if p_hwfn opaque
+- * different from the RXQ opaque
+- * otherwise on CQe.
+- * @param cqe_completion If True completion will be
+- * receive on CQe.
+- * @return int
++ * @p_hwfn: HW device data.
++ * @p_rxq: Handler of queue to close
++ * @eq_completion_only: If True, completion will be on
++ * EQe; if False, completion will be
++ * on EQe if the p_hwfn opaque is
++ * different from the RXQ opaque,
++ * otherwise on CQe.
++ * @cqe_completion: If True, completion will be received on CQe.
++ *
++ * Return: Int.
+ */
+ int
+ qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+@@ -111,12 +111,12 @@ qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ bool eq_completion_only, bool cqe_completion);
+
+ /**
+- * @brief qed_eth_tx_queue_stop - closes a Tx queue
++ * qed_eth_tx_queue_stop(): Closes a Tx queue.
+ *
+- * @param p_hwfn
+- * @param p_txq - handle to Tx queue needed to be closed
++ * @p_hwfn: HW device data.
++ * @p_txq: Handle to the Tx queue that needs to be closed.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);
+
+@@ -205,16 +205,15 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_spq_comp_cb *p_comp_data);
+
+ /**
+- * @brief qed_sp_vport_stop -
+- *
+- * This ramrod closes a VPort after all its RX and TX queues are terminated.
+- * An Assert is generated if any queues are left open.
++ * qed_sp_vport_stop(): This ramrod closes a VPort after all its
++ * RX and TX queues are terminated.
++ * An Assert is generated if any queues are left open.
+ *
+- * @param p_hwfn
+- * @param opaque_fid
+- * @param vport_id VPort ID
++ * @p_hwfn: HW device data.
++ * @opaque_fid: Opaque FID
++ * @vport_id: VPort ID.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
+
+@@ -225,22 +224,21 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_spq_comp_cb *p_comp_data);
+
+ /**
+- * @brief qed_sp_rx_eth_queues_update -
+- *
+- * This ramrod updates an RX queue. It is used for setting the active state
+- * of the queue and updating the TPA and SGE parameters.
+- *
+- * @note At the moment - only used by non-linux VFs.
++ * qed_sp_eth_rx_queues_update(): This ramrod updates an RX queue.
++ * It is used for setting the active state
++ * of the queue and updating the TPA and
++ * SGE parameters.
++ * @p_hwfn: HW device data.
++ * @pp_rxq_handlers: An array of queue handlers to be updated.
++ * @num_rxqs: number of queues to update.
++ * @complete_cqe_flg: Post completion to the CQE Ring if set.
++ * @complete_event_flg: Post completion to the Event Ring if set.
++ * @comp_mode: Comp mode.
++ * @p_comp_data: Pointer to completion data.
+ *
+- * @param p_hwfn
+- * @param pp_rxq_handlers An array of queue handlers to be updated.
+- * @param num_rxqs number of queues to update.
+- * @param complete_cqe_flg Post completion to the CQE Ring if set
+- * @param complete_event_flg Post completion to the Event Ring if set
+- * @param comp_mode
+- * @param p_comp_data
++ * Return: Int.
+ *
+- * @return int
++ * Note: At the moment - only used by non-Linux VFs.
+ */
+
+ int
+@@ -252,35 +250,61 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+
++/**
++ * qed_get_vport_stats(): Fills provided statistics
++ * struct with statistics.
++ *
++ * @cdev: Qed dev pointer.
++ * @stats: Points to struct that will be filled with statistics.
++ *
++ * Return: Void.
++ */
+ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
+
++/**
++ * qed_get_vport_stats_context(): Fills provided statistics
++ * struct with statistics.
++ *
++ * @cdev: Qed dev pointer.
++ * @stats: Points to struct that will be filled with statistics.
++ * @is_atomic: Hint from the caller - whether the function can sleep or not.
++ *
++ * Context: The function should not sleep in case is_atomic == true.
++ * Return: Void.
++ */
++void qed_get_vport_stats_context(struct qed_dev *cdev,
++ struct qed_eth_stats *stats,
++ bool is_atomic);
++
+ void qed_reset_vport_stats(struct qed_dev *cdev);
+
+ /**
+- * *@brief qed_arfs_mode_configure -
+- *
+- **Enable or disable rfs mode. It must accept atleast one of tcp or udp true
+- **and atleast one of ipv4 or ipv6 true to enable rfs mode.
++ * qed_arfs_mode_configure(): Enable or disable rfs mode.
++ * It must accept at least one of tcp or udp true
++ * and at least one of ipv4 or ipv6 true to enable
++ * rfs mode.
+ *
+- **@param p_hwfn
+- **@param p_ptt
+- **@param p_cfg_params - arfs mode configuration parameters.
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @p_cfg_params: arfs mode configuration parameters.
+ *
++ * Return: Void.
+ */
+ void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_arfs_config_params *p_cfg_params);
+
+ /**
+- * @brief - qed_configure_rfs_ntuple_filter
++ * qed_configure_rfs_ntuple_filter(): This ramrod should be used to add
++ * or remove an arfs hw filter.
+ *
+- * This ramrod should be used to add or remove arfs hw filter
++ * @p_hwfn: HW device data.
++ * @p_cb: Used for QED_SPQ_MODE_CB, where client would initialize
++ * it with cookie and callback function address, if not
++ * using this mode then client must pass NULL.
++ * @p_params: Pointer to params.
+ *
+- * @params p_hwfn
+- * @params p_cb - Used for QED_SPQ_MODE_CB,where client would initialize
+- * it with cookie and callback function address, if not
+- * using this mode then client must pass NULL.
+- * @params p_params
++ * Return: Int.
+ */
+ int
+ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
+@@ -374,16 +398,17 @@ qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params);
+
+ /**
+- * @brief - Starts an Rx queue, when queue_cid is already prepared
++ * qed_eth_rxq_start_ramrod(): Starts an Rx queue, when queue_cid is
++ * already prepared
+ *
+- * @param p_hwfn
+- * @param p_cid
+- * @param bd_max_bytes
+- * @param bd_chain_phys_addr
+- * @param cqe_pbl_addr
+- * @param cqe_pbl_size
++ * @p_hwfn: HW device data.
++ * @p_cid: Pointer CID.
++ * @bd_max_bytes: Max bytes.
++ * @bd_chain_phys_addr: Chain physical address.
++ * @cqe_pbl_addr: PBL address.
++ * @cqe_pbl_size: PBL size.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int
+ qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+@@ -393,15 +418,16 @@ qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+
+ /**
+- * @brief - Starts a Tx queue, where queue_cid is already prepared
++ * qed_eth_txq_start_ramrod(): Starts a Tx queue, where queue_cid is
++ * already prepared
+ *
+- * @param p_hwfn
+- * @param p_cid
+- * @param pbl_addr
+- * @param pbl_size
+- * @param p_pq_params - parameters for choosing the PQ for this Tx queue
++ * @p_hwfn: HW device data.
++ * @p_cid: Pointer CID.
++ * @pbl_addr: PBL address.
++ * @pbl_size: PBL size.
++ * @pq_id: Parameters for choosing the PQ for this Tx queue.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int
+ qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+index df88d00053a29..f80f7739ff8d6 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+@@ -119,41 +119,41 @@ struct qed_ll2_info {
+ extern const struct qed_ll2_ops qed_ll2_ops_pass;
+
+ /**
+- * @brief qed_ll2_acquire_connection - allocate resources,
+- * starts rx & tx (if relevant) queues pair. Provides
+- * connecion handler as output parameter.
++ * qed_ll2_acquire_connection(): Allocates resources and
++ * starts the rx & tx (if relevant) queues pair.
++ * Provides a connection handler as an output
++ * parameter.
+ *
++ * @cxt: Pointer to the hw-function [opaque to some].
++ * @data: Describes connection parameters.
+ *
+- * @param cxt - pointer to the hw-function [opaque to some]
+- * @param data - describes connection parameters
+- * @return int
++ * Return: Int.
+ */
+ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data);
+
+ /**
+- * @brief qed_ll2_establish_connection - start previously
+- * allocated LL2 queues pair
++ * qed_ll2_establish_connection(): start previously allocated LL2 queues pair
+ *
+- * @param cxt - pointer to the hw-function [opaque to some]
+- * @param p_ptt
+- * @param connection_handle LL2 connection's handle obtained from
+- * qed_ll2_require_connection
++ * @cxt: Pointer to the hw-function [opaque to some].
++ * @connection_handle: LL2 connection's handle obtained from
++ * qed_ll2_require_connection.
+ *
+- * @return 0 on success, failure otherwise
++ * Return: 0 on success, failure otherwise.
+ */
+ int qed_ll2_establish_connection(void *cxt, u8 connection_handle);
+
+ /**
+- * @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue.
++ * qed_ll2_post_rx_buffer(): Submit buffers to LL2 Rx queue.
+ *
+- * @param cxt - pointer to the hw-function [opaque to some]
+- * @param connection_handle LL2 connection's handle obtained from
+- * qed_ll2_require_connection
+- * @param addr rx (physical address) buffers to submit
+- * @param cookie
+- * @param notify_fw produce corresponding Rx BD immediately
++ * @cxt: Pointer to the hw-function [opaque to some].
++ * @connection_handle: LL2 connection's handle obtained from
++ * qed_ll2_require_connection.
++ * @addr: RX (physical address) buffers to submit.
++ * @buf_len: Buffer Len.
++ * @cookie: Cookie.
++ * @notify_fw: Produce corresponding Rx BD immediately.
+ *
+- * @return 0 on success, failure otherwise
++ * Return: 0 on success, failure otherwise.
+ */
+ int qed_ll2_post_rx_buffer(void *cxt,
+ u8 connection_handle,
+@@ -161,15 +161,15 @@ int qed_ll2_post_rx_buffer(void *cxt,
+ u16 buf_len, void *cookie, u8 notify_fw);
+
+ /**
+- * @brief qed_ll2_prepare_tx_packet - request for start Tx BD
+- * to prepare Tx packet submission to FW.
++ * qed_ll2_prepare_tx_packet(): Request for start Tx BD
++ * to prepare Tx packet submission to FW.
+ *
+- * @param cxt - pointer to the hw-function [opaque to some]
+- * @param connection_handle
+- * @param pkt - info regarding the tx packet
+- * @param notify_fw - issue doorbell to fw for this packet
++ * @cxt: Pointer to the hw-function [opaque to some].
++ * @connection_handle: Connection handle.
++ * @pkt: Info regarding the tx packet.
++ * @notify_fw: Issue doorbell to fw for this packet.
+ *
+- * @return 0 on success, failure otherwise
++ * Return: 0 on success, failure otherwise.
+ */
+ int qed_ll2_prepare_tx_packet(void *cxt,
+ u8 connection_handle,
+@@ -177,81 +177,83 @@ int qed_ll2_prepare_tx_packet(void *cxt,
+ bool notify_fw);
+
+ /**
+- * @brief qed_ll2_release_connection - releases resources
+- * allocated for LL2 connection
++ * qed_ll2_release_connection(): Releases resources allocated for LL2
++ * connection.
+ *
+- * @param cxt - pointer to the hw-function [opaque to some]
+- * @param connection_handle LL2 connection's handle obtained from
+- * qed_ll2_require_connection
++ * @cxt: Pointer to the hw-function [opaque to some].
++ * @connection_handle: LL2 connection's handle obtained from
++ * qed_ll2_require_connection.
++ *
++ * Return: Void.
+ */
+ void qed_ll2_release_connection(void *cxt, u8 connection_handle);
+
+ /**
+- * @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill
+- * Tx BD of BDs requested by
+- * qed_ll2_prepare_tx_packet
++ * qed_ll2_set_fragment_of_tx_packet(): Provides fragments to fill
++ * Tx BD of BDs requested by
++ * qed_ll2_prepare_tx_packet
+ *
+- * @param cxt - pointer to the hw-function [opaque to some]
+- * @param connection_handle LL2 connection's handle
+- * obtained from
+- * qed_ll2_require_connection
+- * @param addr
+- * @param nbytes
++ * @cxt: Pointer to the hw-function [opaque to some].
++ * @connection_handle: LL2 connection's handle obtained from
++ * qed_ll2_require_connection.
++ * @addr: Address.
++ * @nbytes: Number of bytes.
+ *
+- * @return 0 on success, failure otherwise
++ * Return: 0 on success, failure otherwise.
+ */
+ int qed_ll2_set_fragment_of_tx_packet(void *cxt,
+ u8 connection_handle,
+ dma_addr_t addr, u16 nbytes);
+
+ /**
+- * @brief qed_ll2_terminate_connection - stops Tx/Rx queues
+- *
++ * qed_ll2_terminate_connection(): Stops Tx/Rx queues
+ *
+- * @param cxt - pointer to the hw-function [opaque to some]
+- * @param connection_handle LL2 connection's handle
+- * obtained from
+- * qed_ll2_require_connection
++ * @cxt: Pointer to the hw-function [opaque to some].
++ * @connection_handle: LL2 connection's handle obtained from
++ * qed_ll2_require_connection.
+ *
+- * @return 0 on success, failure otherwise
++ * Return: 0 on success, failure otherwise.
+ */
+ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle);
+
+ /**
+- * @brief qed_ll2_get_stats - get LL2 queue's statistics
+- *
++ * qed_ll2_get_stats(): Get LL2 queue's statistics
+ *
+- * @param cxt - pointer to the hw-function [opaque to some]
+- * @param connection_handle LL2 connection's handle obtained from
+- * qed_ll2_require_connection
+- * @param p_stats
++ * @cxt: Pointer to the hw-function [opaque to some].
++ * @connection_handle: LL2 connection's handle obtained from
++ * qed_ll2_require_connection.
++ * @p_stats: Pointer to statistics.
+ *
+- * @return 0 on success, failure otherwise
++ * Return: 0 on success, failure otherwise.
+ */
+ int qed_ll2_get_stats(void *cxt,
+ u8 connection_handle, struct qed_ll2_stats *p_stats);
+
+ /**
+- * @brief qed_ll2_alloc - Allocates LL2 connections set
++ * qed_ll2_alloc(): Allocates LL2 connections set.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_ll2_alloc(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_ll2_setup - Inits LL2 connections set
++ * qed_ll2_setup(): Inits LL2 connections set.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
++ *
++ * Return: Void.
+ *
+ */
+ void qed_ll2_setup(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_ll2_free - Releases LL2 connections set
++ * qed_ll2_free(): Releases LL2 connections set
++ *
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
++ * Return: Void.
+ *
+ */
+ void qed_ll2_free(struct qed_hwfn *p_hwfn);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index d10e1cd6d2ba9..26700b0b4b370 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -3054,7 +3054,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
+
+ switch (type) {
+ case QED_MCP_LAN_STATS:
+- qed_get_vport_stats(cdev, &eth_stats);
++ qed_get_vport_stats_context(cdev, &eth_stats, true);
+ stats->lan_stats.ucast_rx_pkts =
+ eth_stats.common.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts =
+@@ -3062,10 +3062,10 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
+ stats->lan_stats.fcs_err = -1;
+ break;
+ case QED_MCP_FCOE_STATS:
+- qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
++ qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true);
+ break;
+ case QED_MCP_ISCSI_STATS:
+- qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
++ qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true);
+ break;
+ default:
+ DP_VERBOSE(cdev, QED_MSG_SP,
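
qed_get_protocol_stats() is reached from the management-firmware request path, which runs in a context that is not allowed to sleep; that is why all three branches above now pass true. A sketch of such a call (illustrative only; QED_MCP_LAN_STATS appears in the hunk above, while union qed_mcp_protocol_stats is assumed to be the type behind the stats pointer):

            union qed_mcp_protocol_stats stats;

            /* Bottom-half style context: tell the stats helpers to take
             * the atomic PTT path.
             */
            qed_get_protocol_stats(cdev, QED_MCP_LAN_STATS, &stats);
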
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+index 8edb450d0abfc..352b757183e8e 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+@@ -266,97 +266,97 @@ union qed_mfw_tlv_data {
+ #define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4)
+
+ /**
+- * @brief - returns the link params of the hw function
++ * qed_mcp_get_link_params(): Returns the link params of the hw function.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @returns pointer to link params
++ * Returns: Pointer to link params.
+ */
+-struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *);
++struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief - return the link state of the hw function
++ * qed_mcp_get_link_state(): Return the link state of the hw function.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @returns pointer to link state
++ * Returns: Pointer to link state.
+ */
+-struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *);
++struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief - return the link capabilities of the hw function
++ * qed_mcp_get_link_capabilities(): Return the link capabilities of the
++ * hw function.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @returns pointer to link capabilities
++ * Returns: Pointer to link capabilities.
+ */
+ struct qed_mcp_link_capabilities
+ *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief Request the MFW to set the the link according to 'link_input'.
++ * qed_mcp_set_link(): Request the MFW to set the link according
++ * to 'link_input'.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param b_up - raise link if `true'. Reset link if `false'.
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @b_up: Raise link if `true'. Reset link if `false'.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_up);
+
+ /**
+- * @brief Get the management firmware version value
++ * qed_mcp_get_mfw_ver(): Get the management firmware version value.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param p_mfw_ver - mfw version value
+- * @param p_running_bundle_id - image id in nvram; Optional.
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @p_mfw_ver: MFW version value.
++ * @p_running_bundle_id: Image id in nvram; Optional.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - operation was successful.
+ */
+ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_mfw_ver, u32 *p_running_bundle_id);
+
+ /**
+- * @brief Get the MBI version value
++ * qed_mcp_get_mbi_ver(): Get the MBI version value.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version.
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @p_mbi_ver: A pointer to a variable to be filled with the MBI version.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - operation was successful.
+ */
+ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_mbi_ver);
+
+ /**
+- * @brief Get media type value of the port.
++ * qed_mcp_get_media_type(): Get media type value of the port.
+ *
+- * @param cdev - qed dev pointer
+- * @param p_ptt
+- * @param mfw_ver - media type value
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @media_type: Media type value
+ *
+- * @return int -
+- * 0 - Operation was successul.
+- * -EBUSY - Operation failed
++ * Return: Int - 0 - Operation was successful.
++ * -EBUSY - Operation failed
+ */
+ int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *media_type);
+
+ /**
+- * @brief Get transceiver data of the port.
++ * qed_mcp_get_transceiver_data(): Get transceiver data of the port.
+ *
+- * @param cdev - qed dev pointer
+- * @param p_ptt
+- * @param p_transceiver_state - transceiver state.
+- * @param p_transceiver_type - media type value
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @p_transceiver_state: Transceiver state.
++ * @p_tranceiver_type: Media type value.
+ *
+- * @return int -
+- * 0 - Operation was successful.
+- * -EBUSY - Operation failed
++ * Return: Int - 0 - Operation was successful.
++ * -EBUSY - Operation failed
+ */
+ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -364,50 +364,48 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
+ u32 *p_tranceiver_type);
+
+ /**
+- * @brief Get transceiver supported speed mask.
++ * qed_mcp_trans_speed_mask(): Get transceiver supported speed mask.
+ *
+- * @param cdev - qed dev pointer
+- * @param p_ptt
+- * @param p_speed_mask - Bit mask of all supported speeds.
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @p_speed_mask: Bit mask of all supported speeds.
+ *
+- * @return int -
+- * 0 - Operation was successful.
+- * -EBUSY - Operation failed
++ * Return: Int - 0 - Operation was successful.
++ * -EBUSY - Operation failed
+ */
+
+ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_speed_mask);
+
+ /**
+- * @brief Get board configuration.
++ * qed_mcp_get_board_config(): Get board configuration.
+ *
+- * @param cdev - qed dev pointer
+- * @param p_ptt
+- * @param p_board_config - Board config.
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @p_board_config: Board config.
+ *
+- * @return int -
+- * 0 - Operation was successful.
+- * -EBUSY - Operation failed
++ * Return: Int - 0 - Operation was successful.
++ * -EBUSY - Operation failed
+ */
+ int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_board_config);
+
+ /**
+- * @brief General function for sending commands to the MCP
+- * mailbox. It acquire mutex lock for the entire
+- * operation, from sending the request until the MCP
+- * response. Waiting for MCP response will be checked up
+- * to 5 seconds every 5ms.
++ * qed_mcp_cmd(): General function for sending commands to the MCP
++ * mailbox. It acquires a mutex lock for the entire
++ * operation, from sending the request until the MCP
++ * response. The MCP response is polled every 5ms
++ * for up to 5 seconds.
+ *
+- * @param p_hwfn - hw function
+- * @param p_ptt - PTT required for register access
+- * @param cmd - command to be sent to the MCP.
+- * @param param - Optional param
+- * @param o_mcp_resp - The MCP response code (exclude sequence).
+- * @param o_mcp_param- Optional parameter provided by the MCP
++ * @p_hwfn: HW device data.
++ * @p_ptt: PTT required for register access.
++ * @cmd: command to be sent to the MCP.
++ * @param: Optional param
++ * @o_mcp_resp: The MCP response code (exclude sequence).
++ * @o_mcp_param: Optional parameter provided by the MCP
+ * response
+- * @return int - 0 - operation
+- * was successul.
++ *
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -417,37 +415,39 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ u32 *o_mcp_param);
+
+ /**
+- * @brief - drains the nig, allowing completion to pass in case of pauses.
+- * (Should be called only from sleepable context)
++ * qed_mcp_drain(): drains the nig, allowing completion to pass in
++ * case of pauses.
++ * (Should be called only from sleepable context)
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: PTT required for register access.
++ *
++ * Return: Int.
+ */
+ int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Get the flash size value
++ * qed_mcp_get_flash_size(): Get the flash size value.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param p_flash_size - flash size in bytes to be filled.
++ * @p_hwfn: HW device data.
++ * @p_ptt: PTT required for register access.
++ * @p_flash_size: Flash size in bytes to be filled.
+ *
+- * @return int - 0 - operation was successul.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_flash_size);
+
+ /**
+- * @brief Send driver version to MFW
++ * qed_mcp_send_drv_version(): Send driver version to MFW.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param version - Version value
+- * @param name - Protocol driver name
++ * @p_hwfn: HW device data.
++ * @p_ptt: PTT required for register access.
++ * @p_ver: Version value.
+ *
+- * @return int - 0 - operation was successul.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int
+ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+@@ -455,146 +455,148 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_drv_version *p_ver);
+
+ /**
+- * @brief Read the MFW process kill counter
++ * qed_get_process_kill_counter(): Read the MFW process kill counter.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: PTT required for register access.
+ *
+- * @return u32
++ * Return: u32.
+ */
+ u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Trigger a recovery process
++ * qed_start_recovery_process(): Trigger a recovery process.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: PTT required for register access.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+ /**
+- * @brief A recovery handler must call this function as its first step.
+- * It is assumed that the handler is not run from an interrupt context.
++ * qed_recovery_prolog(): A recovery handler must call this function
++ * as its first step.
++ * It is assumed that the handler is not run from
++ * an interrupt context.
+ *
+- * @param cdev
+- * @param p_ptt
++ * @cdev: Qed dev pointer.
+ *
+- * @return int
++ * Return: int.
+ */
+ int qed_recovery_prolog(struct qed_dev *cdev);
+
+ /**
+- * @brief Notify MFW about the change in base device properties
++ * qed_mcp_ov_update_current_config(): Notify MFW about the change in base
++ * device properties
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param client - qed client type
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @client: Qed client type.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_client client);
+
+ /**
+- * @brief Notify MFW about the driver state
++ * qed_mcp_ov_update_driver_state(): Notify MFW about the driver state.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param drv_state - Driver state
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @drv_state: Driver state.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_driver_state drv_state);
+
+ /**
+- * @brief Send MTU size to MFW
++ * qed_mcp_ov_update_mtu(): Send MTU size to MFW.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param mtu - MTU size
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @mtu: MTU size.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 mtu);
+
+ /**
+- * @brief Send MAC address to MFW
++ * qed_mcp_ov_update_mac(): Send MAC address to MFW.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param mac - MAC address
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @mac: MAC address.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *mac);
+
+ /**
+- * @brief Send WOL mode to MFW
++ * qed_mcp_ov_update_wol(): Send WOL mode to MFW.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param wol - WOL mode
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @wol: WOL mode.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_wol wol);
+
+ /**
+- * @brief Set LED status
++ * qed_mcp_set_led(): Set LED status.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param mode - LED mode
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @mode: LED mode.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_led_mode mode);
+
+ /**
+- * @brief Read from nvm
++ * qed_mcp_nvm_read(): Read from NVM.
+ *
+- * @param cdev
+- * @param addr - nvm offset
+- * @param p_buf - nvm read buffer
+- * @param len - buffer len
++ * @cdev: Qed dev pointer.
++ * @addr: NVM offset.
++ * @p_buf: NVM read buffer.
++ * @len: Buffer len.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);
+
+ /**
+- * @brief Write to nvm
++ * qed_mcp_nvm_write(): Write to NVM.
+ *
+- * @param cdev
+- * @param addr - nvm offset
+- * @param cmd - nvm command
+- * @param p_buf - nvm write buffer
+- * @param len - buffer len
++ * @cdev: Qed dev pointer.
++ * @addr: NVM offset.
++ * @cmd: NVM command.
++ * @p_buf: NVM write buffer.
++ * @len: Buffer len.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_nvm_write(struct qed_dev *cdev,
+ u32 cmd, u32 addr, u8 *p_buf, u32 len);
+
+ /**
+- * @brief Check latest response
++ * qed_mcp_nvm_resp(): Check latest response.
+ *
+- * @param cdev
+- * @param p_buf - nvm write buffer
++ * @cdev: Qed dev pointer.
++ * @p_buf: NVM write buffer.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf);
+
+@@ -604,13 +606,13 @@ struct qed_nvm_image_att {
+ };
+
+ /**
+- * @brief Allows reading a whole nvram image
++ * qed_mcp_get_nvm_image_att(): Allows reading a whole nvram image.
+ *
+- * @param p_hwfn
+- * @param image_id - image to get attributes for
+- * @param p_image_att - image attributes structure into which to fill data
++ * @p_hwfn: HW device data.
++ * @image_id: Image to get attributes for.
++ * @p_image_att: Image attributes structure into which to fill data.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int
+ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+@@ -618,64 +620,65 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+ struct qed_nvm_image_att *p_image_att);
+
+ /**
+- * @brief Allows reading a whole nvram image
++ * qed_mcp_get_nvm_image(): Allows reading a whole nvram image.
+ *
+- * @param p_hwfn
+- * @param image_id - image requested for reading
+- * @param p_buffer - allocated buffer into which to fill data
+- * @param buffer_len - length of the allocated buffer.
++ * @p_hwfn: HW device data.
++ * @image_id: image requested for reading.
++ * @p_buffer: allocated buffer into which to fill data.
++ * @buffer_len: length of the allocated buffer.
+ *
+- * @return 0 iff p_buffer now contains the nvram image.
++ * Return: 0 if p_buffer now contains the nvram image.
+ */
+ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
+ enum qed_nvm_images image_id,
+ u8 *p_buffer, u32 buffer_len);
+
+ /**
+- * @brief Bist register test
++ * qed_mcp_bist_register_test(): Bist register test.
+ *
+- * @param p_hwfn - hw function
+- * @param p_ptt - PTT required for register access
++ * @p_hwfn: HW device data.
++ * @p_ptt: PTT required for register access.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Bist clock test
++ * qed_mcp_bist_clock_test(): Bist clock test.
+ *
+- * @param p_hwfn - hw function
+- * @param p_ptt - PTT required for register access
++ * @p_hwfn: HW device data.
++ * @p_ptt: PTT required for register access.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Bist nvm test - get number of images
++ * qed_mcp_bist_nvm_get_num_images(): Bist nvm test - get number of images.
+ *
+- * @param p_hwfn - hw function
+- * @param p_ptt - PTT required for register access
+- * @param num_images - number of images if operation was
++ * @p_hwfn: HW device data.
++ * @p_ptt: PTT required for register access.
++ * @num_images: number of images if operation was
+ * successful. 0 if not.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *num_images);
+
+ /**
+- * @brief Bist nvm test - get image attributes by index
++ * qed_mcp_bist_nvm_get_image_att(): Bist nvm test - get image attributes
++ * by index.
+ *
+- * @param p_hwfn - hw function
+- * @param p_ptt - PTT required for register access
+- * @param p_image_att - Attributes of image
+- * @param image_index - Index of image to get information for
++ * @p_hwfn: HW device data.
++ * @p_ptt: PTT required for register access.
++ * @p_image_att: Attributes of image.
++ * @image_index: Index of image to get information for.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -683,23 +686,26 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
+ u32 image_index);
+
+ /**
+- * @brief - Processes the TLV request from MFW i.e., get the required TLV info
+- * from the qed client and send it to the MFW.
++ * qed_mfw_process_tlv_req(): Processes the TLV request from MFW i.e.,
++ * gets the required TLV info
++ * from the qed client and sends it to the MFW.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @param return 0 upon success.
++ * Return: 0 upon success.
+ */
+ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Send raw debug data to the MFW
++ * qed_mcp_send_raw_debug_data(): Send raw debug data to the MFW
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @p_buf: raw debug data buffer.
++ * @size: Buffer size.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param p_buf - raw debug data buffer
+- * @param size - buffer size
++ * Return: Int.
+ */
+ int
+ qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
+@@ -796,47 +802,49 @@ qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn)
+ }
+
+ /**
+- * @brief Initialize the interface with the MCP
++ * qed_mcp_cmd_init(): Initialize the interface with the MCP.
+ *
+- * @param p_hwfn - HW func
+- * @param p_ptt - PTT required for register access
++ * @p_hwfn: HW device data.
++ * @p_ptt: PTT required for register access.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Initialize the port interface with the MCP
++ * qed_mcp_cmd_port_init(): Initialize the port interface with the MCP
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ *
++ * Return: Void.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+ * Can only be called after `num_ports_in_engines' is set
+ */
+ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+ /**
+- * @brief Releases resources allocated during the init process.
++ * qed_mcp_free(): Releases resources allocated during the init process.
+ *
+- * @param p_hwfn - HW func
+- * @param p_ptt - PTT required for register access
++ * @p_hwfn: HW function.
+ *
+- * @return int
++ * Return: Int.
+ */
+
+ int qed_mcp_free(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief This function is called from the DPC context. After
+- * pointing PTT to the mfw mb, check for events sent by the MCP
+- * to the driver and ack them. In case a critical event
+- * detected, it will be handled here, otherwise the work will be
+- * queued to a sleepable work-queue.
++ * qed_mcp_handle_events(): This function is called from the DPC context.
++ * After pointing PTT to the mfw mb, check for events sent by
++ * the MCP to the driver and ack them. In case a critical event is
++ * detected, it will be handled here; otherwise the work will be
++ * queued to a sleepable work-queue.
++ *
++ * @p_hwfn: HW function.
++ * @p_ptt: PTT required for register access.
+ *
+- * @param p_hwfn - HW function
+- * @param p_ptt - PTT required for register access
+- * @return int - 0 - operation
+- * was successul.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+@@ -858,106 +866,111 @@ struct qed_load_req_params {
+ };
+
+ /**
+- * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
+- * returns whether this PF is the first on the engine/port or function.
++ * qed_mcp_load_req(): Sends a LOAD_REQ to the MFW, and in case the
++ * operation succeeds, returns whether this PF is
++ * the first on the engine/port or function.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param p_params
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @p_params: Params.
+ *
+- * @return int - 0 - Operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_load_req_params *p_params);
+
+ /**
+- * @brief Sends a LOAD_DONE message to the MFW
++ * qed_mcp_load_done(): Sends a LOAD_DONE message to the MFW.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @return int - 0 - Operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Sends a UNLOAD_REQ message to the MFW
++ * qed_mcp_unload_req(): Sends a UNLOAD_REQ message to the MFW.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @return int - 0 - Operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Sends a UNLOAD_DONE message to the MFW
++ * qed_mcp_unload_done(): Sends a UNLOAD_DONE message to the MFW
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @return int - 0 - Operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Read the MFW mailbox into Current buffer.
++ * qed_mcp_read_mb(): Read the MFW mailbox into Current buffer.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ *
++ * Return: Void.
+ */
+ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Ack to mfw that driver finished FLR process for VFs
++ * qed_mcp_ack_vf_flr(): Ack to mfw that driver finished FLR process for VFs
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @vfs_to_ack: bit mask of all engine VFs for which the PF acks.
+ *
+- * @param return int - 0 upon success.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *vfs_to_ack);
+
+ /**
+- * @brief - calls during init to read shmem of all function-related info.
++ * qed_mcp_fill_shmem_func_info(): Calls during init to read shmem of
++ * all function-related info.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @param return 0 upon success.
++ * Return: 0 upon success.
+ */
+ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+ /**
+- * @brief - Reset the MCP using mailbox command.
++ * qed_mcp_reset(): Reset the MCP using mailbox command.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @param return 0 upon success.
++ * Return: 0 upon success.
+ */
+ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+ /**
+- * @brief - Sends an NVM read command request to the MFW to get
+- * a buffer.
++ * qed_mcp_nvm_rd_cmd(): Sends an NVM read command request to the MFW to get
++ * a buffer.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
+- * DRV_MSG_CODE_NVM_READ_NVRAM commands
+- * @param param - [0:23] - Offset [24:31] - Size
+- * @param o_mcp_resp - MCP response
+- * @param o_mcp_param - MCP response param
+- * @param o_txn_size - Buffer size output
+- * @param o_buf - Pointer to the buffer returned by the MFW.
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @cmd: (Command) DRV_MSG_CODE_NVM_GET_FILE_DATA or
++ * DRV_MSG_CODE_NVM_READ_NVRAM commands.
++ * @param: [0:23] - Offset [24:31] - Size.
++ * @o_mcp_resp: MCP response.
++ * @o_mcp_param: MCP response param.
++ * @o_txn_size: Buffer size output.
++ * @o_buf: Pointer to the buffer returned by the MFW.
+ *
+- * @param return 0 upon success.
++ * Return: 0 upon success.
+ */
+ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -967,60 +980,61 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
+ u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);
+
+ /**
+- * @brief Read from sfp
++ * qed_mcp_phy_sfp_read(): Read from sfp.
+ *
+- * @param p_hwfn - hw function
+- * @param p_ptt - PTT required for register access
+- * @param port - transceiver port
+- * @param addr - I2C address
+- * @param offset - offset in sfp
+- * @param len - buffer length
+- * @param p_buf - buffer to read into
++ * @p_hwfn: HW device data.
++ * @p_ptt: PTT required for register access.
++ * @port: transceiver port.
++ * @addr: I2C address.
++ * @offset: offset in sfp.
++ * @len: buffer length.
++ * @p_buf: buffer to read into.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf);
+
+ /**
+- * @brief indicates whether the MFW objects [under mcp_info] are accessible
++ * qed_mcp_is_init(): indicates whether the MFW objects [under mcp_info]
++ * are accessible
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return true iff MFW is running and mcp_info is initialized
++ * Return: true if MFW is running and mcp_info is initialized.
+ */
+ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief request MFW to configure MSI-X for a VF
++ * qed_mcp_config_vf_msix(): Request MFW to configure MSI-X for a VF.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param vf_id - absolute inside engine
+- * @param num_sbs - number of entries to request
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @vf_id: absolute inside engine.
++ * @num: number of entries to request.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num);
+
+ /**
+- * @brief - Halt the MCP.
++ * qed_mcp_halt(): Halt the MCP.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @param return 0 upon success.
++ * Return: 0 upon success.
+ */
+ int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+ /**
+- * @brief - Wake up the MCP.
++ * qed_mcp_resume(): Wake up the MCP.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @param return 0 upon success.
++ * Return: 0 upon success.
+ */
+ int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+@@ -1038,13 +1052,13 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+ int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 mask_parities);
+
+-/* @brief - Gets the mdump retained data from the MFW.
++/* qed_mcp_mdump_get_retain(): Gets the mdump retained data from the MFW.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param p_mdump_retain
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @p_mdump_retain: mdump retain.
+ *
+- * @param return 0 upon success.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int
+ qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
+@@ -1052,15 +1066,15 @@ qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
+ struct mdump_retain_data_stc *p_mdump_retain);
+
+ /**
+- * @brief - Sets the MFW's max value for the given resource
++ * qed_mcp_set_resc_max_val(): Sets the MFW's max value for the given resource.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param res_id
+- * @param resc_max_val
+- * @param p_mcp_resp
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @res_id: RES ID.
++ * @resc_max_val: Resource max value.
++ * @p_mcp_resp: MCP response.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int
+ qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
+@@ -1069,16 +1083,17 @@ qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
+ u32 resc_max_val, u32 *p_mcp_resp);
+
+ /**
+- * @brief - Gets the MFW allocation info for the given resource
++ * qed_mcp_get_resc_info(): Gets the MFW allocation info for the given
++ * resource.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param res_id
+- * @param p_mcp_resp
+- * @param p_resc_num
+- * @param p_resc_start
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @res_id: Res ID.
++ * @p_mcp_resp: MCP resp.
++ * @p_resc_num: Resc num.
++ * @p_resc_start: Resc start.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int
+ qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+@@ -1087,13 +1102,13 @@ qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+ u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);
+
+ /**
+- * @brief Send eswitch mode to MFW
++ * qed_mcp_ov_update_eswitch(): Send eswitch mode to MFW.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param eswitch - eswitch mode
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @eswitch: eswitch mode.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+@@ -1113,12 +1128,12 @@ enum qed_resc_lock {
+ };
+
+ /**
+- * @brief - Initiates PF FLR
++ * qed_mcp_initiate_pf_flr(): Initiates PF FLR.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+ struct qed_resc_lock_params {
+@@ -1151,13 +1166,13 @@ struct qed_resc_lock_params {
+ };
+
+ /**
+- * @brief Acquires MFW generic resource lock
++ * qed_mcp_resc_lock(): Acquires MFW generic resource lock.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param p_params
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @p_params: Params.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int
+ qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+@@ -1175,13 +1190,13 @@ struct qed_resc_unlock_params {
+ };
+
+ /**
+- * @brief Releases MFW generic resource lock
++ * qed_mcp_resc_unlock(): Releases MFW generic resource lock.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param p_params
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @p_params: Params.
+ *
+- * @return int - 0 - operation was successful.
++ * Return: Int - 0 - Operation was successful.
+ */
+ int
+ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
+@@ -1189,12 +1204,15 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
+ struct qed_resc_unlock_params *p_params);
+
+ /**
+- * @brief - default initialization for lock/unlock resource structs
++ * qed_mcp_resc_lock_default_init(): Default initialization for
++ * lock/unlock resource structs.
+ *
+- * @param p_lock - lock params struct to be initialized; Can be NULL
+- * @param p_unlock - unlock params struct to be initialized; Can be NULL
+- * @param resource - the requested resource
+- * @paral b_is_permanent - disable retries & aging when set
++ * @p_lock: lock params struct to be initialized; Can be NULL.
++ * @p_unlock: unlock params struct to be initialized; Can be NULL.
++ * @resource: the requested resource.
++ * @b_is_permanent: disable retries & aging when set.
++ *
++ * Return: Void.
+ */
+ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
+ struct qed_resc_unlock_params *p_unlock,
+@@ -1202,94 +1220,117 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
+ resource, bool b_is_permanent);
+
+ /**
+- * @brief - Return whether management firmware support smart AN
++ * qed_mcp_is_smart_an_supported(): Return whether management firmware
++ * supports smart AN.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return bool - true if feature is supported.
++ * Return: True if the feature is supported.
+ */
+ bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief Learn of supported MFW features; To be done during early init
++ * qed_mcp_get_capabilities(): Learn of supported MFW features;
++ * To be done during early init.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ *
++ * Return: Int.
+ */
+ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Inform MFW of set of features supported by driver. Should be done
+- * inside the content of the LOAD_REQ.
++ * qed_mcp_set_capabilities(): Inform MFW of set of features supported
++ * by driver. Should be done inside the content
++ * of the LOAD_REQ.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * Return: Int.
+ */
+ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Read ufp config from the shared memory.
++ * qed_mcp_read_ufp_config(): Read ufp config from the shared memory.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * Return: Void.
+ */
+ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Populate the nvm info shadow in the given hardware function
++ * qed_mcp_nvm_info_populate(): Populate the nvm info shadow in the given
++ * hardware function.
++ *
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
++ * Return: Int.
+ */
+ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief Delete nvm info shadow in the given hardware function
++ * qed_mcp_nvm_info_free(): Delete nvm info shadow in the given
++ * hardware function.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
++ *
++ * Return: Void.
+ */
+ void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief Get the engine affinity configuration.
++ * qed_mcp_get_engine_config(): Get the engine affinity configuration.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ *
++ * Return: Int.
+ */
+ int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Get the PPFID bitmap.
++ * qed_mcp_get_ppfid_bitmap(): Get the PPFID bitmap.
+ *
+- * @param p_hwfn
+- * @param p_ptt
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ *
++ * Return: Int.
+ */
+ int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+ /**
+- * @brief Get NVM config attribute value.
++ * qed_mcp_nvm_get_cfg(): Get NVM config attribute value.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @option_id: Option ID.
++ * @entity_id: Entity ID.
++ * @flags: Flags.
++ * @p_buf: Buf.
++ * @p_len: Len.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param option_id
+- * @param entity_id
+- * @param flags
+- * @param p_buf
+- * @param p_len
++ * Return: Int.
+ */
+ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 *p_len);
+
+ /**
+- * @brief Set NVM config attribute value.
++ * qed_mcp_nvm_set_cfg(): Set NVM config attribute value.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param option_id
+- * @param entity_id
+- * @param flags
+- * @param p_buf
+- * @param len
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @option_id: Option ID.
++ * @entity_id: Entity ID.
++ * @flags: Flags.
++ * @p_buf: Buf.
++ * @len: Len.
++ *
++ * Return: Int.
+ */
+ int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
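
Like the rest of this patch, the qed_mcp.h hunk above rewrites the old @brief/@param doxygen blocks into kernel-doc. The shape the conversions aim for, shown here as a generic template (not text taken from the patch, and following the patch's own 'name(): description' convention):

    /**
     * some_function(): One-line summary of what the function does.
     * @first_arg: What the first argument means.
     * @second_arg: What the second argument means.
     *
     * Optional longer description.
     *
     * Context: Locking or sleeping requirements, if any.
     * Return: Meaning of the return value.
     */
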
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
+index e27dd9a4547e8..7a3bd749e1e4c 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
+@@ -6,47 +6,47 @@
+ #include <linux/types.h>
+
+ /**
+- * @brief qed_selftest_memory - Perform memory test
++ * qed_selftest_memory(): Perform memory test.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_selftest_memory(struct qed_dev *cdev);
+
+ /**
+- * @brief qed_selftest_interrupt - Perform interrupt test
++ * qed_selftest_interrupt(): Perform interrupt test.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_selftest_interrupt(struct qed_dev *cdev);
+
+ /**
+- * @brief qed_selftest_register - Perform register test
++ * qed_selftest_register(): Perform register test.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_selftest_register(struct qed_dev *cdev);
+
+ /**
+- * @brief qed_selftest_clock - Perform clock test
++ * qed_selftest_clock(): Perform clock test.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_selftest_clock(struct qed_dev *cdev);
+
+ /**
+- * @brief qed_selftest_nvram - Perform nvram test
++ * qed_selftest_nvram(): Perform nvram test.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_selftest_nvram(struct qed_dev *cdev);
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
+index 60ff3222bf551..c5a38f3c92b04 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
+@@ -31,23 +31,18 @@ struct qed_spq_comp_cb {
+ };
+
+ /**
+- * @brief qed_eth_cqe_completion - handles the completion of a
+- * ramrod on the cqe ring
++ * qed_eth_cqe_completion(): handles the completion of a
++ * ramrod on the cqe ring.
+ *
+- * @param p_hwfn
+- * @param cqe
++ * @p_hwfn: HW device data.
++ * @cqe: CQE.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe);
+
+-/**
+- * @file
+- *
+- * QED Slow-hwfn queue interface
+- */
+-
++ /* QED Slow-hwfn queue interface */
+ union ramrod_data {
+ struct pf_start_ramrod_data pf_start;
+ struct pf_update_ramrod_data pf_update;
+@@ -207,117 +202,128 @@ struct qed_spq {
+ };
+
+ /**
+- * @brief qed_spq_post - Posts a Slow hwfn request to FW, or lacking that
+- * Pends it to the future list.
++ * qed_spq_post(): Posts a Slow hwfn request to FW, or lacking that
++ * Pends it to the future list.
+ *
+- * @param p_hwfn
+- * @param p_req
++ * @p_hwfn: HW device data.
++ * @p_ent: Ent.
++ * @fw_return_code: Return code from firmware.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_spq_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *fw_return_code);
+
+ /**
+- * @brief qed_spq_allocate - Alloocates & initializes the SPQ and EQ.
++ * qed_spq_alloc(): Allocates & initializes the SPQ and EQ.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_spq_alloc(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_spq_setup - Reset the SPQ to its start state.
++ * qed_spq_setup(): Reset the SPQ to its start state.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
++ *
++ * Return: Void.
+ */
+ void qed_spq_setup(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_spq_deallocate - Deallocates the given SPQ struct.
++ * qed_spq_free(): Deallocates the given SPQ struct.
++ *
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
++ * Return: Void.
+ */
+ void qed_spq_free(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_spq_get_entry - Obtain an entrry from the spq
+- * free pool list.
+- *
+- *
++ * qed_spq_get_entry(): Obtain an entry from the spq
++ * free pool list.
+ *
+- * @param p_hwfn
+- * @param pp_ent
++ * @p_hwfn: HW device data.
++ * @pp_ent: PP ENT.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int
+ qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent);
+
+ /**
+- * @brief qed_spq_return_entry - Return an entry to spq free
+- * pool list
++ * qed_spq_return_entry(): Return an entry to spq free pool list.
+ *
+- * @param p_hwfn
+- * @param p_ent
++ * @p_hwfn: HW device data.
++ * @p_ent: P ENT.
++ *
++ * Return: Void.
+ */
+ void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent);
+ /**
+- * @brief qed_eq_allocate - Allocates & initializes an EQ struct
++ * qed_eq_alloc(): Allocates & initializes an EQ struct.
+ *
+- * @param p_hwfn
+- * @param num_elem number of elements in the eq
++ * @p_hwfn: HW device data.
++ * @num_elem: number of elements in the eq.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem);
+
+ /**
+- * @brief qed_eq_setup - Reset the EQ to its start state.
++ * qed_eq_setup(): Reset the EQ to its start state.
++ *
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
++ * Return: Void.
+ */
+ void qed_eq_setup(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_eq_free - deallocates the given EQ struct.
++ * qed_eq_free(): deallocates the given EQ struct.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
++ *
++ * Return: Void.
+ */
+ void qed_eq_free(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_eq_prod_update - update the FW with default EQ producer
++ * qed_eq_prod_update(): update the FW with default EQ producer.
++ *
++ * @p_hwfn: HW device data.
++ * @prod: Prod.
+ *
+- * @param p_hwfn
+- * @param prod
++ * Return: Void.
+ */
+ void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+ u16 prod);
+
+ /**
+- * @brief qed_eq_completion - Completes currently pending EQ elements
++ * qed_eq_completion(): Completes currently pending EQ elements.
+ *
+- * @param p_hwfn
+- * @param cookie
++ * @p_hwfn: HW device data.
++ * @cookie: Cookie.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_eq_completion(struct qed_hwfn *p_hwfn,
+ void *cookie);
+
+ /**
+- * @brief qed_spq_completion - Completes a single event
++ * qed_spq_completion(): Completes a single event.
+ *
+- * @param p_hwfn
+- * @param echo - echo value from cookie (used for determining completion)
+- * @param p_data - data from cookie (used in callback function if applicable)
++ * @p_hwfn: HW device data.
++ * @echo: echo value from cookie (used for determining completion).
++ * @fw_return_code: FW return code.
++ * @p_data: data from cookie (used in callback function if applicable).
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ __le16 echo,
+@@ -325,44 +331,43 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ union event_ring_data *p_data);
+
+ /**
+- * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
++ * qed_spq_get_cid(): Given p_hwfn, return cid for the hwfn's SPQ.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return u32 - SPQ CID
++ * Return: u32 - SPQ CID.
+ */
+ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_consq_alloc - Allocates & initializes an ConsQ
+- * struct
++ * qed_consq_alloc(): Allocates & initializes a ConsQ struct.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_consq_alloc(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_consq_setup - Reset the ConsQ to its start state.
++ * qed_consq_setup(): Reset the ConsQ to its start state.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
++ *
++ * Return: Void.
+ */
+ void qed_consq_setup(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_consq_free - deallocates the given ConsQ struct.
++ * qed_consq_free(): deallocates the given ConsQ struct.
++ *
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
++ * Return: Void.
+ */
+ void qed_consq_free(struct qed_hwfn *p_hwfn);
+ int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
+
+-/**
+- * @file
+- *
+- * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
+- */
++/* Slow-hwfn low-level commands (Ramrods) function definitions. */
+
+ #define QED_SP_EQ_COMPLETION 0x01
+ #define QED_SP_CQE_COMPLETION 0x02
+@@ -377,12 +382,15 @@ struct qed_sp_init_data {
+ };
+
+ /**
+- * @brief Returns a SPQ entry to the pool / frees the entry if allocated.
+- * Should be called on in error flows after initializing the SPQ entry
+- * and before posting it.
++ * qed_sp_destroy_request(): Returns a SPQ entry to the pool / frees the
++ * entry if allocated. Should be called in error
++ * flows after initializing the SPQ entry
++ * and before posting it.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ent: Ent.
+ *
+- * @param p_hwfn
+- * @param p_ent
++ * Return: Void.
+ */
+ void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent);
+@@ -394,7 +402,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+ struct qed_sp_init_data *p_data);
+
+ /**
+- * @brief qed_sp_pf_start - PF Function Start Ramrod
++ * qed_sp_pf_start(): PF Function Start Ramrod.
++ *
++ * @p_hwfn: HW device data.
++ * @p_ptt: P_ptt.
++ * @p_tunn: P_tunn.
++ * @allow_npar_tx_switch: Allow NPAR TX Switch.
++ *
++ * Return: Int.
+ *
+ * This ramrod is sent to initialize a physical function (PF). It will
+ * configure the function related parameters and write its completion to the
+@@ -404,12 +419,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+ * allocated by the driver on host memory and its parameters are written
+ * to the internal RAM of the UStorm by the Function Start Ramrod.
+ *
+- * @param p_hwfn
+- * @param p_ptt
+- * @param p_tunn
+- * @param allow_npar_tx_switch
+- *
+- * @return int
+ */
+
+ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+@@ -418,47 +427,33 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ bool allow_npar_tx_switch);
+
+ /**
+- * @brief qed_sp_pf_update - PF Function Update Ramrod
++ * qed_sp_pf_update(): PF Function Update Ramrod.
+ *
+- * This ramrod updates function-related parameters. Every parameter can be
+- * updated independently, according to configuration flags.
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
++ * Return: Int.
+ *
+- * @return int
++ * This ramrod updates function-related parameters. Every parameter can be
++ * updated independently, according to configuration flags.
+ */
+
+ int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_sp_pf_update_stag - Update firmware of new outer tag
++ * qed_sp_pf_update_stag(): Update firmware of new outer tag.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_sp_pf_stop - PF Function Stop Ramrod
+- *
+- * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
+- * sent and the last completion written to the PFs Event Ring. This ramrod also
+- * deletes the context for the Slowhwfn connection on this PF.
+- *
+- * @note Not required for first packet.
+- *
+- * @param p_hwfn
+- *
+- * @return int
+- */
+-
+-/**
+- * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod
++ * qed_sp_pf_update_ufp(): PF ufp update Ramrod.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn);
+
+@@ -470,11 +465,11 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+ /**
+- * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod
++ * qed_sp_heartbeat_ramrod(): Send empty Ramrod.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return int
++ * Return: Int.
+ */
+
+ int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+index 7ff23ef8ccc17..0a1e44d45c1a2 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+@@ -251,29 +251,31 @@ extern const struct qed_iov_hv_ops qed_iov_ops_pass;
+
+ #ifdef CONFIG_QED_SRIOV
+ /**
+- * @brief Check if given VF ID @vfid is valid
+- * w.r.t. @b_enabled_only value
+- * if b_enabled_only = true - only enabled VF id is valid
+- * else any VF id less than max_vfs is valid
++ * qed_iov_is_valid_vfid(): Check if given VF ID @vfid is valid
++ * w.r.t. @b_enabled_only value
++ * if b_enabled_only = true - only enabled
++ * VF id is valid.
++ * else any VF id less than max_vfs is valid.
+ *
+- * @param p_hwfn
+- * @param rel_vf_id - Relative VF ID
+- * @param b_enabled_only - consider only enabled VF
+- * @param b_non_malicious - true iff we want to validate vf isn't malicious.
++ * @p_hwfn: HW device data.
++ * @rel_vf_id: Relative VF ID.
++ * @b_enabled_only: consider only enabled VF.
++ * @b_non_malicious: true iff we want to validate vf isn't malicious.
+ *
+- * @return bool - true for valid VF ID
++ * Return: bool - true for valid VF ID
+ */
+ bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id,
+ bool b_enabled_only, bool b_non_malicious);
+
+ /**
+- * @brief - Given a VF index, return index of next [including that] active VF.
++ * qed_iov_get_next_active_vf(): Given a VF index, return index of
++ * next [including that] active VF.
+ *
+- * @param p_hwfn
+- * @param rel_vf_id
++ * @p_hwfn: HW device data.
++ * @rel_vf_id: VF ID.
+ *
+- * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
++ * Return: MAX_NUM_VFS in case no further active VFs, otherwise index.
+ */
+ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
+
+@@ -281,83 +283,92 @@ void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port);
+
+ /**
+- * @brief Read sriov related information and allocated resources
+- * reads from configuration space, shmem, etc.
++ * qed_iov_hw_info(): Read sriov related information and allocated resources
++ * reads from configuration space, shmem, etc.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
++ * qed_add_tlv(): place a given tlv on the tlv buffer at next offset
+ *
+- * @param p_hwfn
+- * @param p_iov
+- * @param type
+- * @param length
++ * @p_hwfn: HW device data.
++ * @offset: offset.
++ * @type: Type
++ * @length: Length.
+ *
+- * @return pointer to the newly placed tlv
++ * Return: pointer to the newly placed tlv
+ */
+ void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
+
+ /**
+- * @brief list the types and lengths of the tlvs on the buffer
++ * qed_dp_tlv_list(): list the types and lengths of the tlvs on the buffer
+ *
+- * @param p_hwfn
+- * @param tlvs_list
++ * @p_hwfn: HW device data.
++ * @tlvs_list: Tlvs_list.
++ *
++ * Return: Void.
+ */
+ void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
+
+ /**
+- * @brief qed_iov_alloc - allocate sriov related resources
++ * qed_iov_alloc(): allocate sriov related resources
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_iov_alloc(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_iov_setup - setup sriov related resources
++ * qed_iov_setup(): setup sriov related resources
++ *
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
++ * Return: Void.
+ */
+ void qed_iov_setup(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_iov_free - free sriov related resources
++ * qed_iov_free(): free sriov related resources
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
++ *
++ * Return: Void.
+ */
+ void qed_iov_free(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief free sriov related memory that was allocated during hw_prepare
++ * qed_iov_free_hw_info(): free sriov related memory that was
++ * allocated during hw_prepare
++ *
++ * @cdev: Qed dev pointer.
+ *
+- * @param cdev
++ * Return: Void.
+ */
+ void qed_iov_free_hw_info(struct qed_dev *cdev);
+
+ /**
+- * @brief Mark structs of vfs that have been FLR-ed.
++ * qed_iov_mark_vf_flr(): Mark structs of vfs that have been FLR-ed.
+ *
+- * @param p_hwfn
+- * @param disabled_vfs - bitmask of all VFs on path that were FLRed
++ * @p_hwfn: HW device data.
++ * @disabled_vfs: bitmask of all VFs on path that were FLRed
+ *
+- * @return true iff one of the PF's vfs got FLRed. false otherwise.
++ * Return: true iff one of the PF's vfs got FLRed. false otherwise.
+ */
+ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
+
+ /**
+- * @brief Search extended TLVs in request/reply buffer.
++ * qed_iov_search_list_tlvs(): Search extended TLVs in request/reply buffer.
+ *
+- * @param p_hwfn
+- * @param p_tlvs_list - Pointer to tlvs list
+- * @param req_type - Type of TLV
++ * @p_hwfn: HW device data.
++ * @p_tlvs_list: Pointer to tlvs list
++ * @req_type: Type of TLV
+ *
+- * @return pointer to tlv type if found, otherwise returns NULL.
++ * Return: pointer to tlv type if found, otherwise returns NULL.
+ */
+ void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
+index 60d2bb64e65fb..976201fc7d4ae 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
+@@ -688,13 +688,16 @@ struct qed_vf_iov {
+ };
+
+ /**
+- * @brief VF - Set Rx/Tx coalesce per VF's relative queue.
+- * Coalesce value '0' will omit the configuration.
++ * qed_vf_pf_set_coalesce(): VF - Set Rx/Tx coalesce per VF's relative queue.
++ * Coalesce value '0' will omit the
++ * configuration.
+ *
+- * @param p_hwfn
+- * @param rx_coal - coalesce value in micro second for rx queue
+- * @param tx_coal - coalesce value in micro second for tx queue
+- * @param p_cid - queue cid
++ * @p_hwfn: HW device data.
++ * @rx_coal: coalesce value in micro second for rx queue.
++ * @tx_coal: coalesce value in micro second for tx queue.
++ * @p_cid: queue cid.
++ *
++ * Return: Int.
+ *
+ **/
+ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
+@@ -702,148 +705,172 @@ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
+ u16 tx_coal, struct qed_queue_cid *p_cid);
+
+ /**
+- * @brief VF - Get coalesce per VF's relative queue.
++ * qed_vf_pf_get_coalesce(): VF - Get coalesce per VF's relative queue.
+ *
+- * @param p_hwfn
+- * @param p_coal - coalesce value in micro second for VF queues.
+- * @param p_cid - queue cid
++ * @p_hwfn: HW device data.
++ * @p_coal: coalesce value in micro second for VF queues.
++ * @p_cid: queue cid.
+ *
++ * Return: Int.
+ **/
+ int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
+ u16 *p_coal, struct qed_queue_cid *p_cid);
+
+ #ifdef CONFIG_QED_SRIOV
+ /**
+- * @brief Read the VF bulletin and act on it if needed
++ * qed_vf_read_bulletin(): Read the VF bulletin and act on it if needed.
+ *
+- * @param p_hwfn
+- * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
++ * @p_hwfn: HW device data.
++ * @p_change: qed fills 1 iff bulletin board has changed, 0 otherwise.
+ *
+- * @return enum _qed_status
++ * Return: enum _qed_status.
+ */
+ int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
+
+ /**
+- * @brief Get link paramters for VF from qed
++ * qed_vf_get_link_params(): Get link parameters for VF from qed
++ *
++ * @p_hwfn: HW device data.
++ * @params: the link params structure to be filled for the VF.
+ *
+- * @param p_hwfn
+- * @param params - the link params structure to be filled for the VF
++ * Return: Void.
+ */
+ void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params);
+
+ /**
+- * @brief Get link state for VF from qed
++ * qed_vf_get_link_state(): Get link state for VF from qed.
++ *
++ * @p_hwfn: HW device data.
++ * @link: the link state structure to be filled for the VF
+ *
+- * @param p_hwfn
+- * @param link - the link state structure to be filled for the VF
++ * Return: Void.
+ */
+ void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link);
+
+ /**
+- * @brief Get link capabilities for VF from qed
++ * qed_vf_get_link_caps(): Get link capabilities for VF from qed.
+ *
+- * @param p_hwfn
+- * @param p_link_caps - the link capabilities structure to be filled for the VF
++ * @p_hwfn: HW device data.
++ * @p_link_caps: the link capabilities structure to be filled for the VF
++ *
++ * Return: Void.
+ */
+ void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps);
+
+ /**
+- * @brief Get number of Rx queues allocated for VF by qed
++ * qed_vf_get_num_rxqs(): Get number of Rx queues allocated for VF by qed
++ *
++ * @p_hwfn: HW device data.
++ * @num_rxqs: allocated RX queues
+ *
+- * @param p_hwfn
+- * @param num_rxqs - allocated RX queues
++ * Return: Void.
+ */
+ void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
+
+ /**
+- * @brief Get number of Rx queues allocated for VF by qed
++ * qed_vf_get_num_txqs(): Get number of Tx queues allocated for VF by qed
+ *
+- * @param p_hwfn
+- * @param num_txqs - allocated RX queues
++ * @p_hwfn: HW device data.
++ * @num_txqs: allocated TX queues
++ *
++ * Return: Void.
+ */
+ void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs);
+
+ /**
+- * @brief Get number of available connections [both Rx and Tx] for VF
++ * qed_vf_get_num_cids(): Get number of available connections
++ * [both Rx and Tx] for VF
++ *
++ * @p_hwfn: HW device data.
++ * @num_cids: allocated number of connections
+ *
+- * @param p_hwfn
+- * @param num_cids - allocated number of connections
++ * Return: Void.
+ */
+ void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids);
+
+ /**
+- * @brief Get port mac address for VF
++ * qed_vf_get_port_mac(): Get port mac address for VF.
+ *
+- * @param p_hwfn
+- * @param port_mac - destination location for port mac
++ * @p_hwfn: HW device data.
++ * @port_mac: destination location for port mac
++ *
++ * Return: Void.
+ */
+ void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
+
+ /**
+- * @brief Get number of VLAN filters allocated for VF by qed
++ * qed_vf_get_num_vlan_filters(): Get number of VLAN filters allocated
++ * for VF by qed.
++ *
++ * @p_hwfn: HW device data.
++ * @num_vlan_filters: allocated VLAN filters
+ *
+- * @param p_hwfn
+- * @param num_rxqs - allocated VLAN filters
++ * Return: Void.
+ */
+ void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
+ u8 *num_vlan_filters);
+
+ /**
+- * @brief Get number of MAC filters allocated for VF by qed
++ * qed_vf_get_num_mac_filters(): Get number of MAC filters allocated
++ * for VF by qed
+ *
+- * @param p_hwfn
+- * @param num_rxqs - allocated MAC filters
++ * @p_hwfn: HW device data.
++ * @num_mac_filters: allocated MAC filters
++ *
++ * Return: Void.
+ */
+ void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);
+
+ /**
+- * @brief Check if VF can set a MAC address
++ * qed_vf_check_mac(): Check if VF can set a MAC address
+ *
+- * @param p_hwfn
+- * @param mac
++ * @p_hwfn: HW device data.
++ * @mac: Mac.
+ *
+- * @return bool
++ * Return: bool.
+ */
+ bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
+
+ /**
+- * @brief Set firmware version information in dev_info from VFs acquire response tlv
++ * qed_vf_get_fw_version(): Set firmware version information
++ * in dev_info from VFs acquire response tlv
++ *
++ * @p_hwfn: HW device data.
++ * @fw_major: FW major.
++ * @fw_minor: FW minor.
++ * @fw_rev: FW rev.
++ * @fw_eng: FW eng.
+ *
+- * @param p_hwfn
+- * @param fw_major
+- * @param fw_minor
+- * @param fw_rev
+- * @param fw_eng
++ * Return: Void.
+ */
+ void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng);
+
+ /**
+- * @brief hw preparation for VF
+- * sends ACQUIRE message
++ * qed_vf_hw_prepare(): hw preparation for VF - sends ACQUIRE message
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief VF - start the RX Queue by sending a message to the PF
+- * @param p_hwfn
+- * @param p_cid - Only relative fields are relevant
+- * @param bd_max_bytes - maximum number of bytes per bd
+- * @param bd_chain_phys_addr - physical address of bd chain
+- * @param cqe_pbl_addr - physical address of pbl
+- * @param cqe_pbl_size - pbl size
+- * @param pp_prod - pointer to the producer to be
+- * used in fastpath
++ * qed_vf_pf_rxq_start(): start the RX Queue by sending a message to the PF
++ *
++ * @p_hwfn: HW device data.
++ * @p_cid: Only relative fields are relevant
++ * @bd_max_bytes: maximum number of bytes per bd
++ * @bd_chain_phys_addr: physical address of bd chain
++ * @cqe_pbl_addr: physical address of pbl
++ * @cqe_pbl_size: pbl size
++ * @pp_prod: pointer to the producer to be used in fastpath
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+@@ -853,18 +880,16 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ u16 cqe_pbl_size, void __iomem **pp_prod);
+
+ /**
+- * @brief VF - start the TX queue by sending a message to the
+- * PF.
++ * qed_vf_pf_txq_start(): VF - start the TX queue by sending a message to the
++ * PF.
+ *
+- * @param p_hwfn
+- * @param tx_queue_id - zero based within the VF
+- * @param sb - status block for this queue
+- * @param sb_index - index within the status block
+- * @param bd_chain_phys_addr - physical address of tx chain
+- * @param pp_doorbell - pointer to address to which to
+- * write the doorbell too..
++ * @p_hwfn: HW device data.
++ * @p_cid: CID.
++ * @pbl_addr: PBL address.
++ * @pbl_size: PBL Size.
++ * @pp_doorbell: pointer to address to which to write the doorbell.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int
+ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+@@ -873,90 +898,91 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ u16 pbl_size, void __iomem **pp_doorbell);
+
+ /**
+- * @brief VF - stop the RX queue by sending a message to the PF
++ * qed_vf_pf_rxq_stop(): VF - stop the RX queue by sending a message to the PF.
+ *
+- * @param p_hwfn
+- * @param p_cid
+- * @param cqe_completion
++ * @p_hwfn: HW device data.
++ * @p_cid: CID.
++ * @cqe_completion: CQE Completion.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid, bool cqe_completion);
+
+ /**
+- * @brief VF - stop the TX queue by sending a message to the PF
++ * qed_vf_pf_txq_stop(): VF - stop the TX queue by sending a message to the PF.
+ *
+- * @param p_hwfn
+- * @param tx_qid
++ * @p_hwfn: HW device data.
++ * @p_cid: CID.
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);
+
+ /**
+- * @brief VF - send a vport update command
++ * qed_vf_pf_vport_update(): VF - send a vport update command.
+ *
+- * @param p_hwfn
+- * @param params
++ * @p_hwfn: HW device data.
++ * @p_params: Params
+ *
+- * @return int
++ * Return: Int.
+ */
+ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params);
+
+ /**
++ * qed_vf_pf_reset(): VF - send a close message to PF.
+ *
+- * @brief VF - send a close message to PF
++ * @p_hwfn: HW device data.
+ *
+- * @param p_hwfn
+- *
+- * @return enum _qed_status
++ * Return: enum _qed_status
+ */
+ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief VF - free vf`s memories
++ * qed_vf_pf_release(): VF - free vf`s memories.
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return enum _qed_status
++ * Return: enum _qed_status
+ */
+ int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
++ * qed_vf_get_igu_sb_id(): Get the IGU SB ID for a given
+ * sb_id. For VFs igu sbs don't have to be contiguous
+ *
+- * @param p_hwfn
+- * @param sb_id
++ * @p_hwfn: HW device data.
++ * @sb_id: SB ID.
+ *
+- * @return INLINE u16
++ * Return: INLINE u16
+ */
+ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+ /**
+- * @brief Stores [or removes] a configured sb_info.
++ * qed_vf_set_sb_info(): Stores [or removes] a configured sb_info.
++ *
++ * @p_hwfn: HW device data.
++ * @sb_id: zero-based SB index [for fastpath]
++ * @p_sb: may be NULL [during removal].
+ *
+- * @param p_hwfn
+- * @param sb_id - zero-based SB index [for fastpath]
+- * @param sb_info - may be NULL [during removal].
++ * Return: Void.
+ */
+ void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
+ u16 sb_id, struct qed_sb_info *p_sb);
+
+ /**
+- * @brief qed_vf_pf_vport_start - perform vport start for VF.
++ * qed_vf_pf_vport_start(): perform vport start for VF.
+ *
+- * @param p_hwfn
+- * @param vport_id
+- * @param mtu
+- * @param inner_vlan_removal
+- * @param tpa_mode
+- * @param max_buffers_per_cqe,
+- * @param only_untagged - default behavior regarding vlan acceptance
++ * @p_hwfn: HW device data.
++ * @vport_id: Vport ID.
++ * @mtu: MTU.
++ * @inner_vlan_removal: Inner VLAN removal.
++ * @tpa_mode: TPA mode
++ * @max_buffers_per_cqe: Max buffers per CQE.
++ * @only_untagged: default behavior regarding vlan acceptance
+ *
+- * @return enum _qed_status
++ * Return: enum _qed_status
+ */
+ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+@@ -966,11 +992,11 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 max_buffers_per_cqe, u8 only_untagged);
+
+ /**
+- * @brief qed_vf_pf_vport_stop - stop the VF's vport
++ * qed_vf_pf_vport_stop(): stop the VF's vport
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return enum _qed_status
++ * Return: enum _qed_status
+ */
+ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);
+
+@@ -981,42 +1007,49 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd);
+
+ /**
+- * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
++ * qed_vf_pf_int_cleanup(): clean the SB of the VF
+ *
+- * @param p_hwfn
++ * @p_hwfn: HW device data.
+ *
+- * @return enum _qed_status
++ * Return: enum _qed_status
+ */
+ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
+
+ /**
+- * @brief - return the link params in a given bulletin board
++ * __qed_vf_get_link_params(): return the link params in a given bulletin board
+ *
+- * @param p_hwfn
+- * @param p_params - pointer to a struct to fill with link params
+- * @param p_bulletin
++ * @p_hwfn: HW device data.
++ * @p_params: pointer to a struct to fill with link params
++ * @p_bulletin: Bulletin.
++ *
++ * Return: Void.
+ */
+ void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *p_params,
+ struct qed_bulletin_content *p_bulletin);
+
+ /**
+- * @brief - return the link state in a given bulletin board
++ * __qed_vf_get_link_state(): return the link state in a given bulletin board
++ *
++ * @p_hwfn: HW device data.
++ * @p_link: pointer to a struct to fill with link state
++ * @p_bulletin: Bulletin.
+ *
+- * @param p_hwfn
+- * @param p_link - pointer to a struct to fill with link state
+- * @param p_bulletin
++ * Return: Void.
+ */
+ void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *p_link,
+ struct qed_bulletin_content *p_bulletin);
+
+ /**
+- * @brief - return the link capabilities in a given bulletin board
++ * __qed_vf_get_link_caps(): return the link capabilities in a given
++ * bulletin board
+ *
+- * @param p_hwfn
+- * @param p_link - pointer to a struct to fill with link capabilities
+- * @param p_bulletin
++ * @p_hwfn: HW device data.
++ * @p_link_caps: pointer to a struct to fill with link capabilities
++ * @p_bulletin: Bulletin.
++ *
++ * Return: Void.
+ */
+ void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+@@ -1029,9 +1062,13 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+
+ u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);
+ /**
+- * @brief - Ask PF to update the MAC address in it's bulletin board
++ * qed_vf_pf_bulletin_update_mac(): Ask PF to update the MAC address in
++ * its bulletin board
++ *
++ * @p_hwfn: HW device data.
++ * @p_mac: mac address to be updated in bulletin board
+ *
+- * @param p_mac - mac address to be updated in bulletin board
++ * Return: Int.
+ */
+ int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac);
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 6c22bfc16ee6b..fee47c8eeff49 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -2832,10 +2832,13 @@ static void qede_get_eth_tlv_data(void *dev, void *data)
+ }
+
+ /**
+- * qede_io_error_detected - called when PCI error is detected
++ * qede_io_error_detected(): Called when PCI error is detected
++ *
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
++ * Return: pci_ers_result_t.
++ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
+index 6b8013fb17c38..eb59e8abe6915 100644
+--- a/drivers/net/ethernet/socionext/netsec.c
++++ b/drivers/net/ethernet/socionext/netsec.c
+@@ -1851,6 +1851,17 @@ static int netsec_of_probe(struct platform_device *pdev,
+ return err;
+ }
+
++ /*
++ * SynQuacer is physically configured with TX and RX delays
++ * but the standard firmware claimed otherwise for a long
++ * time, ignore it.
++ */
++ if (of_machine_is_compatible("socionext,developer-box") &&
++ priv->phy_interface != PHY_INTERFACE_MODE_RGMII_ID) {
++ dev_warn(&pdev->dev, "Outdated firmware reports incorrect PHY mode, overriding\n");
++ priv->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
++ }
++
+ priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!priv->phy_np) {
+ dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
+diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
+index 2ab29efa6b6e4..b4db50c9e7038 100644
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -1556,15 +1556,15 @@ static int temac_probe(struct platform_device *pdev)
+ }
+
+ /* Error handle returned DMA RX and TX interrupts */
+- if (lp->rx_irq < 0) {
+- if (lp->rx_irq != -EPROBE_DEFER)
+- dev_err(&pdev->dev, "could not get DMA RX irq\n");
+- return lp->rx_irq;
++ if (lp->rx_irq <= 0) {
++ rc = lp->rx_irq ?: -EINVAL;
++ return dev_err_probe(&pdev->dev, rc,
++ "could not get DMA RX irq\n");
+ }
+- if (lp->tx_irq < 0) {
+- if (lp->tx_irq != -EPROBE_DEFER)
+- dev_err(&pdev->dev, "could not get DMA TX irq\n");
+- return lp->tx_irq;
++ if (lp->tx_irq <= 0) {
++ rc = lp->tx_irq ?: -EINVAL;
++ return dev_err_probe(&pdev->dev, rc,
++ "could not get DMA TX irq\n");
+ }
+
+ if (temac_np) {
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index 663ce0e09c2de..bdb05d246b86e 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -523,7 +523,7 @@ static int tap_open(struct inode *inode, struct file *file)
+ q->sock.state = SS_CONNECTED;
+ q->sock.file = file;
+ q->sock.ops = &tap_socket_ops;
+- sock_init_data_uid(&q->sock, &q->sk, inode->i_uid);
++ sock_init_data_uid(&q->sock, &q->sk, current_fsuid());
+ q->sk.sk_write_space = tap_sock_write_space;
+ q->sk.sk_destruct = tap_sock_destruct;
+ q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 30eea8270c9b2..924bdae314c80 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -3411,7 +3411,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
+ tfile->socket.file = file;
+ tfile->socket.ops = &tun_socket_ops;
+
+- sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid);
++ sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());
+
+ tfile->sk.sk_write_space = tun_sock_write_space;
+ tfile->sk.sk_sndbuf = INT_MAX;
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 3497b5a286ea3..695e4efdc0114 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -617,9 +617,23 @@ static const struct usb_device_id products[] = {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
++ .idProduct = 0x8005, /* A-300 */
++ ZAURUS_FAKE_INTERFACE,
++ .driver_info = 0,
++}, {
++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
++ | USB_DEVICE_ID_MATCH_DEVICE,
++ .idVendor = 0x04DD,
+ .idProduct = 0x8006, /* B-500/SL-5600 */
+ ZAURUS_MASTER_INTERFACE,
+ .driver_info = 0,
++}, {
++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
++ | USB_DEVICE_ID_MATCH_DEVICE,
++ .idVendor = 0x04DD,
++ .idProduct = 0x8006, /* B-500/SL-5600 */
++ ZAURUS_FAKE_INTERFACE,
++ .driver_info = 0,
+ }, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+@@ -627,6 +641,13 @@ static const struct usb_device_id products[] = {
+ .idProduct = 0x8007, /* C-700 */
+ ZAURUS_MASTER_INTERFACE,
+ .driver_info = 0,
++}, {
++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
++ | USB_DEVICE_ID_MATCH_DEVICE,
++ .idVendor = 0x04DD,
++ .idProduct = 0x8007, /* C-700 */
++ ZAURUS_FAKE_INTERFACE,
++ .driver_info = 0,
+ }, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index e4fbb4d866064..566aa01ad2810 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -1771,6 +1771,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ } else if (!info->in || !info->out)
+ status = usbnet_get_endpoints (dev, udev);
+ else {
++ u8 ep_addrs[3] = {
++ info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0
++ };
++
+ dev->in = usb_rcvbulkpipe (xdev, info->in);
+ dev->out = usb_sndbulkpipe (xdev, info->out);
+ if (!(info->flags & FLAG_NO_SETINT))
+@@ -1780,6 +1784,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ else
+ status = 0;
+
++ if (status == 0 && !usb_check_bulk_endpoints(udev, ep_addrs))
++ status = -EINVAL;
+ }
+ if (status >= 0 && dev->status)
+ status = init_status (dev, udev);
+diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
+index 7984f2157d222..df3617c4c44e8 100644
+--- a/drivers/net/usb/zaurus.c
++++ b/drivers/net/usb/zaurus.c
+@@ -289,9 +289,23 @@ static const struct usb_device_id products [] = {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
++ .idProduct = 0x8005, /* A-300 */
++ ZAURUS_FAKE_INTERFACE,
++ .driver_info = (unsigned long)&bogus_mdlm_info,
++}, {
++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
++ | USB_DEVICE_ID_MATCH_DEVICE,
++ .idVendor = 0x04DD,
+ .idProduct = 0x8006, /* B-500/SL-5600 */
+ ZAURUS_MASTER_INTERFACE,
+ .driver_info = ZAURUS_PXA_INFO,
++}, {
++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
++ | USB_DEVICE_ID_MATCH_DEVICE,
++ .idVendor = 0x04DD,
++ .idProduct = 0x8006, /* B-500/SL-5600 */
++ ZAURUS_FAKE_INTERFACE,
++ .driver_info = (unsigned long)&bogus_mdlm_info,
+ }, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+@@ -299,6 +313,13 @@ static const struct usb_device_id products [] = {
+ .idProduct = 0x8007, /* C-700 */
+ ZAURUS_MASTER_INTERFACE,
+ .driver_info = ZAURUS_PXA_INFO,
++}, {
++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
++ | USB_DEVICE_ID_MATCH_DEVICE,
++ .idVendor = 0x04DD,
++ .idProduct = 0x8007, /* C-700 */
++ ZAURUS_FAKE_INTERFACE,
++ .driver_info = (unsigned long)&bogus_mdlm_info,
+ }, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+index 6dbaaf95ee385..2092aa373ab32 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+@@ -123,12 +123,12 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
+ case MT_EE_5GHZ:
+ dev->mphy.cap.has_5ghz = true;
+ break;
+- case MT_EE_2GHZ:
+- dev->mphy.cap.has_2ghz = true;
+- break;
+ case MT_EE_DBDC:
+ dev->dbdc_support = true;
+ fallthrough;
++ case MT_EE_2GHZ:
++ dev->mphy.cap.has_2ghz = true;
++ break;
+ default:
+ dev->mphy.cap.has_2ghz = true;
+ dev->mphy.cap.has_5ghz = true;
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index a5aa0bdc61d69..e8c360879883b 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -717,7 +717,6 @@ struct qeth_card_info {
+ u16 chid;
+ u8 ids_valid:1; /* cssid,iid,chid */
+ u8 dev_addr_is_registered:1;
+- u8 open_when_online:1;
+ u8 promisc_mode:1;
+ u8 use_v1_blkt:1;
+ u8 is_vm_nic:1;
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index e9807d2996a9d..62e7576bff536 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -5459,8 +5459,6 @@ int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+ qeth_clear_ipacmd_list(card);
+
+ rtnl_lock();
+- card->info.open_when_online = card->dev->flags & IFF_UP;
+- dev_close(card->dev);
+ netif_device_detach(card->dev);
+ netif_carrier_off(card->dev);
+ rtnl_unlock();
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index d694e3ff80865..7cdf3274cf964 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -2373,9 +2373,12 @@ static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
+ qeth_enable_hw_features(dev);
+ qeth_l2_enable_brport_features(card);
+
+- if (card->info.open_when_online) {
+- card->info.open_when_online = 0;
+- dev_open(dev, NULL);
++ if (netif_running(dev)) {
++ local_bh_disable();
++ napi_schedule(&card->napi);
++ /* kick-start the NAPI softirq: */
++ local_bh_enable();
++ qeth_l2_set_rx_mode(dev);
+ }
+ rtnl_unlock();
+ }
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index 6fd3e288f0595..93f55c7348026 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -2029,9 +2029,11 @@ static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok)
+ netif_device_attach(dev);
+ qeth_enable_hw_features(dev);
+
+- if (card->info.open_when_online) {
+- card->info.open_when_online = 0;
+- dev_open(dev, NULL);
++ if (netif_running(dev)) {
++ local_bh_disable();
++ napi_schedule(&card->napi);
++ /* kick-start the NAPI softirq: */
++ local_bh_enable();
+ }
+ rtnl_unlock();
+ }
+diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
+index b61acbb09be3b..d323f9985c482 100644
+--- a/drivers/s390/scsi/zfcp_fc.c
++++ b/drivers/s390/scsi/zfcp_fc.c
+@@ -534,8 +534,7 @@ static void zfcp_fc_adisc_handler(void *data)
+
+ /* re-init to undo drop from zfcp_fc_adisc() */
+ port->d_id = ntoh24(adisc_resp->adisc_port_id);
+- /* port is good, unblock rport without going through erp */
+- zfcp_scsi_schedule_rport_register(port);
++ /* port is still good, nothing to do */
+ out:
+ atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+ put_device(&port->dev);
+@@ -595,9 +594,6 @@ void zfcp_fc_link_test_work(struct work_struct *work)
+ int retval;
+
+ set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
+- get_device(&port->dev);
+- port->rport_task = RPORT_DEL;
+- zfcp_scsi_rport_work(&port->rport_work);
+
+ /* only issue one test command at one time per port */
+ if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index c9b1500c2ab87..e78cfda035a19 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -406,6 +406,7 @@ static void storvsc_on_channel_callback(void *context);
+ #define STORVSC_FC_MAX_LUNS_PER_TARGET 255
+ #define STORVSC_FC_MAX_TARGETS 128
+ #define STORVSC_FC_MAX_CHANNELS 8
++#define STORVSC_FC_MAX_XFER_SIZE ((u32)(512 * 1024))
+
+ #define STORVSC_IDE_MAX_LUNS_PER_TARGET 64
+ #define STORVSC_IDE_MAX_TARGETS 1
+@@ -2071,6 +2072,9 @@ static int storvsc_probe(struct hv_device *device,
+ * protecting it from any weird value.
+ */
+ max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE);
++ if (is_fc)
++ max_xfer_bytes = min(max_xfer_bytes, STORVSC_FC_MAX_XFER_SIZE);
++
+ /* max_hw_sectors_kb */
+ host->max_sectors = max_xfer_bytes >> 9;
+ /*
+diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
+index b7cdfa65157c6..230a3250f3154 100644
+--- a/drivers/soundwire/bus.c
++++ b/drivers/soundwire/bus.c
+@@ -828,8 +828,8 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
+ "%s: initializing enumeration and init completion for Slave %d\n",
+ __func__, slave->dev_num);
+
+- init_completion(&slave->enumeration_complete);
+- init_completion(&slave->initialization_complete);
++ reinit_completion(&slave->enumeration_complete);
++ reinit_completion(&slave->initialization_complete);
+
+ } else if ((status == SDW_SLAVE_ATTACHED) &&
+ (slave->status == SDW_SLAVE_UNATTACHED)) {
+@@ -837,7 +837,7 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
+ "%s: signaling enumeration completion for Slave %d\n",
+ __func__, slave->dev_num);
+
+- complete(&slave->enumeration_complete);
++ complete_all(&slave->enumeration_complete);
+ }
+ slave->status = status;
+ mutex_unlock(&bus->bus_lock);
+@@ -1840,7 +1840,19 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
+ "%s: signaling initialization completion for Slave %d\n",
+ __func__, slave->dev_num);
+
+- complete(&slave->initialization_complete);
++ complete_all(&slave->initialization_complete);
++
++ /*
++ * If the manager became pm_runtime active, the peripherals will be
++ * restarted and attach, but their pm_runtime status may remain
++ * suspended. If the 'update_slave_status' callback initiates
++ * any sort of deferred processing, this processing would not be
++ * cancelled on pm_runtime suspend.
++ * To avoid such zombie states, we queue a request to resume.
++ * This would be a no-op in case the peripheral was being resumed
++ * by e.g. the ALSA/ASoC framework.
++ */
++ pm_request_resume(&slave->dev);
+ }
+ }
+
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 0dc8871a4b660..a0b6ae02a70b8 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -4607,7 +4607,7 @@ static void delayed_work(struct work_struct *work)
+
+ dout("mdsc delayed_work\n");
+
+- if (mdsc->stopping)
++ if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED)
+ return;
+
+ mutex_lock(&mdsc->mutex);
+@@ -4786,7 +4786,7 @@ void send_flush_mdlog(struct ceph_mds_session *s)
+ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
+ {
+ dout("pre_umount\n");
+- mdsc->stopping = 1;
++ mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN;
+
+ ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
+ ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
+diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
+index 2667350eb72cf..cd943842f0a3c 100644
+--- a/fs/ceph/mds_client.h
++++ b/fs/ceph/mds_client.h
+@@ -370,6 +370,11 @@ struct cap_wait {
+ int want;
+ };
+
++enum {
++ CEPH_MDSC_STOPPING_BEGIN = 1,
++ CEPH_MDSC_STOPPING_FLUSHED = 2,
++};
++
+ /*
+ * mds client state
+ */
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index 202ddde3d62ad..1723ec21cd470 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -1227,6 +1227,16 @@ static void ceph_kill_sb(struct super_block *s)
+ ceph_mdsc_pre_umount(fsc->mdsc);
+ flush_fs_workqueues(fsc);
+
++ /*
++ * Though the kill_anon_super() will finally trigger the
++ * sync_filesystem() anyway, we still need to do it here
++ * and then bump the stage of shutdown to stop the work
++ * queue as earlier as possible.
++ */
++ sync_filesystem(s);
++
++ fsc->mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
++
+ kill_anon_super(s);
+
+ fsc->client->extra_mon_dispatch = NULL;
+diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
+index e2113e0a848c4..1dce6b4e90885 100644
+--- a/fs/exfat/balloc.c
++++ b/fs/exfat/balloc.c
+@@ -69,7 +69,7 @@ static int exfat_allocate_bitmap(struct super_block *sb,
+ }
+ sbi->map_sectors = ((need_map_size - 1) >>
+ (sb->s_blocksize_bits)) + 1;
+- sbi->vol_amap = kmalloc_array(sbi->map_sectors,
++ sbi->vol_amap = kvmalloc_array(sbi->map_sectors,
+ sizeof(struct buffer_head *), GFP_KERNEL);
+ if (!sbi->vol_amap)
+ return -ENOMEM;
+@@ -84,7 +84,7 @@ static int exfat_allocate_bitmap(struct super_block *sb,
+ while (j < i)
+ brelse(sbi->vol_amap[j++]);
+
+- kfree(sbi->vol_amap);
++ kvfree(sbi->vol_amap);
+ sbi->vol_amap = NULL;
+ return -EIO;
+ }
+@@ -138,7 +138,7 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
+ for (i = 0; i < sbi->map_sectors; i++)
+ __brelse(sbi->vol_amap[i]);
+
+- kfree(sbi->vol_amap);
++ kvfree(sbi->vol_amap);
+ }
+
+ int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)
+diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
+index 3940a56902dd1..8475a8653c3a4 100644
+--- a/fs/exfat/dir.c
++++ b/fs/exfat/dir.c
+@@ -211,7 +211,10 @@ static void exfat_free_namebuf(struct exfat_dentry_namebuf *nb)
+ exfat_init_namebuf(nb);
+ }
+
+-/* skip iterating emit_dots when dir is empty */
++/*
++ * Before calling dir_emit*(), sbi->s_lock should be released
++ * because page fault can occur in dir_emit*().
++ */
+ #define ITER_POS_FILLED_DOTS (2)
+ static int exfat_iterate(struct file *filp, struct dir_context *ctx)
+ {
+@@ -226,11 +229,10 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx)
+ int err = 0, fake_offset = 0;
+
+ exfat_init_namebuf(nb);
+- mutex_lock(&EXFAT_SB(sb)->s_lock);
+
+ cpos = ctx->pos;
+ if (!dir_emit_dots(filp, ctx))
+- goto unlock;
++ goto out;
+
+ if (ctx->pos == ITER_POS_FILLED_DOTS) {
+ cpos = 0;
+@@ -242,16 +244,18 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx)
+ /* name buffer should be allocated before use */
+ err = exfat_alloc_namebuf(nb);
+ if (err)
+- goto unlock;
++ goto out;
+ get_new:
++ mutex_lock(&EXFAT_SB(sb)->s_lock);
++
+ if (ei->flags == ALLOC_NO_FAT_CHAIN && cpos >= i_size_read(inode))
+ goto end_of_dir;
+
+ err = exfat_readdir(inode, &cpos, &de);
+ if (err) {
+ /*
+- * At least we tried to read a sector. Move cpos to next sector
+- * position (should be aligned).
++ * At least we tried to read a sector.
++ * Move cpos to next sector position (should be aligned).
+ */
+ if (err == -EIO) {
+ cpos += 1 << (sb->s_blocksize_bits);
+@@ -274,16 +278,10 @@ get_new:
+ inum = iunique(sb, EXFAT_ROOT_INO);
+ }
+
+- /*
+- * Before calling dir_emit(), sb_lock should be released.
+- * Because page fault can occur in dir_emit() when the size
+- * of buffer given from user is larger than one page size.
+- */
+ mutex_unlock(&EXFAT_SB(sb)->s_lock);
+ if (!dir_emit(ctx, nb->lfn, strlen(nb->lfn), inum,
+ (de.attr & ATTR_SUBDIR) ? DT_DIR : DT_REG))
+- goto out_unlocked;
+- mutex_lock(&EXFAT_SB(sb)->s_lock);
++ goto out;
+ ctx->pos = cpos;
+ goto get_new;
+
+@@ -291,9 +289,8 @@ end_of_dir:
+ if (!cpos && fake_offset)
+ cpos = ITER_POS_FILLED_DOTS;
+ ctx->pos = cpos;
+-unlock:
+ mutex_unlock(&EXFAT_SB(sb)->s_lock);
+-out_unlocked:
++out:
+ /*
+ * To improve performance, free namebuf after unlock sb_lock.
+ * If namebuf is not allocated, this function do nothing
+diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
+index a610c096f3a9d..5207ce805a399 100644
+--- a/fs/ext2/ext2.h
++++ b/fs/ext2/ext2.h
+@@ -70,10 +70,7 @@ struct mb_cache;
+ * second extended-fs super-block data in memory
+ */
+ struct ext2_sb_info {
+- unsigned long s_frag_size; /* Size of a fragment in bytes */
+- unsigned long s_frags_per_block;/* Number of fragments per block */
+ unsigned long s_inodes_per_block;/* Number of inodes per block */
+- unsigned long s_frags_per_group;/* Number of fragments in a group */
+ unsigned long s_blocks_per_group;/* Number of blocks in a group */
+ unsigned long s_inodes_per_group;/* Number of inodes in a group */
+ unsigned long s_itb_per_group; /* Number of inode table blocks per group */
+@@ -187,15 +184,6 @@ static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
+ #define EXT2_INODE_SIZE(s) (EXT2_SB(s)->s_inode_size)
+ #define EXT2_FIRST_INO(s) (EXT2_SB(s)->s_first_ino)
+
+-/*
+- * Macro-instructions used to manage fragments
+- */
+-#define EXT2_MIN_FRAG_SIZE 1024
+-#define EXT2_MAX_FRAG_SIZE 4096
+-#define EXT2_MIN_FRAG_LOG_SIZE 10
+-#define EXT2_FRAG_SIZE(s) (EXT2_SB(s)->s_frag_size)
+-#define EXT2_FRAGS_PER_BLOCK(s) (EXT2_SB(s)->s_frags_per_block)
+-
+ /*
+ * Structure of a blocks group descriptor
+ */
+diff --git a/fs/ext2/super.c b/fs/ext2/super.c
+index 486a43e347950..81798b7cbde2d 100644
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -670,10 +670,9 @@ static int ext2_setup_super (struct super_block * sb,
+ es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
+ le16_add_cpu(&es->s_mnt_count, 1);
+ if (test_opt (sb, DEBUG))
+- ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
++ ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, gc=%lu, "
+ "bpg=%lu, ipg=%lu, mo=%04lx]",
+ EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize,
+- sbi->s_frag_size,
+ sbi->s_groups_count,
+ EXT2_BLOCKS_PER_GROUP(sb),
+ EXT2_INODES_PER_GROUP(sb),
+@@ -1012,14 +1011,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+ }
+ }
+
+- sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
+- le32_to_cpu(es->s_log_frag_size);
+- if (sbi->s_frag_size == 0)
+- goto cantfind_ext2;
+- sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;
+-
+ sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
+- sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
+ sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
+
+ sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
+@@ -1045,11 +1037,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+ goto failed_mount;
+ }
+
+- if (sb->s_blocksize != sbi->s_frag_size) {
++ if (es->s_log_frag_size != es->s_log_block_size) {
+ ext2_msg(sb, KERN_ERR,
+- "error: fragsize %lu != blocksize %lu"
+- "(not supported yet)",
+- sbi->s_frag_size, sb->s_blocksize);
++ "error: fragsize log %u != blocksize log %u",
++ le32_to_cpu(es->s_log_frag_size), sb->s_blocksize_bits);
+ goto failed_mount;
+ }
+
+@@ -1059,12 +1050,6 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+ sbi->s_blocks_per_group);
+ goto failed_mount;
+ }
+- if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
+- ext2_msg(sb, KERN_ERR,
+- "error: #fragments per group too big: %lu",
+- sbi->s_frags_per_group);
+- goto failed_mount;
+- }
+ if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+ sbi->s_inodes_per_group > sb->s_blocksize * 8) {
+ ext2_msg(sb, KERN_ERR,
+diff --git a/fs/file.c b/fs/file.c
+index 1501bbf6306e9..69a51d37b66d9 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -1062,12 +1062,28 @@ unsigned long __fdget_raw(unsigned int fd)
+ return __fget_light(fd, 0);
+ }
+
++/*
++ * Try to avoid f_pos locking. We only need it if the
++ * file is marked for FMODE_ATOMIC_POS, and it can be
++ * accessed multiple ways.
++ *
++ * Always do it for directories, because pidfd_getfd()
++ * can make a file accessible even if it otherwise would
++ * not be, and for directories this is a correctness
++ * issue, not a "POSIX requirement".
++ */
++static inline bool file_needs_f_pos_lock(struct file *file)
++{
++ return (file->f_mode & FMODE_ATOMIC_POS) &&
++ (file_count(file) > 1 || S_ISDIR(file_inode(file)->i_mode));
++}
++
+ unsigned long __fdget_pos(unsigned int fd)
+ {
+ unsigned long v = __fdget(fd);
+ struct file *file = (struct file *)(v & ~3);
+
+- if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
++ if (file && file_needs_f_pos_lock(file)) {
+ v |= FDPUT_POS_UNLOCK;
+ mutex_lock(&file->f_pos_lock);
+ }
+diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
+index c0c6bcbc8c05c..81c22df27c725 100644
+--- a/fs/ntfs3/attrlist.c
++++ b/fs/ntfs3/attrlist.c
+@@ -52,7 +52,7 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
+
+ if (!attr->non_res) {
+ lsize = le32_to_cpu(attr->res.data_size);
+- le = kmalloc(al_aligned(lsize), GFP_NOFS);
++ le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
+ if (!le) {
+ err = -ENOMEM;
+ goto out;
+@@ -80,7 +80,7 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
+ if (err < 0)
+ goto out;
+
+- le = kmalloc(al_aligned(lsize), GFP_NOFS);
++ le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
+ if (!le) {
+ err = -ENOMEM;
+ goto out;
+diff --git a/fs/open.c b/fs/open.c
+index e93c33069055b..159a2765b7eb2 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -1126,7 +1126,7 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op)
+ lookup_flags |= LOOKUP_IN_ROOT;
+ if (how->resolve & RESOLVE_CACHED) {
+ /* Don't bother even trying for create/truncate/tmpfile open */
+- if (flags & (O_TRUNC | O_CREAT | O_TMPFILE))
++ if (flags & (O_TRUNC | O_CREAT | __O_TMPFILE))
+ return -EAGAIN;
+ lookup_flags |= LOOKUP_CACHED;
+ }
+diff --git a/fs/super.c b/fs/super.c
+index 297630540f43c..048576b19af63 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -863,6 +863,7 @@ int reconfigure_super(struct fs_context *fc)
+ struct super_block *sb = fc->root->d_sb;
+ int retval;
+ bool remount_ro = false;
++ bool remount_rw = false;
+ bool force = fc->sb_flags & SB_FORCE;
+
+ if (fc->sb_flags_mask & ~MS_RMT_MASK)
+@@ -880,7 +881,7 @@ int reconfigure_super(struct fs_context *fc)
+ bdev_read_only(sb->s_bdev))
+ return -EACCES;
+ #endif
+-
++ remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
+ remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
+ }
+
+@@ -910,6 +911,14 @@ int reconfigure_super(struct fs_context *fc)
+ if (retval)
+ return retval;
+ }
++ } else if (remount_rw) {
++ /*
++ * We set s_readonly_remount here to protect filesystem's
++ * reconfigure code from writes from userspace until
++ * reconfigure finishes.
++ */
++ sb->s_readonly_remount = 1;
++ smp_wmb();
+ }
+
+ if (fc->ops->reconfigure) {
+diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
+index 5a59d56a2038c..1e9c520411f84 100644
+--- a/fs/sysv/itree.c
++++ b/fs/sysv/itree.c
+@@ -145,6 +145,10 @@ static int alloc_branch(struct inode *inode,
+ */
+ parent = block_to_cpu(SYSV_SB(inode->i_sb), branch[n-1].key);
+ bh = sb_getblk(inode->i_sb, parent);
++ if (!bh) {
++ sysv_free_block(inode->i_sb, branch[n].key);
++ break;
++ }
+ lock_buffer(bh);
+ memset(bh->b_data, 0, blocksize);
+ branch[n].bh = bh;
+diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
+index 20c93f08c9933..95a1d214108a5 100644
+--- a/include/asm-generic/word-at-a-time.h
++++ b/include/asm-generic/word-at-a-time.h
+@@ -38,7 +38,7 @@ static inline long find_zero(unsigned long mask)
+ return (mask >> 8) ? byte : byte + 1;
+ }
+
+-static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
++static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+ {
+ unsigned long rhs = val | c->low_bits;
+ *data = rhs;
+diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
+index cd5b62db90845..e63a63aa47a37 100644
+--- a/include/linux/pm_wakeirq.h
++++ b/include/linux/pm_wakeirq.h
+@@ -17,8 +17,8 @@
+ #ifdef CONFIG_PM
+
+ extern int dev_pm_set_wake_irq(struct device *dev, int irq);
+-extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
+- int irq);
++extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq);
++extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq);
+ extern void dev_pm_clear_wake_irq(struct device *dev);
+ extern void dev_pm_enable_wake_irq(struct device *dev);
+ extern void dev_pm_disable_wake_irq(struct device *dev);
+@@ -35,6 +35,11 @@ static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+ return 0;
+ }
+
++static inline int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
++{
++ return 0;
++}
++
+ static inline void dev_pm_clear_wake_irq(struct device *dev)
+ {
+ }
+diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
+index f34dbd0db7952..a84063492c71a 100644
+--- a/include/linux/qed/qed_chain.h
++++ b/include/linux/qed/qed_chain.h
+@@ -268,14 +268,15 @@ static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain)
+ }
+
+ /**
+- * @brief qed_chain_advance_page -
++ * qed_chain_advance_page(): Advance the next element across pages for a
++ * linked chain.
+ *
+- * Advance the next element across pages for a linked chain
++ * @p_chain: P_chain.
++ * @p_next_elem: P_next_elem.
++ * @idx_to_inc: Idx_to_inc.
++ * @page_to_inc: page_to_inc.
+ *
+- * @param p_chain
+- * @param p_next_elem
+- * @param idx_to_inc
+- * @param page_to_inc
++ * Return: Void.
+ */
+ static inline void
+ qed_chain_advance_page(struct qed_chain *p_chain,
+@@ -336,12 +337,14 @@ qed_chain_advance_page(struct qed_chain *p_chain,
+ } while (0)
+
+ /**
+- * @brief qed_chain_return_produced -
++ * qed_chain_return_produced(): A chain in which the driver "Produces"
++ * elements should use this API
++ * to indicate previous produced elements
++ * are now consumed.
+ *
+- * A chain in which the driver "Produces" elements should use this API
+- * to indicate previous produced elements are now consumed.
++ * @p_chain: Chain.
+ *
+- * @param p_chain
++ * Return: Void.
+ */
+ static inline void qed_chain_return_produced(struct qed_chain *p_chain)
+ {
+@@ -353,15 +356,15 @@ static inline void qed_chain_return_produced(struct qed_chain *p_chain)
+ }
+
+ /**
+- * @brief qed_chain_produce -
++ * qed_chain_produce(): A chain in which the driver "Produces"
++ * elements should use this to get a pointer to
++ * the next element which can be "Produced". It's driver
++ * responsibility to validate that the chain has room for
++ * new element.
+ *
+- * A chain in which the driver "Produces" elements should use this to get
+- * a pointer to the next element which can be "Produced". It's driver
+- * responsibility to validate that the chain has room for new element.
++ * @p_chain: Chain.
+ *
+- * @param p_chain
+- *
+- * @return void*, a pointer to next element
++ * Return: void*, a pointer to next element.
+ */
+ static inline void *qed_chain_produce(struct qed_chain *p_chain)
+ {
+@@ -395,14 +398,11 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
+ }
+
+ /**
+- * @brief qed_chain_get_capacity -
+- *
+- * Get the maximum number of BDs in chain
++ * qed_chain_get_capacity(): Get the maximum number of BDs in chain
+ *
+- * @param p_chain
+- * @param num
++ * @p_chain: Chain.
+ *
+- * @return number of unusable BDs
++ * Return: number of unusable BDs.
+ */
+ static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
+ {
+@@ -410,12 +410,14 @@ static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
+ }
+
+ /**
+- * @brief qed_chain_recycle_consumed -
++ * qed_chain_recycle_consumed(): Returns an element which was
++ * previously consumed;
++ * Increments producers so they could
++ * be written to FW.
+ *
+- * Returns an element which was previously consumed;
+- * Increments producers so they could be written to FW.
++ * @p_chain: Chain.
+ *
+- * @param p_chain
++ * Return: Void.
+ */
+ static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
+ {
+@@ -427,14 +429,13 @@ static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
+ }
+
+ /**
+- * @brief qed_chain_consume -
++ * qed_chain_consume(): A Chain in which the driver utilizes data written
++ * by a different source (i.e., FW) should use this to
++ * access passed buffers.
+ *
+- * A Chain in which the driver utilizes data written by a different source
+- * (i.e., FW) should use this to access passed buffers.
++ * @p_chain: Chain.
+ *
+- * @param p_chain
+- *
+- * @return void*, a pointer to the next buffer written
++ * Return: void*, a pointer to the next buffer written.
+ */
+ static inline void *qed_chain_consume(struct qed_chain *p_chain)
+ {
+@@ -468,9 +469,11 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
+ }
+
+ /**
+- * @brief qed_chain_reset - Resets the chain to its start state
++ * qed_chain_reset(): Resets the chain to its start state.
++ *
++ * @p_chain: pointer to a previously allocated chain.
+ *
+- * @param p_chain pointer to a previously allocated chain
++ * Return: Void.
+ */
+ static inline void qed_chain_reset(struct qed_chain *p_chain)
+ {
+@@ -519,13 +522,12 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
+ }
+
+ /**
+- * @brief qed_chain_get_last_elem -
++ * qed_chain_get_last_elem(): Returns a pointer to the last element of the
++ * chain.
+ *
+- * Returns a pointer to the last element of the chain
++ * @p_chain: Chain.
+ *
+- * @param p_chain
+- *
+- * @return void*
++ * Return: void*.
+ */
+ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
+ {
+@@ -563,10 +565,13 @@ out:
+ }
+
+ /**
+- * @brief qed_chain_set_prod - sets the prod to the given value
++ * qed_chain_set_prod(): sets the prod to the given value.
++ *
++ * @p_chain: Chain.
++ * @prod_idx: Prod Idx.
++ * @p_prod_elem: Prod elem.
+ *
+- * @param prod_idx
+- * @param p_prod_elem
++ * Return: Void.
+ */
+ static inline void qed_chain_set_prod(struct qed_chain *p_chain,
+ u32 prod_idx, void *p_prod_elem)
+@@ -610,9 +615,11 @@ static inline void qed_chain_set_prod(struct qed_chain *p_chain,
+ }
+
+ /**
+- * @brief qed_chain_pbl_zero_mem - set chain memory to 0
++ * qed_chain_pbl_zero_mem(): set chain memory to 0.
++ *
++ * @p_chain: Chain.
+ *
+- * @param p_chain
++ * Return: Void.
+ */
+ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
+ {
+diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
+index 850b989916703..f39451aaaeec2 100644
+--- a/include/linux/qed/qed_if.h
++++ b/include/linux/qed/qed_if.h
+@@ -819,47 +819,47 @@ struct qed_common_cb_ops {
+
+ struct qed_selftest_ops {
+ /**
+- * @brief selftest_interrupt - Perform interrupt test
++ * selftest_interrupt(): Perform interrupt test.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return 0 on success, error otherwise.
++ * Return: 0 on success, error otherwise.
+ */
+ int (*selftest_interrupt)(struct qed_dev *cdev);
+
+ /**
+- * @brief selftest_memory - Perform memory test
++ * selftest_memory(): Perform memory test.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return 0 on success, error otherwise.
++ * Return: 0 on success, error otherwise.
+ */
+ int (*selftest_memory)(struct qed_dev *cdev);
+
+ /**
+- * @brief selftest_register - Perform register test
++ * selftest_register(): Perform register test.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return 0 on success, error otherwise.
++ * Return: 0 on success, error otherwise.
+ */
+ int (*selftest_register)(struct qed_dev *cdev);
+
+ /**
+- * @brief selftest_clock - Perform clock test
++ * selftest_clock(): Perform clock test.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return 0 on success, error otherwise.
++ * Return: 0 on success, error otherwise.
+ */
+ int (*selftest_clock)(struct qed_dev *cdev);
+
+ /**
+- * @brief selftest_nvram - Perform nvram test
++ * selftest_nvram(): Perform nvram test.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return 0 on success, error otherwise.
++ * Return: 0 on success, error otherwise.
+ */
+ int (*selftest_nvram) (struct qed_dev *cdev);
+ };
+@@ -927,47 +927,53 @@ struct qed_common_ops {
+ enum qed_hw_err_type err_type);
+
+ /**
+- * @brief can_link_change - can the instance change the link or not
++ * can_link_change(): can the instance change the link or not.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return true if link-change is allowed, false otherwise.
++ * Return: true if link-change is allowed, false otherwise.
+ */
+ bool (*can_link_change)(struct qed_dev *cdev);
+
+ /**
+- * @brief set_link - set links according to params
++ * set_link(): set links according to params.
+ *
+- * @param cdev
+- * @param params - values used to override the default link configuration
++ * @cdev: Qed dev pointer.
++ * @params: values used to override the default link configuration.
+ *
+- * @return 0 on success, error otherwise.
++ * Return: 0 on success, error otherwise.
+ */
+ int (*set_link)(struct qed_dev *cdev,
+ struct qed_link_params *params);
+
+ /**
+- * @brief get_link - returns the current link state.
++ * get_link(): returns the current link state.
+ *
+- * @param cdev
+- * @param if_link - structure to be filled with current link configuration.
++ * @cdev: Qed dev pointer.
++ * @if_link: structure to be filled with current link configuration.
++ *
++ * Return: Void.
+ */
+ void (*get_link)(struct qed_dev *cdev,
+ struct qed_link_output *if_link);
+
+ /**
+- * @brief - drains chip in case Tx completions fail to arrive due to pause.
++ * drain(): drains chip in case Tx completions fail to arrive due to pause.
++ *
++ * @cdev: Qed dev pointer.
+ *
+- * @param cdev
++ * Return: Int.
+ */
+ int (*drain)(struct qed_dev *cdev);
+
+ /**
+- * @brief update_msglvl - update module debug level
++ * update_msglvl(): update module debug level.
+ *
+- * @param cdev
+- * @param dp_module
+- * @param dp_level
++ * @cdev: Qed dev pointer.
++ * @dp_module: Debug module.
++ * @dp_level: Debug level.
++ *
++ * Return: Void.
+ */
+ void (*update_msglvl)(struct qed_dev *cdev,
+ u32 dp_module,
+@@ -981,70 +987,73 @@ struct qed_common_ops {
+ struct qed_chain *p_chain);
+
+ /**
+- * @brief nvm_flash - Flash nvm data.
++ * nvm_flash(): Flash nvm data.
+ *
+- * @param cdev
+- * @param name - file containing the data
++ * @cdev: Qed dev pointer.
++ * @name: file containing the data.
+ *
+- * @return 0 on success, error otherwise.
++ * Return: 0 on success, error otherwise.
+ */
+ int (*nvm_flash)(struct qed_dev *cdev, const char *name);
+
+ /**
+- * @brief nvm_get_image - reads an entire image from nvram
++ * nvm_get_image(): reads an entire image from nvram.
+ *
+- * @param cdev
+- * @param type - type of the request nvram image
+- * @param buf - preallocated buffer to fill with the image
+- * @param len - length of the allocated buffer
++ * @cdev: Qed dev pointer.
++ * @type: type of the request nvram image.
++ * @buf: preallocated buffer to fill with the image.
++ * @len: length of the allocated buffer.
+ *
+- * @return 0 on success, error otherwise
++ * Return: 0 on success, error otherwise.
+ */
+ int (*nvm_get_image)(struct qed_dev *cdev,
+ enum qed_nvm_images type, u8 *buf, u16 len);
+
+ /**
+- * @brief set_coalesce - Configure Rx coalesce value in usec
++ * set_coalesce(): Configure Rx coalesce value in usec.
+ *
+- * @param cdev
+- * @param rx_coal - Rx coalesce value in usec
+- * @param tx_coal - Tx coalesce value in usec
+- * @param qid - Queue index
+- * @param sb_id - Status Block Id
++ * @cdev: Qed dev pointer.
++ * @rx_coal: Rx coalesce value in usec.
++ * @tx_coal: Tx coalesce value in usec.
++ * @handle: Handle.
+ *
+- * @return 0 on success, error otherwise.
++ * Return: 0 on success, error otherwise.
+ */
+ int (*set_coalesce)(struct qed_dev *cdev,
+ u16 rx_coal, u16 tx_coal, void *handle);
+
+ /**
+- * @brief set_led - Configure LED mode
++ * set_led() - Configure LED mode.
+ *
+- * @param cdev
+- * @param mode - LED mode
++ * @cdev: Qed dev pointer.
++ * @mode: LED mode.
+ *
+- * @return 0 on success, error otherwise.
++ * Return: 0 on success, error otherwise.
+ */
+ int (*set_led)(struct qed_dev *cdev,
+ enum qed_led_mode mode);
+
+ /**
+- * @brief attn_clr_enable - Prevent attentions from being reasserted
++ * attn_clr_enable(): Prevent attentions from being reasserted.
++ *
++ * @cdev: Qed dev pointer.
++ * @clr_enable: Clear enable.
+ *
+- * @param cdev
+- * @param clr_enable
++ * Return: Void.
+ */
+ void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable);
+
+ /**
+- * @brief db_recovery_add - add doorbell information to the doorbell
+- * recovery mechanism.
++ * db_recovery_add(): add doorbell information to the doorbell
++ * recovery mechanism.
+ *
+- * @param cdev
+- * @param db_addr - doorbell address
+- * @param db_data - address of where db_data is stored
+- * @param db_is_32b - doorbell is 32b pr 64b
+- * @param db_is_user - doorbell recovery addresses are user or kernel space
++ * @cdev: Qed dev pointer.
++ * @db_addr: Doorbell address.
++ * @db_data: Address of where db_data is stored.
++ * @db_width: Doorbell is 32b or 64b.
++ * @db_space: Doorbell recovery addresses are user or kernel space.
++ *
++ * Return: Int.
+ */
+ int (*db_recovery_add)(struct qed_dev *cdev,
+ void __iomem *db_addr,
+@@ -1053,114 +1062,130 @@ struct qed_common_ops {
+ enum qed_db_rec_space db_space);
+
+ /**
+- * @brief db_recovery_del - remove doorbell information from the doorbell
++ * db_recovery_del(): remove doorbell information from the doorbell
+ * recovery mechanism. db_data serves as key (db_addr is not unique).
+ *
+- * @param cdev
+- * @param db_addr - doorbell address
+- * @param db_data - address where db_data is stored. Serves as key for the
+- * entry to delete.
++ * @cdev: Qed dev pointer.
++ * @db_addr: Doorbell address.
++ * @db_data: Address where db_data is stored. Serves as key for the
++ * entry to delete.
++ *
++ * Return: Int.
+ */
+ int (*db_recovery_del)(struct qed_dev *cdev,
+ void __iomem *db_addr, void *db_data);
+
+ /**
+- * @brief recovery_process - Trigger a recovery process
++ * recovery_process(): Trigger a recovery process.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return 0 on success, error otherwise.
++ * Return: 0 on success, error otherwise.
+ */
+ int (*recovery_process)(struct qed_dev *cdev);
+
+ /**
+- * @brief recovery_prolog - Execute the prolog operations of a recovery process
++ * recovery_prolog(): Execute the prolog operations of a recovery process.
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return 0 on success, error otherwise.
++ * Return: 0 on success, error otherwise.
+ */
+ int (*recovery_prolog)(struct qed_dev *cdev);
+
+ /**
+- * @brief update_drv_state - API to inform the change in the driver state.
++ * update_drv_state(): API to inform the change in the driver state.
+ *
+- * @param cdev
+- * @param active
++ * @cdev: Qed dev pointer.
++ * @active: Active
+ *
++ * Return: Int.
+ */
+ int (*update_drv_state)(struct qed_dev *cdev, bool active);
+
+ /**
+- * @brief update_mac - API to inform the change in the mac address
++ * update_mac(): API to inform the change in the mac address.
+ *
+- * @param cdev
+- * @param mac
++ * @cdev: Qed dev pointer.
++ * @mac: MAC.
+ *
++ * Return: Int.
+ */
+ int (*update_mac)(struct qed_dev *cdev, u8 *mac);
+
+ /**
+- * @brief update_mtu - API to inform the change in the mtu
++ * update_mtu(): API to inform the change in the mtu.
+ *
+- * @param cdev
+- * @param mtu
++ * @cdev: Qed dev pointer.
++ * @mtu: MTU.
+ *
++ * Return: Int.
+ */
+ int (*update_mtu)(struct qed_dev *cdev, u16 mtu);
+
+ /**
+- * @brief update_wol - update of changes in the WoL configuration
++ * update_wol(): Update of changes in the WoL configuration.
++ *
++ * @cdev: Qed dev pointer.
++ * @enabled: true iff WoL should be enabled.
+ *
+- * @param cdev
+- * @param enabled - true iff WoL should be enabled.
++ * Return: Int.
+ */
+ int (*update_wol) (struct qed_dev *cdev, bool enabled);
+
+ /**
+- * @brief read_module_eeprom
++ * read_module_eeprom(): Read EEPROM.
+ *
+- * @param cdev
+- * @param buf - buffer
+- * @param dev_addr - PHY device memory region
+- * @param offset - offset into eeprom contents to be read
+- * @param len - buffer length, i.e., max bytes to be read
++ * @cdev: Qed dev pointer.
++ * @buf: buffer.
++ * @dev_addr: PHY device memory region.
++ * @offset: offset into eeprom contents to be read.
++ * @len: buffer length, i.e., max bytes to be read.
++ *
++ * Return: Int.
+ */
+ int (*read_module_eeprom)(struct qed_dev *cdev,
+ char *buf, u8 dev_addr, u32 offset, u32 len);
+
+ /**
+- * @brief get_affin_hwfn_idx
++ * get_affin_hwfn_idx(): Get affine HW function.
++ *
++ * @cdev: Qed dev pointer.
+ *
+- * @param cdev
++ * Return: u8.
+ */
+ u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev);
+
+ /**
+- * @brief read_nvm_cfg - Read NVM config attribute value.
+- * @param cdev
+- * @param buf - buffer
+- * @param cmd - NVM CFG command id
+- * @param entity_id - Entity id
++ * read_nvm_cfg(): Read NVM config attribute value.
++ *
++ * @cdev: Qed dev pointer.
++ * @buf: Buffer.
++ * @cmd: NVM CFG command id.
++ * @entity_id: Entity id.
+ *
++ * Return: Int.
+ */
+ int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd,
+ u32 entity_id);
+ /**
+- * @brief read_nvm_cfg - Read NVM config attribute value.
+- * @param cdev
+- * @param cmd - NVM CFG command id
++ * read_nvm_cfg_len(): Read NVM config attribute value.
+ *
+- * @return config id length, 0 on error.
++ * @cdev: Qed dev pointer.
++ * @cmd: NVM CFG command id.
++ *
++ * Return: config id length, 0 on error.
+ */
+ int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd);
+
+ /**
+- * @brief set_grc_config - Configure value for grc config id.
+- * @param cdev
+- * @param cfg_id - grc config id
+- * @param val - grc config value
++ * set_grc_config(): Configure value for grc config id.
++ *
++ * @cdev: Qed dev pointer.
++ * @cfg_id: grc config id
++ * @val: grc config value
+ *
++ * Return: Int.
+ */
+ int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val);
+
+@@ -1397,18 +1422,16 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
+ }
+
+ /**
++ * qed_sb_ack(): This function creates an update command for interrupts
++ * that is written to the IGU.
+ *
+- * @brief This function creates an update command for interrupts that is
+- * written to the IGU.
+- *
+- * @param sb_info - This is the structure allocated and
+- * initialized per status block. Assumption is
+- * that it was initialized using qed_sb_init
+- * @param int_cmd - Enable/Disable/Nop
+- * @param upd_flg - whether igu consumer should be
+- * updated.
++ * @sb_info: This is the structure allocated and
++ * initialized per status block. Assumption is
++ * that it was initialized using qed_sb_init
++ * @int_cmd: Enable/Disable/Nop
++ * @upd_flg: Whether igu consumer should be updated.
+ *
+- * @return inline void
++ * Return: inline void.
+ */
+ static inline void qed_sb_ack(struct qed_sb_info *sb_info,
+ enum igu_int_cmd int_cmd,
+diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
+index 04180d9af560e..494cdc3cd840b 100644
+--- a/include/linux/qed/qed_iscsi_if.h
++++ b/include/linux/qed/qed_iscsi_if.h
+@@ -182,7 +182,7 @@ struct qed_iscsi_cb_ops {
+ * @param stats - pointer to struck that would be filled
+ * we stats
+ * @return 0 on success, error otherwise.
+- * @change_mac Change MAC of interface
++ * @change_mac: Change MAC of interface
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param mac - new MAC to configure.
+diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
+index ff808d2488835..5b67cd03276eb 100644
+--- a/include/linux/qed/qed_ll2_if.h
++++ b/include/linux/qed/qed_ll2_if.h
+@@ -208,57 +208,57 @@ enum qed_ll2_xmit_flags {
+
+ struct qed_ll2_ops {
+ /**
+- * @brief start - initializes ll2
++ * start(): Initializes ll2.
+ *
+- * @param cdev
+- * @param params - protocol driver configuration for the ll2.
++ * @cdev: Qed dev pointer.
++ * @params: Protocol driver configuration for the ll2.
+ *
+- * @return 0 on success, otherwise error value.
++ * Return: 0 on success, otherwise error value.
+ */
+ int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
+
+ /**
+- * @brief stop - stops the ll2
++ * stop(): Stops the ll2
+ *
+- * @param cdev
++ * @cdev: Qed dev pointer.
+ *
+- * @return 0 on success, otherwise error value.
++ * Return: 0 on success, otherwise error value.
+ */
+ int (*stop)(struct qed_dev *cdev);
+
+ /**
+- * @brief start_xmit - transmits an skb over the ll2 interface
++ * start_xmit(): Transmits an skb over the ll2 interface
+ *
+- * @param cdev
+- * @param skb
+- * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags.
++ * @cdev: Qed dev pointer.
++ * @skb: SKB.
++ * @xmit_flags: Transmit options defined by the enum qed_ll2_xmit_flags.
+ *
+- * @return 0 on success, otherwise error value.
++ * Return: 0 on success, otherwise error value.
+ */
+ int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb,
+ unsigned long xmit_flags);
+
+ /**
+- * @brief register_cb_ops - protocol driver register the callback for Rx/Tx
++ * register_cb_ops(): Protocol driver register the callback for Rx/Tx
+ * packets. Should be called before `start'.
+ *
+- * @param cdev
+- * @param cookie - to be passed to the callback functions.
+- * @param ops - the callback functions to register for Rx / Tx.
++ * @cdev: Qed dev pointer.
++ * @cookie: to be passed to the callback functions.
++ * @ops: the callback functions to register for Rx / Tx.
+ *
+- * @return 0 on success, otherwise error value.
++ * Return: 0 on success, otherwise error value.
+ */
+ void (*register_cb_ops)(struct qed_dev *cdev,
+ const struct qed_ll2_cb_ops *ops,
+ void *cookie);
+
+ /**
+- * @brief get LL2 related statistics
++ * get_stats(): Get LL2 related statistics.
+ *
+- * @param cdev
+- * @param stats - pointer to struct that would be filled with stats
++ * @cdev: Qed dev pointer.
++ * @stats: Pointer to struct that would be filled with stats.
+ *
+- * @return 0 on success, error otherwise.
++ * Return: 0 on success, error otherwise.
+ */
+ int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
+ };
+diff --git a/include/linux/qed/qed_nvmetcp_if.h b/include/linux/qed/qed_nvmetcp_if.h
+index 14671bc19ed11..1d51df347560d 100644
+--- a/include/linux/qed/qed_nvmetcp_if.h
++++ b/include/linux/qed/qed_nvmetcp_if.h
+@@ -171,6 +171,23 @@ struct nvmetcp_task_params {
+ * @param dest_port
+ * @clear_all_filters: Clear all filters.
+ * @param cdev
++ * @init_read_io: Init read IO.
++ * @task_params
++ * @cmd_pdu_header
++ * @nvme_cmd
++ * @sgl_task_params
++ * @init_write_io: Init write IO.
++ * @task_params
++ * @cmd_pdu_header
++ * @nvme_cmd
++ * @sgl_task_params
++ * @init_icreq_exchange: Exchange ICReq.
++ * @task_params
++ * @init_conn_req_pdu_hdr
++ * @tx_sgl_task_params
++ * @rx_sgl_task_params
++ * @init_task_cleanup: Init task cleanup.
++ * @task_params
+ */
+ struct qed_nvmetcp_ops {
+ const struct qed_common_ops *common;
+diff --git a/include/net/vxlan.h b/include/net/vxlan.h
+index cf1d870f7b9a8..e149a0b6f9a3c 100644
+--- a/include/net/vxlan.h
++++ b/include/net/vxlan.h
+@@ -497,12 +497,12 @@ static inline void vxlan_flag_attr_error(int attrtype,
+ }
+
+ static inline bool vxlan_fdb_nh_path_select(struct nexthop *nh,
+- int hash,
++ u32 hash,
+ struct vxlan_rdst *rdst)
+ {
+ struct fib_nh_common *nhc;
+
+- nhc = nexthop_path_fdb_result(nh, hash);
++ nhc = nexthop_path_fdb_result(nh, hash >> 1);
+ if (unlikely(!nhc))
+ return false;
+
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 774b1ae8adf7c..6f1d88bfd690b 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -7802,12 +7802,21 @@ static int io_run_task_work_sig(void)
+ return -EINTR;
+ }
+
++static bool current_pending_io(void)
++{
++ struct io_uring_task *tctx = current->io_uring;
++
++ if (!tctx)
++ return false;
++ return percpu_counter_read_positive(&tctx->inflight);
++}
++
+ /* when returns >0, the caller should retry */
+ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ struct io_wait_queue *iowq,
+ ktime_t *timeout)
+ {
+- int token, ret;
++ int io_wait, ret;
+
+ /* make sure we run task_work before checking for signals */
+ ret = io_run_task_work_sig();
+@@ -7818,15 +7827,17 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ return 1;
+
+ /*
+- * Use io_schedule_prepare/finish, so cpufreq can take into account
+- * that the task is waiting for IO - turns out to be important for low
+- * QD IO.
++ * Mark us as being in io_wait if we have pending requests, so cpufreq
++ * can take into account that the task is waiting for IO - turns out
++ * to be important for low QD IO.
+ */
+- token = io_schedule_prepare();
++ io_wait = current->in_iowait;
++ if (current_pending_io())
++ current->in_iowait = 1;
+ ret = 1;
+ if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
+ ret = -ETIME;
+- io_schedule_finish(token);
++ current->in_iowait = io_wait;
+ return ret;
+ }
+
+diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
+index db6221773e43f..8d1c4b3ee7604 100644
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -26,6 +26,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/kthread.h>
+ #include <linux/capability.h>
++#include <linux/completion.h>
+ #include <trace/events/xdp.h>
+
+ #include <linux/netdevice.h> /* netif_receive_skb_list */
+@@ -70,6 +71,7 @@ struct bpf_cpu_map_entry {
+ struct rcu_head rcu;
+
+ struct work_struct kthread_stop_wq;
++ struct completion kthread_running;
+ };
+
+ struct bpf_cpu_map {
+@@ -133,11 +135,17 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
+ * invoked cpu_map_kthread_stop(). Catch any broken behaviour
+ * gracefully and warn once.
+ */
+- struct xdp_frame *xdpf;
++ void *ptr;
+
+- while ((xdpf = ptr_ring_consume(ring)))
+- if (WARN_ON_ONCE(xdpf))
+- xdp_return_frame(xdpf);
++ while ((ptr = ptr_ring_consume(ring))) {
++ WARN_ON_ONCE(1);
++ if (unlikely(__ptr_test_bit(0, &ptr))) {
++ __ptr_clear_bit(0, &ptr);
++ kfree_skb(ptr);
++ continue;
++ }
++ xdp_return_frame(ptr);
++ }
+ }
+
+ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
+@@ -157,7 +165,6 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
+ static void cpu_map_kthread_stop(struct work_struct *work)
+ {
+ struct bpf_cpu_map_entry *rcpu;
+- int err;
+
+ rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
+
+@@ -167,14 +174,7 @@ static void cpu_map_kthread_stop(struct work_struct *work)
+ rcu_barrier();
+
+ /* kthread_stop will wake_up_process and wait for it to complete */
+- err = kthread_stop(rcpu->kthread);
+- if (err) {
+- /* kthread_stop may be called before cpu_map_kthread_run
+- * is executed, so we need to release the memory related
+- * to rcpu.
+- */
+- put_cpu_map_entry(rcpu);
+- }
++ kthread_stop(rcpu->kthread);
+ }
+
+ static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
+@@ -302,11 +302,11 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
+ return nframes;
+ }
+
+-
+ static int cpu_map_kthread_run(void *data)
+ {
+ struct bpf_cpu_map_entry *rcpu = data;
+
++ complete(&rcpu->kthread_running);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /* When kthread gives stop order, then rcpu have been disconnected
+@@ -469,6 +469,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
+ goto free_ptr_ring;
+
+ /* Setup kthread */
++ init_completion(&rcpu->kthread_running);
+ rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
+ "cpumap/%d/map:%d", cpu,
+ map->id);
+@@ -482,6 +483,12 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
+ kthread_bind(rcpu->kthread, cpu);
+ wake_up_process(rcpu->kthread);
+
++ /* Make sure kthread has been running, so kthread_stop() will not
++ * stop the kthread prematurely and all pending frames or skbs
++ * will be handled by the kthread before kthread_stop() returns.
++ */
++ wait_for_completion(&rcpu->kthread_running);
++
+ return rcpu;
+
+ free_prog:
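The completion added above implements a common start-up handshake: the creator must not call kthread_stop() until the thread function has actually begun running. A schematic kernel-module-style sketch of the same pattern (worker_fn, demo_init and demo_exit are made-up names for the demo; this is not code from the patch):

    #include <linux/completion.h>
    #include <linux/delay.h>
    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/module.h>

    static struct task_struct *worker;
    static DECLARE_COMPLETION(worker_running);

    static int worker_fn(void *arg)
    {
            complete(&worker_running);      /* tell the creator we are live */
            while (!kthread_should_stop())
                    msleep_interruptible(100);
            return 0;
    }

    static int __init demo_init(void)
    {
            worker = kthread_create(worker_fn, NULL, "demo-worker");
            if (IS_ERR(worker))
                    return PTR_ERR(worker);
            wake_up_process(worker);
            /* Without this wait, kthread_stop() could win the race and the
             * thread function would never get to run. */
            wait_for_completion(&worker_running);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            kthread_stop(worker);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");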
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 97052b2dff7ea..c7f13da672c9d 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1224,6 +1224,11 @@ static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
+ return 0;
+ }
+
++static int perf_mux_hrtimer_restart_ipi(void *arg)
++{
++ return perf_mux_hrtimer_restart(arg);
++}
++
+ void perf_pmu_disable(struct pmu *pmu)
+ {
+ int *count = this_cpu_ptr(pmu->pmu_disable_count);
+@@ -11137,8 +11142,7 @@ perf_event_mux_interval_ms_store(struct device *dev,
+ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+ cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
+
+- cpu_function_call(cpu,
+- (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
++ cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpuctx);
+ }
+ cpus_read_unlock();
+ mutex_unlock(&mux_interval_mutex);
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 8b3531172d8e2..6352a41380e53 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -662,7 +662,6 @@ static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
+ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
+ void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
+ {
+- int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
+ struct perf_raw_frag frag = {
+ .copy = ctx_copy,
+ .size = ctx_size,
+@@ -679,8 +678,12 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
+ };
+ struct perf_sample_data *sd;
+ struct pt_regs *regs;
++ int nest_level;
+ u64 ret;
+
++ preempt_disable();
++ nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
++
+ if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
+ ret = -EBUSY;
+ goto out;
+@@ -695,6 +698,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
+ ret = __bpf_perf_event_output(regs, map, flags, sd);
+ out:
+ this_cpu_dec(bpf_event_output_nest_level);
++ preempt_enable();
+ return ret;
+ }
+
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index a267c9b6bcef4..756523e5402a8 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -45,6 +45,7 @@ static const struct proto_ops l2cap_sock_ops;
+ static void l2cap_sock_init(struct sock *sk, struct sock *parent);
+ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+ int proto, gfp_t prio, int kern);
++static void l2cap_sock_cleanup_listen(struct sock *parent);
+
+ bool l2cap_is_socket(struct socket *sock)
+ {
+@@ -1414,6 +1415,7 @@ static int l2cap_sock_release(struct socket *sock)
+ if (!sk)
+ return 0;
+
++ l2cap_sock_cleanup_listen(sk);
+ bt_sock_unlink(&l2cap_sk_list, sk);
+
+ err = l2cap_sock_shutdown(sock, SHUT_RDWR);
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index f6b7436458aeb..0c5e0d2c609e3 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -3330,17 +3330,24 @@ static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
+ int ret;
+
+ dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
+- ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
++ ret = wait_for_completion_killable(&lreq->reg_commit_wait);
+ return ret ?: lreq->reg_commit_error;
+ }
+
+-static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
++static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq,
++ unsigned long timeout)
+ {
+- int ret;
++ long left;
+
+ dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
+- ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
+- return ret ?: lreq->notify_finish_error;
++ left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait,
++ ceph_timeout_jiffies(timeout));
++ if (left <= 0)
++ left = left ?: -ETIMEDOUT;
++ else
++ left = lreq->notify_finish_error; /* completed */
++
++ return left;
+ }
+
+ /*
+@@ -4890,7 +4897,8 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
+ linger_submit(lreq);
+ ret = linger_reg_commit_wait(lreq);
+ if (!ret)
+- ret = linger_notify_finish_wait(lreq);
++ ret = linger_notify_finish_wait(lreq,
++ msecs_to_jiffies(2 * timeout * MSEC_PER_SEC));
+ else
+ dout("lreq %p failed to initiate notify %d\n", lreq, ret);
+
+diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
+index 910ca41cb9e67..4953abee79fea 100644
+--- a/net/core/bpf_sk_storage.c
++++ b/net/core/bpf_sk_storage.c
+@@ -521,8 +521,11 @@ bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
+ return ERR_PTR(-EPERM);
+
+ nla_for_each_nested(nla, nla_stgs, rem) {
+- if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
++ if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD) {
++ if (nla_len(nla) != sizeof(u32))
++ return ERR_PTR(-EINVAL);
+ nr_maps++;
++ }
+ }
+
+ diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 49766446797c1..b055e196f5306 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -4919,13 +4919,17 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (br_spec) {
+ nla_for_each_nested(attr, br_spec, rem) {
+- if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
++ if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) {
+ if (nla_len(attr) < sizeof(flags))
+ return -EINVAL;
+
+ have_flags = true;
+ flags = nla_get_u16(attr);
+- break;
++ }
++
++ if (nla_type(attr) == IFLA_BRIDGE_MODE) {
++ if (nla_len(attr) < sizeof(u16))
++ return -EINVAL;
+ }
+ }
+ }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index cf1e437ae4875..1f9401d757cbb 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1297,7 +1297,8 @@ set_sndbuf:
+ cmpxchg(&sk->sk_pacing_status,
+ SK_PACING_NONE,
+ SK_PACING_NEEDED);
+- sk->sk_max_pacing_rate = ulval;
++ /* Pairs with READ_ONCE() from sk_getsockopt() */
++ WRITE_ONCE(sk->sk_max_pacing_rate, ulval);
+ sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
+ break;
+ }
+@@ -1455,11 +1456,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+ break;
+
+ case SO_SNDBUF:
+- v.val = sk->sk_sndbuf;
++ v.val = READ_ONCE(sk->sk_sndbuf);
+ break;
+
+ case SO_RCVBUF:
+- v.val = sk->sk_rcvbuf;
++ v.val = READ_ONCE(sk->sk_rcvbuf);
+ break;
+
+ case SO_REUSEADDR:
+@@ -1548,7 +1549,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+ break;
+
+ case SO_RCVLOWAT:
+- v.val = sk->sk_rcvlowat;
++ v.val = READ_ONCE(sk->sk_rcvlowat);
+ break;
+
+ case SO_SNDLOWAT:
+@@ -1642,7 +1643,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+ if (!sock->ops->set_peek_off)
+ return -EOPNOTSUPP;
+
+- v.val = sk->sk_peek_off;
++ v.val = READ_ONCE(sk->sk_peek_off);
+ break;
+ case SO_NOFCS:
+ v.val = sock_flag(sk, SOCK_NOFCS);
+@@ -1672,7 +1673,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+ case SO_BUSY_POLL:
+- v.val = sk->sk_ll_usec;
++ v.val = READ_ONCE(sk->sk_ll_usec);
+ break;
+ case SO_PREFER_BUSY_POLL:
+ v.val = READ_ONCE(sk->sk_prefer_busy_poll);
+@@ -1680,12 +1681,14 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+ #endif
+
+ case SO_MAX_PACING_RATE:
++ /* The READ_ONCE() pairs with the WRITE_ONCE() in sk_setsockopt() */
+ if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
+ lv = sizeof(v.ulval);
+- v.ulval = sk->sk_max_pacing_rate;
++ v.ulval = READ_ONCE(sk->sk_max_pacing_rate);
+ } else {
+ /* 32bit version */
+- v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
++ v.val = min_t(unsigned long, ~0U,
++ READ_ONCE(sk->sk_max_pacing_rate));
+ }
+ break;
+
+@@ -2898,7 +2901,7 @@ EXPORT_SYMBOL(__sk_mem_reclaim);
+
+ int sk_set_peek_off(struct sock *sk, int val)
+ {
+- sk->sk_peek_off = val;
++ WRITE_ONCE(sk->sk_peek_off, val);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(sk_set_peek_off);
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 86b4e8909ad1e..85d3c62bdfa6a 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -117,7 +117,6 @@ static void sock_map_sk_acquire(struct sock *sk)
+ __acquires(&sk->sk_lock.slock)
+ {
+ lock_sock(sk);
+- preempt_disable();
+ rcu_read_lock();
+ }
+
+@@ -125,7 +124,6 @@ static void sock_map_sk_release(struct sock *sk)
+ __releases(&sk->sk_lock.slock)
+ {
+ rcu_read_unlock();
+- preempt_enable();
+ release_sock(sk);
+ }
+
+diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
+index dc4fb699b56c3..d2981e89d3638 100644
+--- a/net/dcb/dcbnl.c
++++ b/net/dcb/dcbnl.c
+@@ -946,7 +946,7 @@ static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ return -EOPNOTSUPP;
+
+ ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX,
+- tb[DCB_ATTR_BCN], dcbnl_pfc_up_nest,
++ tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
+ NULL);
+ if (ret)
+ return ret;
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index d58e672be31c7..5df97aaac252e 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -40,7 +40,7 @@ struct tcp_fastopen_metrics {
+
+ struct tcp_metrics_block {
+ struct tcp_metrics_block __rcu *tcpm_next;
+- possible_net_t tcpm_net;
++ struct net *tcpm_net;
+ struct inetpeer_addr tcpm_saddr;
+ struct inetpeer_addr tcpm_daddr;
+ unsigned long tcpm_stamp;
+@@ -51,34 +51,38 @@ struct tcp_metrics_block {
+ struct rcu_head rcu_head;
+ };
+
+-static inline struct net *tm_net(struct tcp_metrics_block *tm)
++static inline struct net *tm_net(const struct tcp_metrics_block *tm)
+ {
+- return read_pnet(&tm->tcpm_net);
++ /* Paired with the WRITE_ONCE() in tcpm_new() */
++ return READ_ONCE(tm->tcpm_net);
+ }
+
+ static bool tcp_metric_locked(struct tcp_metrics_block *tm,
+ enum tcp_metric_index idx)
+ {
+- return tm->tcpm_lock & (1 << idx);
++ /* Paired with WRITE_ONCE() in tcpm_suck_dst() */
++ return READ_ONCE(tm->tcpm_lock) & (1 << idx);
+ }
+
+-static u32 tcp_metric_get(struct tcp_metrics_block *tm,
++static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
+ enum tcp_metric_index idx)
+ {
+- return tm->tcpm_vals[idx];
++ /* Paired with WRITE_ONCE() in tcp_metric_set() */
++ return READ_ONCE(tm->tcpm_vals[idx]);
+ }
+
+ static void tcp_metric_set(struct tcp_metrics_block *tm,
+ enum tcp_metric_index idx,
+ u32 val)
+ {
+- tm->tcpm_vals[idx] = val;
++ /* Paired with READ_ONCE() in tcp_metric_get() */
++ WRITE_ONCE(tm->tcpm_vals[idx], val);
+ }
+
+ static bool addr_same(const struct inetpeer_addr *a,
+ const struct inetpeer_addr *b)
+ {
+- return inetpeer_addr_cmp(a, b) == 0;
++ return (a->family == b->family) && !inetpeer_addr_cmp(a, b);
+ }
+
+ struct tcpm_hash_bucket {
+@@ -89,6 +93,7 @@ static struct tcpm_hash_bucket *tcp_metrics_hash __read_mostly;
+ static unsigned int tcp_metrics_hash_log __read_mostly;
+
+ static DEFINE_SPINLOCK(tcp_metrics_lock);
++static DEFINE_SEQLOCK(fastopen_seqlock);
+
+ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
+ const struct dst_entry *dst,
+@@ -97,7 +102,7 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
+ u32 msval;
+ u32 val;
+
+- tm->tcpm_stamp = jiffies;
++ WRITE_ONCE(tm->tcpm_stamp, jiffies);
+
+ val = 0;
+ if (dst_metric_locked(dst, RTAX_RTT))
+@@ -110,30 +115,42 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
+ val |= 1 << TCP_METRIC_CWND;
+ if (dst_metric_locked(dst, RTAX_REORDERING))
+ val |= 1 << TCP_METRIC_REORDERING;
+- tm->tcpm_lock = val;
++ /* Paired with READ_ONCE() in tcp_metric_locked() */
++ WRITE_ONCE(tm->tcpm_lock, val);
+
+ msval = dst_metric_raw(dst, RTAX_RTT);
+- tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;
++ tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);
+
+ msval = dst_metric_raw(dst, RTAX_RTTVAR);
+- tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
+- tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
+- tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
+- tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
++ tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
++ tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
++ dst_metric_raw(dst, RTAX_SSTHRESH));
++ tcp_metric_set(tm, TCP_METRIC_CWND,
++ dst_metric_raw(dst, RTAX_CWND));
++ tcp_metric_set(tm, TCP_METRIC_REORDERING,
++ dst_metric_raw(dst, RTAX_REORDERING));
+ if (fastopen_clear) {
++ write_seqlock(&fastopen_seqlock);
+ tm->tcpm_fastopen.mss = 0;
+ tm->tcpm_fastopen.syn_loss = 0;
+ tm->tcpm_fastopen.try_exp = 0;
+ tm->tcpm_fastopen.cookie.exp = false;
+ tm->tcpm_fastopen.cookie.len = 0;
++ write_sequnlock(&fastopen_seqlock);
+ }
+ }
+
+ #define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
+
+-static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
++static void tcpm_check_stamp(struct tcp_metrics_block *tm,
++ const struct dst_entry *dst)
+ {
+- if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
++ unsigned long limit;
++
++ if (!tm)
++ return;
++ limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT;
++ if (unlikely(time_after(jiffies, limit)))
+ tcpm_suck_dst(tm, dst, false);
+ }
+
+@@ -174,20 +191,23 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
+ oldest = deref_locked(tcp_metrics_hash[hash].chain);
+ for (tm = deref_locked(oldest->tcpm_next); tm;
+ tm = deref_locked(tm->tcpm_next)) {
+- if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
++ if (time_before(READ_ONCE(tm->tcpm_stamp),
++ READ_ONCE(oldest->tcpm_stamp)))
+ oldest = tm;
+ }
+ tm = oldest;
+ } else {
+- tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
++ tm = kzalloc(sizeof(*tm), GFP_ATOMIC);
+ if (!tm)
+ goto out_unlock;
+ }
+- write_pnet(&tm->tcpm_net, net);
++ /* Paired with the READ_ONCE() in tm_net() */
++ WRITE_ONCE(tm->tcpm_net, net);
++
+ tm->tcpm_saddr = *saddr;
+ tm->tcpm_daddr = *daddr;
+
+- tcpm_suck_dst(tm, dst, true);
++ tcpm_suck_dst(tm, dst, reclaim);
+
+ if (likely(!reclaim)) {
+ tm->tcpm_next = tcp_metrics_hash[hash].chain;
+@@ -434,7 +454,7 @@ void tcp_update_metrics(struct sock *sk)
+ tp->reordering);
+ }
+ }
+- tm->tcpm_stamp = jiffies;
++ WRITE_ONCE(tm->tcpm_stamp, jiffies);
+ out_unlock:
+ rcu_read_unlock();
+ }
+@@ -539,8 +559,6 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
+ return ret;
+ }
+
+-static DEFINE_SEQLOCK(fastopen_seqlock);
+-
+ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
+ struct tcp_fastopen_cookie *cookie)
+ {
+@@ -647,7 +665,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
+ }
+
+ if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
+- jiffies - tm->tcpm_stamp,
++ jiffies - READ_ONCE(tm->tcpm_stamp),
+ TCP_METRICS_ATTR_PAD) < 0)
+ goto nla_put_failure;
+
+@@ -658,7 +676,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
+ if (!nest)
+ goto nla_put_failure;
+ for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
+- u32 val = tm->tcpm_vals[i];
++ u32 val = tcp_metric_get(tm, i);
+
+ if (!val)
+ continue;
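The READ_ONCE()/WRITE_ONCE() annotations above mark fields that lockless readers inspect while another CPU may be updating them. A userspace analogue (an illustration only, not kernel code): C11 relaxed atomics give the same basic guarantee that each load and store is a single, untorn access the compiler cannot fuse or cache.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned long stamp;     /* e.g. a "last updated" timestamp */

    static void *writer(void *arg)
    {
            for (unsigned long i = 1; i <= 1000000; i++)
                    atomic_store_explicit(&stamp, i, memory_order_relaxed);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, writer, NULL);
            /* Lockless reader: every load observes some complete value,
             * never a torn mix of two stores. */
            for (int i = 0; i < 5; i++)
                    printf("stamp = %lu\n",
                           atomic_load_explicit(&stamp, memory_order_relaxed));
            pthread_join(t, NULL);
            return 0;
    }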
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 91f1c5f56d5fa..ee094645c7cea 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -1068,7 +1068,7 @@ static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
+ And all this only to mangle msg->im6_msgtype and
+ to set msg->im6_mbz to "mbz" :-)
+ */
+- skb_push(skb, -skb_network_offset(pkt));
++ __skb_pull(skb, skb_network_offset(pkt));
+
+ skb_push(skb, sizeof(*msg));
+ skb_reset_transport_header(skb);
+diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
+index ea52c320f67c4..a2f53aee39097 100644
+--- a/net/sched/cls_fw.c
++++ b/net/sched/cls_fw.c
+@@ -265,7 +265,6 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
+ return -ENOBUFS;
+
+ fnew->id = f->id;
+- fnew->res = f->res;
+ fnew->ifindex = f->ifindex;
+ fnew->tp = f->tp;
+
+diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
+index 48712bc51bda7..194468d0355a1 100644
+--- a/net/sched/cls_route.c
++++ b/net/sched/cls_route.c
+@@ -511,7 +511,6 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
+ if (fold) {
+ f->id = fold->id;
+ f->iif = fold->iif;
+- f->res = fold->res;
+ f->handle = fold->handle;
+
+ f->tp = fold->tp;
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index 0025fa837e857..17edcf1d1c3b6 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -812,7 +812,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
+
+ new->ifindex = n->ifindex;
+ new->fshift = n->fshift;
+- new->res = n->res;
+ new->flags = n->flags;
+ RCU_INIT_POINTER(new->ht_down, ht);
+
+@@ -999,18 +998,62 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ return -EINVAL;
+ }
+
++ /* At this point, we need to derive the new handle that will be used to
++ * uniquely map the identity of this table match entry. The
++ * identity of the entry that we need to construct is 32 bits made of:
++ * htid(12b):bucketid(8b):node/entryid(12b)
++ *
++ * At this point _we have the table(ht)_ in which we will insert this
++ * entry. We carry the table's id in variable "htid".
++ * Note that earlier code picked the ht selection either by a) the user
++ * providing the htid specified via the TCA_U32_HASH attribute, or b) when
++ * no such attribute is passed, the root ht, which defaults to ID
++ * 0x[800][00][000]. Rule: the root table has a single bucket with ID 0.
++ * If OTOH the user passed us the htid, they may also pass a bucketid of
++ * choice; 0 is fine. For example, a user htid of 0x[600][01][000]
++ * indicates hash bucketid 1. Rule: the entry/node ID _cannot_ be
++ * passed via the htid, so even if it was non-zero it will be ignored.
++ *
++ * We may also have a handle, if the user passed one. The handle also
++ * carries the same addressing of htid(12b):bucketid(8b):node/entryid(12b).
++ * Rule: the bucketid on the handle is ignored even if one was passed;
++ * rather the value on "htid" is always assumed to be the bucketid.
++ */
+ if (handle) {
++ /* Rule: The htid from handle and tableid from htid must match */
+ if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
+ NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
+ return -EINVAL;
+ }
+- handle = htid | TC_U32_NODE(handle);
+- err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
+- GFP_KERNEL);
+- if (err)
+- return err;
+- } else
++ /* Ok, so far we have a valid htid(12b):bucketid(8b) but we
++ * need to finalize the table entry identification with the last
++ * part - the node/entryid(12b). Rule: Nodeid _cannot be 0_ for
++ * entries. Rule: nodeid of 0 is reserved only for tables (see
++ * earlier code which processes the TC_U32_DIVISOR attribute).
++ * Rule: The nodeid can only be derived from the handle (and not
++ * the htid).
++ * Rule: if the handle specified zero for the node id, for example
++ * 0x60000000, then pick a new nodeid from the pool of IDs
++ * this hash table has been allocating from.
++ * If OTOH it is specified (for example the user passed a
++ * handle such as 0x60000123), then we use it to generate our final
++ * handle which is used to uniquely identify the match entry.
++ */
++ if (!TC_U32_NODE(handle)) {
++ handle = gen_new_kid(ht, htid);
++ } else {
++ handle = htid | TC_U32_NODE(handle);
++ err = idr_alloc_u32(&ht->handle_idr, NULL, &handle,
++ handle, GFP_KERNEL);
++ if (err)
++ return err;
++ }
++ } else {
++ /* The user did not give us a handle; lets just generate one
++ * from the table's pool of nodeids.
++ */
+ handle = gen_new_kid(ht, htid);
++ }
+
+ if (tb[TCA_U32_SEL] == NULL) {
+ NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
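The comment block above describes the 32-bit u32 handle as htid(12b):bucketid(8b):node/entryid(12b). A self-contained sketch decoding a handle in the 0x[600][01][123] notation; the local mask macros are stand-ins assumed to mirror the TC_U32_* helpers from the uapi <linux/pkt_cls.h> header.

    #include <stdint.h>
    #include <stdio.h>

    #define U32_HTID(h)  ((h) & 0xFFF00000u)        /* table id, top 12 bits    */
    #define U32_HASH(h)  (((h) >> 12) & 0xFFu)      /* bucket id, middle 8 bits */
    #define U32_NODE(h)  ((h) & 0xFFFu)             /* entry id, low 12 bits    */

    int main(void)
    {
            uint32_t handle = 0x60001123;   /* i.e. 0x[600][01][123] */

            printf("htid   = %#x\n", U32_HTID(handle));   /* 0x60000000 */
            printf("bucket = %#x\n", U32_HASH(handle));   /* 0x1        */
            printf("node   = %#x\n", U32_NODE(handle));   /* 0x123      */
            return 0;
    }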
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index e203deacc9533..e40b4425eb6b5 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -780,6 +780,11 @@ static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
+ [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 },
+ };
+
++static struct netlink_range_validation_signed taprio_cycle_time_range = {
++ .min = 0,
++ .max = INT_MAX,
++};
++
+ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
+ [TCA_TAPRIO_ATTR_PRIOMAP] = {
+ .len = sizeof(struct tc_mqprio_qopt)
+@@ -788,7 +793,8 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
+ [TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 },
+ [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED },
+ [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 },
+- [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 },
++ [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] =
++ NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range),
+ [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
+ [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
+ [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
+@@ -923,6 +929,11 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
+ return -EINVAL;
+ }
+
++ if (cycle < 0 || cycle > INT_MAX) {
++ NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
++ return -EINVAL;
++ }
++
+ new->cycle_time = cycle;
+ }
+
+@@ -1127,7 +1138,7 @@ static void setup_txtime(struct taprio_sched *q,
+ struct sched_gate_list *sched, ktime_t base)
+ {
+ struct sched_entry *entry;
+- u32 interval = 0;
++ u64 interval = 0;
+
+ list_for_each_entry(entry, &sched->entries, list) {
+ entry->next_txtime = ktime_add_ns(base, interval);
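On the interval type change above: taprio schedule entries are expressed in nanoseconds, and a 32-bit accumulator wraps after 2^32 ns, roughly 4.29 seconds, which a perfectly valid schedule can exceed; the INT_MAX clamp on cycle_time guards the same class of overflow. A small arithmetic demo (not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t acc32 = 0;
            uint64_t acc64 = 0;

            /* five entries of one second each, a legal schedule */
            for (int i = 0; i < 5; i++) {
                    acc32 += 1000000000u;
                    acc64 += 1000000000u;
            }
            printf("u32 sum: %u ns (wrapped)\n", acc32);              /* 705032704  */
            printf("u64 sum: %llu ns\n", (unsigned long long)acc64);  /* 5000000000 */
            return 0;
    }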
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index d326540e4938c..7a076d5017d1c 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -717,7 +717,7 @@ static int unix_set_peek_off(struct sock *sk, int val)
+ if (mutex_lock_interruptible(&u->iolock))
+ return -EINTR;
+
+- sk->sk_peek_off = val;
++ WRITE_ONCE(sk->sk_peek_off, val);
+ mutex_unlock(&u->iolock);
+
+ return 0;
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index a565476809f02..c7192d7bcbd76 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -641,7 +641,7 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
+
+ ret = cfg80211_calc_short_ssid(ies, &ssid_elem, &s_ssid_tmp);
+ if (ret)
+- return ret;
++ return 0;
+
+ /* RNR IE may contain more than one NEIGHBOR_AP_INFO */
+ while (pos + sizeof(*ap_info) <= end) {
+diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
+index 00d2e0e2e0c28..319f36ebb9a40 100644
+--- a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
++++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
+@@ -4,6 +4,12 @@
+
+ set -e
+
++# skip if there's no gcc
++if ! [ -x "$(command -v gcc)" ]; then
++ echo "failed: no gcc compiler"
++ exit 2
++fi
++
+ temp_dir=$(mktemp -d /tmp/perf-uprobe-different-cu-sh.XXXXXXXXXX)
+
+ cleanup()
+@@ -11,7 +17,7 @@ cleanup()
+ trap - EXIT TERM INT
+ if [[ "${temp_dir}" =~ ^/tmp/perf-uprobe-different-cu-sh.*$ ]]; then
+ echo "--- Cleaning up ---"
+- perf probe -x ${temp_dir}/testfile -d foo
++ perf probe -x ${temp_dir}/testfile -d foo || true
+ rm -f "${temp_dir}/"*
+ rmdir "${temp_dir}"
+ fi
+diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
+index 986b9458efb26..b736a5169aad0 100644
+--- a/tools/testing/selftests/rseq/rseq.c
++++ b/tools/testing/selftests/rseq/rseq.c
+@@ -32,9 +32,17 @@
+ #include "../kselftest.h"
+ #include "rseq.h"
+
+-static const ptrdiff_t *libc_rseq_offset_p;
+-static const unsigned int *libc_rseq_size_p;
+-static const unsigned int *libc_rseq_flags_p;
++/*
++ * Define weak versions to play nice with binaries that are statically linked
++ * against a libc that doesn't support registering its own rseq.
++ */
++__weak ptrdiff_t __rseq_offset;
++__weak unsigned int __rseq_size;
++__weak unsigned int __rseq_flags;
++
++static const ptrdiff_t *libc_rseq_offset_p = &__rseq_offset;
++static const unsigned int *libc_rseq_size_p = &__rseq_size;
++static const unsigned int *libc_rseq_flags_p = &__rseq_flags;
+
+ /* Offset from the thread pointer to the rseq area. */
+ ptrdiff_t rseq_offset;
+@@ -108,10 +116,19 @@ int rseq_unregister_current_thread(void)
+ static __attribute__((constructor))
+ void rseq_init(void)
+ {
+- libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
+- libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
+- libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");
+- if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p) {
++ /*
++ * If the libc's registered rseq size isn't already valid, it may be
++ * because the binary is dynamically linked and not necessarily due to
++ * libc not having registered a restartable sequence. Try to find the
++ * symbols if that's the case.
++ */
++ if (!*libc_rseq_size_p) {
++ libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
++ libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
++ libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");
++ }
++ if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p &&
++ *libc_rseq_size_p != 0) {
+ /* rseq registration owned by glibc */
+ rseq_offset = *libc_rseq_offset_p;
+ rseq_size = *libc_rseq_size_p;