author	Otavio Salvador <otavio@ossystems.com.br>	2023-09-09 14:49:11 -0300
committer	GitHub <noreply@github.com>	2023-09-09 14:49:11 -0300
commit	3a95b5654979d1c2d61616bf60249ed3a98dcfbc (patch)
tree	0184e607556f6621e4baf9c5126ffee100f4758c
parent	c099d7415137daf904adcd5fdd73a25024627587 (diff)
parent	ba2847ffebda28ba7be1b96bb506bfe5b67c856e (diff)
Merge pull request #641 from eghidoli/5.15-2.2.x-imx_v5.15.129
5.15 2.2.x imx bump to v5.15.129
426 files changed, 6658 insertions, 4078 deletions
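
The x86 SRSO changes below depend on srso_alias_untrain_ret() and srso_alias_safe_ret() landing at virtual addresses that differ in exactly bits 2, 8, 14 and 20, so the two functions alias in the Zen3/4 BTB. The vmlinux.lds.S hunk asserts this with (A | B) - (A & B) because, as its comment notes, GNU ld before 2.41 and lld before 17 cannot evaluate XOR in a linker-script assertion. A minimal userspace sketch of that identity; the addresses are hypothetical stand-ins, not real symbol values:

#include <assert.h>
#include <stdint.h>

/* The four address bits that must differ between the two SRSO alias
 * functions (see the vmlinux.lds.S hunk below): 2, 8, 14 and 20. */
#define SRSO_ALIAS_BITS ((1u << 2) | (1u << 8) | (1u << 14) | (1u << 20))

/*
 * (A | B) - (A & B) == A ^ B for all values: OR sets every bit set in
 * either operand, AND keeps the bits set in both, and subtracting the
 * latter leaves exactly the differing bits. No borrows occur, since
 * A & B is a submask of A | B.
 */
static uint64_t linker_xor(uint64_t a, uint64_t b)
{
	return (a | b) - (a & b);
}

int main(void)
{
	/* Hypothetical symbol addresses; only their relationship matters. */
	uint64_t untrain = 0xffffffff82000000ull;	/* 2M aligned */
	uint64_t safe = untrain | SRSO_ALIAS_BITS;

	assert(linker_xor(untrain, safe) == (untrain ^ safe));
	assert(linker_xor(untrain, safe) == SRSO_ALIAS_BITS);
	return 0;
}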
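
The arch/riscv/lib/uaccess.S hunk below reworks how __asm_copy_from_user() and __clear_user() report a mid-copy fault: instead of stashing the original length and returning it wholesale, the entry path now stashes the terminal destination address, and the fault fixup returns terminal minus current pointer, i.e. only the bytes left uncopied. A minimal C sketch of that arithmetic, with illustrative names (bytes_not_copied() is not a kernel function):

#include <assert.h>
#include <stddef.h>

/*
 * copy_from_user() must return how many bytes were NOT copied. Save the
 * terminal address up front (add t5, a0, a2 in the assembly); on a fault,
 * the destination pointer has advanced past every byte already copied,
 * so terminal minus current (sub a0, t5, a0) is the remainder.
 */
static size_t bytes_not_copied(const char *dst_start, size_t len,
			       const char *dst_at_fault)
{
	const char *terminal = dst_start + len;
	return (size_t)(terminal - dst_at_fault);
}

int main(void)
{
	char buf[16];

	/* Fault after 10 of 16 bytes: 6 bytes remain uncopied. */
	assert(bytes_not_copied(buf, sizeof(buf), buf + 10) == 6);
	/* No progress at all: everything remains. */
	assert(bytes_not_copied(buf, sizeof(buf), buf) == sizeof(buf));
	return 0;
}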
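
The arch/x86/kernel/static_call.c hunk below adds a guard so __static_call_fixup() only memcmp()s the 3 bytes at tramp+5 when the byte at tramp+7 is provably readable: either it falls on the same page as tramp, or the spilled-over address is still kernel text. A toy model of the check, assuming a 4 KiB page (PAGE_SHIFT 12); is_kernel_text() stands in for kernel_text_address() and always answers no here:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Stand-in for kernel_text_address(); pretend the next page is unmapped. */
static bool is_kernel_text(uint64_t addr)
{
	(void)addr;
	return false;
}

/*
 * The fixup wants to memcmp() the bytes at tramp+5..tramp+7, so it must
 * first prove tramp+7 is readable: same page as tramp, or known text.
 */
static bool tail_bytes_readable(uint64_t tramp)
{
	bool same_page = (tramp >> PAGE_SHIFT) == ((tramp + 7) >> PAGE_SHIFT);

	return same_page || is_kernel_text(tramp + 7);
}

int main(void)
{
	/* Mid-page: the tail bytes cannot cross into another page. */
	assert(tail_bytes_readable(0x1000 + 100));
	/* 7 bytes short of a page boundary: would spill into the next page. */
	assert(!tail_bytes_readable(0x2000 - 7));
	return 0;
}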
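
The drivers/dma-buf/sw_sync.c hunk below restructures sync_timeline_signal() to take an extra fence reference under the lock, collect signalled fences on a private list, and drop the references only after the lock is released, so a release callback triggered by the final dma_fence_put() can neither free a fence the loop is still walking nor deadlock on the timeline lock. A toy refcounting model of that pattern; every name in it is invented for illustration:

#include <pthread.h>
#include <stdlib.h>

/* Toy refcounted object standing in for struct dma_fence. */
struct node {
	int refs;
	struct node *next;
	void (*on_signal)(struct node *);	/* may drop the last ref */
};

static struct node *pending;			/* protected by lock */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void put_node(struct node *n)
{
	if (--n->refs == 0)
		free(n);	/* release path; must not run under lock */
}

static void signal_all(void)
{
	struct node *signalled = NULL, *n;

	pthread_mutex_lock(&lock);
	while ((n = pending)) {
		pending = n->next;
		n->refs++;		/* dma_fence_get() analogue */
		n->next = signalled;	/* list_move_tail() analogue */
		signalled = n;
		n->on_signal(n);	/* dma_fence_signal_locked() analogue */
	}
	pthread_mutex_unlock(&lock);

	while ((n = signalled)) {	/* deferred dma_fence_put() */
		signalled = n->next;
		put_node(n);
	}
}

static void drop_creator_ref(struct node *n)
{
	put_node(n);	/* callback releases what would be the last ref */
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	n->refs = 1;
	n->on_signal = drop_creator_ref;
	pending = n;
	signal_all();	/* node is freed safely, after the lock is dropped */
	return 0;
}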
diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
index 2f923c805802..f79cb11b080f 100644
--- a/Documentation/admin-guide/hw-vuln/srso.rst
+++ b/Documentation/admin-guide/hw-vuln/srso.rst
@@ -124,8 +124,8 @@ sequence.
 To ensure the safety of this mitigation, the kernel must ensure that the
 safe return sequence is itself free from attacker interference. In Zen3
 and Zen4, this is accomplished by creating a BTB alias between the
-untraining function srso_untrain_ret_alias() and the safe return
-function srso_safe_ret_alias() which results in evicting a potentially
+untraining function srso_alias_untrain_ret() and the safe return
+function srso_alias_safe_ret() which results in evicting a potentially
 poisoned BTB entry and using that safe one for all function returns.
 
 In older Zen1 and Zen2, this is accomplished using a reinterpretation
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 076861b0f5ac..83a75e16e54d 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -104,6 +104,10 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A710     | #2054223        | ARM64_ERRATUM_2054223       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A710     | #2224489        | ARM64_ERRATUM_2224489       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1349291        | N/A                         |
@@ -112,8 +116,16 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N2     | #2139208        | ARM64_ERRATUM_2139208       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Neoverse-N2     | #2067961        | ARM64_ERRATUM_2067961       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Neoverse-N2     | #2253138        | ARM64_ERRATUM_2253138       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | MMU-500         | #841119,826419  | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | MMU-600         | #1076982,1209401| N/A                         |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | MMU-700         | #2268618,2812531| N/A                         |
++----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
 | Broadcom       | Brahma-B53      | N/A             | ARM64_ERRATUM_845719        |
 +----------------+-----------------+-----------------+-----------------------------+
diff --git a/MAINTAINERS b/MAINTAINERS
index a6aaa5176107..e4e0a4e6fc00 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1259,7 +1259,7 @@ APEX EMBEDDED SYSTEMS STX104 IIO DRIVER
 M:	William Breathitt Gray <vilhelm.gray@gmail.com>
 L:	linux-iio@vger.kernel.org
 S:	Maintained
-F:	drivers/iio/adc/stx104.c
+F:	drivers/iio/addac/stx104.c
 
 APM DRIVER
 M:	Jiri Kosina <jikos@kernel.org>
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 15
-SUBLEVEL = 125
+SUBLEVEL = 129
 EXTRAVERSION =
 NAME = Trick or Treat
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index b4fbbba30aa2..8c4c14a171e2 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -385,8 +385,7 @@ setup_memory(void *kernel_end)
 #endif /* CONFIG_BLK_DEV_INITRD */
 }
 
-int __init
-page_is_ram(unsigned long pfn)
+int page_is_ram(unsigned long pfn)
 {
 	struct memclust_struct * cluster;
 	struct memdesc_struct * memdesc;
diff --git a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
index 9b4cf5ebe6d5..c62aff908ab4 100644
--- a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
@@ -63,7 +63,7 @@
 		status = "okay";
 		m25p,fast-read;
 		label = "bmc";
-		spi-max-frequency = <100000000>; /* 100 MHz */
+		spi-max-frequency = <50000000>; /* 50 MHz */
 #include "openbmc-flash-layout.dtsi"
 	};
 };
diff --git a/arch/arm/boot/dts/imx6dl-prtrvt.dts b/arch/arm/boot/dts/imx6dl-prtrvt.dts
index 5ac84445e9cc..90e01de8c2c1 100644
--- a/arch/arm/boot/dts/imx6dl-prtrvt.dts
+++ b/arch/arm/boot/dts/imx6dl-prtrvt.dts
@@ -126,6 +126,10 @@
 	status = "disabled";
 };
 
+&usbotg {
+	disable-over-current;
+};
+
 &vpu {
 	status = "disabled";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi
index 19578f660b09..70dfa07a1698 100644
--- a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi
@@ -69,6 +69,7 @@
 	vbus-supply = <&reg_usb_h1_vbus>;
 	phy_type = "utmi";
 	dr_mode = "host";
+	disable-over-current;
 	status = "okay";
 };
 
@@ -78,10 +79,18 @@
 	pinctrl-0 = <&pinctrl_usbotg>;
 	phy_type = "utmi";
 	dr_mode = "host";
-	disable-over-current;
+	over-current-active-low;
 	status = "okay";
 };
 
+&usbphynop1 {
+	status = "disabled";
+};
+
+&usbphynop2 {
+	status = "disabled";
+};
+
 &usdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc1>;
diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi
index 8f00cce369dd..e439a03ae1bf 100644
--- a/arch/arm/boot/dts/imx6sll.dtsi
+++ b/arch/arm/boot/dts/imx6sll.dtsi
@@ -51,20 +51,18 @@
 			device_type = "cpu";
 			reg = <0>;
 			next-level-cache = <&L2>;
-			operating-points = <
+			operating-points =
 				/* kHz	uV */
-				996000	1275000
-				792000	1175000
-				396000	1075000
-				198000	975000
-			>;
-			fsl,soc-operating-points = <
+				<996000 1275000>,
+				<792000 1175000>,
+				<396000 1075000>,
+				<198000 975000>;
+			fsl,soc-operating-points =
 				/* ARM kHz	SOC-PU uV */
-				996000	1175000
-				792000	1175000
-				396000	1175000
-				198000	1175000
-			>;
+				<996000 1175000>,
+				<792000 1175000>,
+				<396000 1175000>,
+				<198000 1175000>;
 			clock-latency = <61036>; /* two CLK32 periods */
 			#cooling-cells = <2>;
 			fsl,low-power-run;
@@ -592,7 +590,7 @@
 			reg = <0x020ca000 0x1000>;
 			interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clks IMX6SLL_CLK_USBPHY2>;
-			phy-reg_3p0-supply = <&reg_3p0>;
+			phy-3p0-supply = <&reg_3p0>;
 			fsl,anatop = <&anatop>;
 		};
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b64cb7606ae1..84c2bae8a212 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -774,6 +774,80 @@ config ARM64_ERRATUM_2139208
 
 	  If unsure, say Y.
 
+config ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+	bool
+
+config ARM64_ERRATUM_2054223
+	bool "Cortex-A710: 2054223: workaround TSB instruction failing to flush trace"
+	default y
+	select ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+	help
+	  Enable workaround for ARM Cortex-A710 erratum 2054223
+
+	  Affected cores may fail to flush the trace data on a TSB instruction, when
+	  the PE is in trace prohibited state. This will cause losing a few bytes
+	  of the trace cached.
+
+	  Workaround is to issue two TSB consecutively on affected cores.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_2067961
+	bool "Neoverse-N2: 2067961: workaround TSB instruction failing to flush trace"
+	default y
+	select ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+	help
+	  Enable workaround for ARM Neoverse-N2 erratum 2067961
+
+	  Affected cores may fail to flush the trace data on a TSB instruction, when
+	  the PE is in trace prohibited state. This will cause losing a few bytes
+	  of the trace cached.
+
+	  Workaround is to issue two TSB consecutively on affected cores.
+
+	  If unsure, say Y.
+
+config ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
+	bool
+
+config ARM64_ERRATUM_2253138
+	bool "Neoverse-N2: 2253138: workaround TRBE writing to address out-of-range"
+	depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
+	depends on CORESIGHT_TRBE
+	default y
+	select ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
+	help
+	  This option adds the workaround for ARM Neoverse-N2 erratum 2253138.
+
+	  Affected Neoverse-N2 cores might write to an out-of-range address, not reserved
+	  for TRBE. Under some conditions, the TRBE might generate a write to the next
+	  virtually addressed page following the last page of the TRBE address space
+	  (i.e., the TRBLIMITR_EL1.LIMIT), instead of wrapping around to the base.
+
+	  Work around this in the driver by always making sure that there is a
+	  page beyond the TRBLIMITR_EL1.LIMIT, within the space allowed for the TRBE.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_2224489
+	bool "Cortex-A710: 2224489: workaround TRBE writing to address out-of-range"
+	depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
+	depends on CORESIGHT_TRBE
+	default y
+	select ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
+	help
+	  This option adds the workaround for ARM Cortex-A710 erratum 2224489.
+ + Affected Cortex-A710 cores might write to an out-of-range address, not reserved + for TRBE. Under some conditions, the TRBE might generate a write to the next + virtually addressed page following the last page of the TRBE address space + (i.e., the TRBLIMITR_EL1.LIMIT), instead of wrapping around to the base. + + Work around this in the driver by always making sure that there is a + page beyond the TRBLIMITR_EL1.LIMIT, within the space allowed for the TRBE. + + If unsure, say Y. + config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts index 46e558ab7729..f0e8af12442a 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts @@ -129,7 +129,7 @@ status = "okay"; clock-frequency = <100000>; i2c-sda-falling-time-ns = <890>; /* hcnt */ - i2c-sdl-falling-time-ns = <890>; /* lcnt */ + i2c-scl-falling-time-ns = <890>; /* lcnt */ adc@14 { compatible = "lltc,ltc2497"; diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts index f9b4a39683cf..92ac3c86ebd5 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts @@ -162,7 +162,7 @@ status = "okay"; clock-frequency = <100000>; i2c-sda-falling-time-ns = <890>; /* hcnt */ - i2c-sdl-falling-time-ns = <890>; /* lcnt */ + i2c-scl-falling-time-ns = <890>; /* lcnt */ adc@14 { compatible = "lltc,ltc2497"; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi index d053ef302fb8..faafefe562e4 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi @@ -351,7 +351,7 @@ MX8MN_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91 MX8MN_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91 MX8MN_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f - MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x19 + MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x159 >; }; diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts index 0ce2d36ab257..d3449cb52def 100644 --- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts +++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts @@ -113,7 +113,7 @@ }; }; - pm8150l-thermal { + pm8150l-pcb-thermal { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&pm8150l_adc_tm 1>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi index a7ec81657503..8b70e831aff2 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi @@ -595,9 +595,9 @@ }; &sdhci { + max-frequency = <150000000>; bus-width = <8>; - mmc-hs400-1_8v; - mmc-hs400-enhanced-strobe; + mmc-hs200-1_8v; non-removable; status = "okay"; }; diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 7a302423b6cd..5bd4e5ac24bf 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -23,7 +23,7 @@ #define dsb(opt) asm volatile("dsb " #opt : : : "memory") #define psb_csync() asm volatile("hint #17" : : : "memory") -#define tsb_csync() asm volatile("hint #18" : : : "memory") +#define __tsb_csync() asm volatile("hint #18" : : : "memory") #define csdb() asm volatile("hint #20" : : : "memory") #ifdef CONFIG_ARM64_PSEUDO_NMI @@ -46,6 +46,20 
@@ #define dma_rmb() dmb(ld) #define dma_wmb() dmb(st) + +#define tsb_csync() \ + do { \ + /* \ + * CPUs affected by Arm Erratum 2054223 or 2067961 needs \ + * another TSB to ensure the trace is flushed. The barriers \ + * don't have to be strictly back to back, as long as the \ + * CPU is in trace prohibited state. \ + */ \ + if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE)) \ + __tsb_csync(); \ + __tsb_csync(); \ + } while (0) + /* * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz * and 0 otherwise. diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index d810d4b7b438..bf69a20bc27f 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -375,6 +375,30 @@ static const struct midr_range trbe_overwrite_fill_mode_cpus[] = { }; #endif /* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */ +#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE +static const struct midr_range tsb_flush_fail_cpus[] = { +#ifdef CONFIG_ARM64_ERRATUM_2067961 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), +#endif +#ifdef CONFIG_ARM64_ERRATUM_2054223 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), +#endif + {}, +}; +#endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */ + +#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE +static struct midr_range trbe_write_out_of_range_cpus[] = { +#ifdef CONFIG_ARM64_ERRATUM_2253138 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), +#endif +#ifdef CONFIG_ARM64_ERRATUM_2224489 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), +#endif + {}, +}; +#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */ + const struct arm64_cpu_capabilities arm64_errata[] = { #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE { @@ -607,6 +631,21 @@ const struct arm64_cpu_capabilities arm64_errata[] = { CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus), }, #endif +#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE + { + .desc = "ARM erratum 2067961 or 2054223", + .capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE, + ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus), + }, +#endif +#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE + { + .desc = "ARM erratum 2253138 or 2224489", + .capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE, + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus), + }, +#endif { } }; diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 32fe50a3a26c..fcaeec5a5125 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -57,6 +57,8 @@ WORKAROUND_1542419 WORKAROUND_1742098 WORKAROUND_2457168 WORKAROUND_TRBE_OVERWRITE_FILL_MODE +WORKAROUND_TSB_FLUSH_FAILURE +WORKAROUND_TRBE_WRITE_OUT_OF_RANGE WORKAROUND_CAVIUM_23154 WORKAROUND_CAVIUM_27456 WORKAROUND_CAVIUM_30115 diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 3d71081afc55..e69833213e79 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -124,7 +124,24 @@ #define cpu_has_tx39_cache __opt(MIPS_CPU_TX39_CACHE) #endif #ifndef cpu_has_octeon_cache -#define cpu_has_octeon_cache 0 +#define cpu_has_octeon_cache \ +({ \ + int __res; \ + \ + switch (boot_cpu_type()) { \ + case CPU_CAVIUM_OCTEON: \ + case CPU_CAVIUM_OCTEON_PLUS: \ + case CPU_CAVIUM_OCTEON2: \ + case CPU_CAVIUM_OCTEON3: \ + __res = 1; \ + break; \ + \ + default: \ + __res = 0; \ + } \ + \ + __res; \ +}) #endif /* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. 
*/ #ifndef cpu_has_fpu @@ -351,7 +368,7 @@ ({ \ int __res; \ \ - switch (current_cpu_type()) { \ + switch (boot_cpu_type()) { \ case CPU_M14KC: \ case CPU_74K: \ case CPU_1074K: \ diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index f3f4710d4ff5..99129b0cd8b8 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h @@ -34,7 +34,7 @@ static inline long find_zero(unsigned long mask) return leading_zero_bits >> 3; } -static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) +static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) { unsigned long rhs = val | c->low_bits; *data = rhs; diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index a99179d83538..56bd0aa30f93 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -710,9 +710,9 @@ static int __init rtas_flash_init(void) if (!rtas_validate_flash_data.buf) return -ENOMEM; - flash_block_cache = kmem_cache_create("rtas_flash_cache", - RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0, - NULL); + flash_block_cache = kmem_cache_create_usercopy("rtas_flash_cache", + RTAS_BLK_SIZE, RTAS_BLK_SIZE, + 0, 0, RTAS_BLK_SIZE, NULL); if (!flash_block_cache) { printk(KERN_ERR "%s: failed to create block cache\n", __func__); diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index b76cd49d521b..db040f34c004 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -313,8 +313,7 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, start = ALIGN_DOWN(start, page_size); if (altmap) { alt_start = altmap->base_pfn; - alt_end = altmap->base_pfn + altmap->reserve + - altmap->free + altmap->alloc + altmap->align; + alt_end = altmap->base_pfn + altmap->reserve + altmap->free; } pr_debug("vmemmap_free %lx...%lx\n", start, end); diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile index bb1a5408b86b..8636b17c6a20 100644 --- a/arch/powerpc/mm/kasan/Makefile +++ b/arch/powerpc/mm/kasan/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 KASAN_SANITIZE := n +KCOV_INSTRUMENT := n obj-$(CONFIG_PPC32) += kasan_init_32.o obj-$(CONFIG_PPC_8xx) += 8xx.o diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h index aff6c33ab0c0..4c58ee7f95ec 100644 --- a/arch/riscv/include/asm/mmio.h +++ b/arch/riscv/include/asm/mmio.h @@ -101,9 +101,9 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) * Relaxed I/O memory access primitives. These follow the Device memory * ordering rules but do not guarantee any ordering relative to Normal memory * accesses. These are defined to order the indicated access (either a read or - * write) with all other I/O memory accesses. Since the platform specification - * defines that all I/O regions are strongly ordered on channel 2, no explicit - * fences are required to enforce this ordering. + * write) with all other I/O memory accesses to the same peripheral. Since the + * platform specification defines that all I/O regions are strongly ordered on + * channel 0, no explicit fences are required to enforce this ordering. */ /* FIXME: These are now the same as asm-generic */ #define __io_rbr() do {} while (0) @@ -125,14 +125,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) #endif /* - * I/O memory access primitives. Reads are ordered relative to any - * following Normal memory access. 
Writes are ordered relative to any prior - * Normal memory access. The memory barriers here are necessary as RISC-V + * I/O memory access primitives. Reads are ordered relative to any following + * Normal memory read and delay() loop. Writes are ordered relative to any + * prior Normal memory write. The memory barriers here are necessary as RISC-V * doesn't define any ordering between the memory space and the I/O space. */ #define __io_br() do {} while (0) -#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory") -#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory") +#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); }) +#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); }) #define __io_aw() mmiowb_set_pending() #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; }) diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S index 2c7c1c5026af..4fe436a0eec2 100644 --- a/arch/riscv/lib/uaccess.S +++ b/arch/riscv/lib/uaccess.S @@ -19,8 +19,11 @@ ENTRY(__asm_copy_from_user) li t6, SR_SUM csrs CSR_STATUS, t6 - /* Save for return value */ - mv t5, a2 + /* + * Save the terminal address which will be used to compute the number + * of bytes copied in case of a fixup exception. + */ + add t5, a0, a2 /* * Register allocation for code below: @@ -178,7 +181,7 @@ ENTRY(__asm_copy_from_user) 10: /* Disable access to user memory */ csrc CSR_STATUS, t6 - mv a0, t5 + sub a0, t5, a0 ret ENDPROC(__asm_copy_to_user) ENDPROC(__asm_copy_from_user) @@ -230,7 +233,7 @@ ENTRY(__clear_user) 11: /* Disable access to user memory */ csrc CSR_STATUS, t6 - mv a0, a1 + sub a0, a3, a0 ret ENDPROC(__clear_user) EXPORT_SYMBOL(__clear_user) diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c index 4d141e2c132e..2ea7f208f0e7 100644 --- a/arch/s390/kernel/sthyi.c +++ b/arch/s390/kernel/sthyi.c @@ -459,9 +459,9 @@ static int sthyi_update_cache(u64 *rc) * * Fills the destination with system information returned by the STHYI * instruction. The data is generated by emulation or execution of STHYI, - * if available. The return value is the condition code that would be - * returned, the rc parameter is the return code which is passed in - * register R2 + 1. + * if available. The return value is either a negative error value or + * the condition code that would be returned, the rc parameter is the + * return code which is passed in register R2 + 1. */ int sthyi_fill(void *dst, u64 *rc) { diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index aeb0e0865e89..458b42b50b8c 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -389,8 +389,8 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu) */ int handle_sthyi(struct kvm_vcpu *vcpu) { - int reg1, reg2, r = 0; - u64 code, addr, cc = 0, rc = 0; + int reg1, reg2, cc = 0, r = 0; + u64 code, addr, rc = 0; struct sthyi_sctns *sctns = NULL; if (!test_kvm_facility(vcpu->kvm, 74)) @@ -421,7 +421,10 @@ int handle_sthyi(struct kvm_vcpu *vcpu) return -ENOMEM; cc = sthyi_fill(sctns, &rc); - + if (cc < 0) { + free_page((unsigned long)sctns); + return cc; + } out: if (!cc) { if (kvm_s390_pv_cpu_is_protected(vcpu)) { diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 1000d457c332..a380f7ecdd54 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -322,8 +322,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) /* Round the lowest possible end address up to a PMD boundary. 
*/ end = (start + len + PMD_SIZE - 1) & PMD_MASK; - if (end >= TASK_SIZE_MAX) - end = TASK_SIZE_MAX; + if (end >= DEFAULT_MAP_WINDOW) + end = DEFAULT_MAP_WINDOW; end -= len; if (end > start) { diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 608ffc45fc0e..d6089072ee41 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -465,4 +465,5 @@ /* BUG word 2 */ #define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */ +#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h index 43184640b579..a12fdf01dc26 100644 --- a/arch/x86/include/asm/entry-common.h +++ b/arch/x86/include/asm/entry-common.h @@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, static __always_inline void arch_exit_to_user_mode(void) { mds_user_clear_cpu_buffers(); + amd_clear_divider(); } #define arch_exit_to_user_mode arch_exit_to_user_mode diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index d4201fb2c46d..6a6f741edda3 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -416,8 +416,7 @@ DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx); * FPU state for a task MUST let the rest of the kernel know that the * FPU registers are no longer valid for this task. * - * Either one of these invalidation functions is enough. Invalidate - * a resource you control: CPU if using the CPU for something else + * Invalidate a resource you control: CPU if using the CPU for something else * (with preemption disabled), FPU for the current task, or a task that * is prevented from running by the current task. */ diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 4a12dfdd317c..940c15ee5650 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -156,9 +156,9 @@ .endm #ifdef CONFIG_CPU_UNRET_ENTRY -#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret" +#define CALL_UNTRAIN_RET "call entry_untrain_ret" #else -#define CALL_ZEN_UNTRAIN_RET "" +#define CALL_UNTRAIN_RET "" #endif /* @@ -166,7 +166,7 @@ * return thunk isn't mapped into the userspace tables (then again, AMD * typically has NO_MELTDOWN). * - * While zen_untrain_ret() doesn't clobber anything but requires stack, + * While retbleed_untrain_ret() doesn't clobber anything but requires stack, * entry_ibpb() will clobber AX, CX, DX. 
* * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point @@ -177,14 +177,9 @@ defined(CONFIG_CPU_SRSO) ANNOTATE_UNRET_END ALTERNATIVE_2 "", \ - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB #endif - -#ifdef CONFIG_CPU_SRSO - ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ - "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS -#endif .endm #else /* __ASSEMBLY__ */ @@ -195,10 +190,21 @@ _ASM_PTR " 999b\n\t" \ ".popsection\n\t" +#ifdef CONFIG_RETHUNK extern void __x86_return_thunk(void); -extern void zen_untrain_ret(void); +#else +static inline void __x86_return_thunk(void) {} +#endif + +extern void retbleed_return_thunk(void); +extern void srso_return_thunk(void); +extern void srso_alias_return_thunk(void); + +extern void retbleed_untrain_ret(void); extern void srso_untrain_ret(void); -extern void srso_untrain_ret_alias(void); +extern void srso_alias_untrain_ret(void); + +extern void entry_untrain_ret(void); extern void entry_ibpb(void); #ifdef CONFIG_RETPOLINE diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 747ccc2ae383..bbbf27cfe701 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -804,10 +804,12 @@ extern u16 get_llc_id(unsigned int cpu); extern u32 amd_get_nodes_per_socket(void); extern u32 amd_get_highest_perf(void); extern bool cpu_has_ibpb_brtype_microcode(void); +extern void amd_clear_divider(void); #else static inline u32 amd_get_nodes_per_socket(void) { return 0; } static inline u32 amd_get_highest_perf(void) { return 0; } static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; } +static inline void amd_clear_divider(void) { } #endif static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) @@ -857,4 +859,6 @@ enum mds_mitigations { MDS_MITIGATION_VMWERV, }; +extern bool gds_ucode_mitigated(void); + #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 1b90eb6ea503..0a0230bd5089 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -73,8 +73,13 @@ static const int amd_erratum_1054[] = static const int amd_zenbleed[] = AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf), AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), + AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf), AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); +static const int amd_div0[] = + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf), + AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf)); + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) { int osvw_id = *erratum++; @@ -1140,6 +1145,11 @@ static void init_amd(struct cpuinfo_x86 *c) check_null_seg_clears_base(c); zenbleed_check(c); + + if (cpu_has_amd_erratum(c, amd_div0)) { + pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); + setup_force_cpu_bug(X86_BUG_DIV0); + } } #ifdef CONFIG_X86_32 @@ -1300,3 +1310,14 @@ void amd_check_microcode(void) { on_each_cpu(zenbleed_check_cpu, NULL, 1); } + +/* + * Issue a DIV 0/1 insn to clear any division data from previous DIV + * operations. 
+ */ +void noinstr amd_clear_divider(void) +{ + asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) + :: "a" (0), "d" (0), "r" (1)); +} +EXPORT_SYMBOL_GPL(amd_clear_divider); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 73dad1400633..0d2c5fe84141 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -62,6 +62,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd); static DEFINE_MUTEX(spec_ctrl_mutex); +void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; + /* Update SPEC_CTRL MSR and its cached copy unconditionally */ static void update_spec_ctrl(u64 val) { @@ -164,8 +166,13 @@ void __init cpu_select_mitigations(void) md_clear_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); - gds_select_mitigation(); + + /* + * srso_select_mitigation() depends and must run after + * retbleed_select_mitigation(). + */ srso_select_mitigation(); + gds_select_mitigation(); } /* @@ -1013,6 +1020,9 @@ do_cmd_auto: setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_UNRET); + if (IS_ENABLED(CONFIG_RETHUNK)) + x86_return_thunk = retbleed_return_thunk; + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) pr_err(RETBLEED_UNTRAIN_MSG); @@ -2388,9 +2398,10 @@ static void __init srso_select_mitigation(void) * Zen1/2 with SMT off aren't vulnerable after the right * IBPB microcode has been applied. */ - if ((boot_cpu_data.x86 < 0x19) && - (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) + if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { setup_force_cpu_cap(X86_FEATURE_SRSO_NO); + return; + } } if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { @@ -2419,11 +2430,15 @@ static void __init srso_select_mitigation(void) * like ftrace, static_call, etc. */ setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_UNRET); - if (boot_cpu_data.x86 == 0x19) + if (boot_cpu_data.x86 == 0x19) { setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); - else + x86_return_thunk = srso_alias_return_thunk; + } else { setup_force_cpu_cap(X86_FEATURE_SRSO); + x86_return_thunk = srso_return_thunk; + } srso_mitigation = SRSO_MITIGATION_SAFE_RET; } else { pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); @@ -2672,6 +2687,9 @@ static ssize_t gds_show_state(char *buf) static ssize_t srso_show_state(char *buf) { + if (boot_cpu_has(X86_FEATURE_SRSO_NO)) + return sysfs_emit(buf, "Mitigation: SMT disabled\n"); + return sysfs_emit(buf, "%s%s\n", srso_strings[srso_mitigation], (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode")); diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 759e1cef5e69..3ad1bf5de737 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -330,7 +330,7 @@ static void fpu_reset_fpstate(void) struct fpu *fpu = ¤t->thread.fpu; fpregs_lock(); - fpu__drop(fpu); + __fpu_invalidate_fpregs_state(fpu); /* * This does not change the actual hardware registers. It just * resets the memory image and sets TIF_NEED_FPU_LOAD so a diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 8bbf37c0bebe..81891f0fff6f 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -809,6 +809,13 @@ void __init fpu__init_system_xstate(void) goto out_disable; } + /* + * CPU capabilities initialization runs before FPU init. So + * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely + * functional, set the feature bit so depending code works. 
+ */ + setup_force_cpu_cap(X86_FEATURE_OSXSAVE); + print_xstate_offset_size(); pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n", xfeatures_mask_all, diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c index 2fc4f96702e6..b48b659ccf6f 100644 --- a/arch/x86/kernel/static_call.c +++ b/arch/x86/kernel/static_call.c @@ -135,6 +135,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform); */ bool __static_call_fixup(void *tramp, u8 op, void *dest) { + unsigned long addr = (unsigned long)tramp; + /* + * Not all .return_sites are a static_call trampoline (most are not). + * Check if the 3 bytes after the return are still kernel text, if not, + * then this definitely is not a trampoline and we need not worry + * further. + * + * This avoids the memcmp() below tripping over pagefaults etc.. + */ + if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) && + !kernel_text_address(addr + 7)) + return false; + if (memcmp(tramp+5, tramp_ud, 3)) { /* Not a trampoline site, not our problem. */ return false; diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 50aaf0cd8f46..ca1a7595edac 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -134,18 +134,18 @@ SECTIONS KPROBES_TEXT ALIGN_ENTRY_TEXT_BEGIN #ifdef CONFIG_CPU_SRSO - *(.text.__x86.rethunk_untrain) + *(.text..__x86.rethunk_untrain) #endif ENTRY_TEXT #ifdef CONFIG_CPU_SRSO /* - * See the comment above srso_untrain_ret_alias()'s + * See the comment above srso_alias_untrain_ret()'s * definition. */ - . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); - *(.text.__x86.rethunk_safe) + . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); + *(.text..__x86.rethunk_safe) #endif ALIGN_ENTRY_TEXT_END SOFTIRQENTRY_TEXT @@ -155,8 +155,8 @@ SECTIONS #ifdef CONFIG_RETPOLINE __indirect_thunk_start = .; - *(.text.__x86.indirect_thunk) - *(.text.__x86.return_thunk) + *(.text..__x86.indirect_thunk) + *(.text..__x86.return_thunk) __indirect_thunk_end = .; #endif } :text =0xcccc @@ -511,18 +511,24 @@ INIT_PER_CPU(irq_stack_backing_store); "fixed_percpu_data is not at start of per-cpu area"); #endif - #ifdef CONFIG_RETHUNK -. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned"); +#ifdef CONFIG_RETHUNK +. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned"); . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); #endif #ifdef CONFIG_CPU_SRSO /* - * GNU ld cannot do XOR so do: (A | B) - (A & B) in order to compute the XOR + * GNU ld cannot do XOR until 2.41. + * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1 + * + * LLVM lld cannot do XOR until lld-17. + * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb + * + * Instead do: (A | B) - (A & B) in order to compute the XOR * of the two function addresses: */ -. = ASSERT(((srso_untrain_ret_alias | srso_safe_ret_alias) - - (srso_untrain_ret_alias & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), +. 
= ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) - + (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), "SRSO function pair won't alias"); #endif diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 6c2bb60ccd88..7a64fb238044 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -10,7 +10,7 @@ #include <asm/cmpxchg.h> #include <trace/events/kvm.h> -static bool __read_mostly tdp_mmu_enabled = true; +static bool __read_mostly tdp_mmu_enabled = false; module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644); /* Initializes the TDP MMU for the VM, if enabled. */ diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index d63c3843e493..8e9a6c41f9ee 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1452,6 +1452,8 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); + amd_clear_divider(); + if (sev_es_guest(vcpu->kvm)) sev_es_unmap_ghcb(svm); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2686c4dcdb1a..a26200c3e82b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -303,8 +303,6 @@ static struct kmem_cache *x86_fpu_cache; static struct kmem_cache *x86_emulator_cache; -extern bool gds_ucode_mitigated(void); - /* * When called, it means the previous get/set msr reached an invalid msr. * Return true if we want to ignore/silent this failed msr access. diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 5f7eed97487e..6f5321b36dbb 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -11,7 +11,7 @@ #include <asm/frame.h> #include <asm/nops.h> - .section .text.__x86.indirect_thunk + .section .text..__x86.indirect_thunk .macro RETPOLINE reg ANNOTATE_INTRA_FUNCTION_CALL @@ -75,74 +75,105 @@ SYM_CODE_END(__x86_indirect_thunk_array) #ifdef CONFIG_RETHUNK /* - * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at + * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at * special addresses: * - * - srso_untrain_ret_alias() is 2M aligned - * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14 + * - srso_alias_untrain_ret() is 2M aligned + * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14 * and 20 in its virtual address are set (while those bits in the - * srso_untrain_ret_alias() function are cleared). + * srso_alias_untrain_ret() function are cleared). * * This guarantees that those two addresses will alias in the branch * target buffer of Zen3/4 generations, leading to any potential * poisoned entries at that BTB slot to get evicted. * - * As a result, srso_safe_ret_alias() becomes a safe return. + * As a result, srso_alias_safe_ret() becomes a safe return. 
*/ #ifdef CONFIG_CPU_SRSO - .section .text.__x86.rethunk_untrain + .section .text..__x86.rethunk_untrain -SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + UNWIND_HINT_FUNC ASM_NOP2 lfence - jmp __x86_return_thunk -SYM_FUNC_END(srso_untrain_ret_alias) -__EXPORT_THUNK(srso_untrain_ret_alias) + jmp srso_alias_return_thunk +SYM_FUNC_END(srso_alias_untrain_ret) +__EXPORT_THUNK(srso_alias_untrain_ret) - .section .text.__x86.rethunk_safe + .section .text..__x86.rethunk_safe +#else +/* dummy definition for alternatives */ +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + ANNOTATE_UNRET_SAFE + ret + int3 +SYM_FUNC_END(srso_alias_untrain_ret) #endif -/* Needs a definition for the __x86_return_thunk alternative below. */ -SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) -#ifdef CONFIG_CPU_SRSO - add $8, %_ASM_SP +SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE) + lea 8(%_ASM_SP), %_ASM_SP UNWIND_HINT_FUNC -#endif ANNOTATE_UNRET_SAFE ret int3 -SYM_FUNC_END(srso_safe_ret_alias) +SYM_FUNC_END(srso_alias_safe_ret) - .section .text.__x86.return_thunk + .section .text..__x86.return_thunk + +SYM_CODE_START(srso_alias_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + call srso_alias_safe_ret + ud2 +SYM_CODE_END(srso_alias_return_thunk) + +/* + * Some generic notes on the untraining sequences: + * + * They are interchangeable when it comes to flushing potentially wrong + * RET predictions from the BTB. + * + * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the + * Retbleed sequence because the return sequence done there + * (srso_safe_ret()) is longer and the return sequence must fully nest + * (end before) the untraining sequence. Therefore, the untraining + * sequence must fully overlap the return sequence. + * + * Regarding alignment - the instructions which need to be untrained, + * must all start at a cacheline boundary for Zen1/2 generations. That + * is, instruction sequences starting at srso_safe_ret() and + * the respective instruction sequences at retbleed_return_thunk() + * must start at a cacheline boundary. + */ /* * Safety details here pertain to the AMD Zen{1,2} microarchitecture: - * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for + * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for * alignment within the BTB. - * 2) The instruction at zen_untrain_ret must contain, and not + * 2) The instruction at retbleed_untrain_ret must contain, and not * end with, the 0xc3 byte of the RET. * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread * from re-poisioning the BTB prediction. */ .align 64 - .skip 64 - (__ret - zen_untrain_ret), 0xcc -SYM_FUNC_START_NOALIGN(zen_untrain_ret); + .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc +SYM_FUNC_START_NOALIGN(retbleed_untrain_ret); /* - * As executed from zen_untrain_ret, this is: + * As executed from retbleed_untrain_ret, this is: * * TEST $0xcc, %bl * LFENCE - * JMP __x86_return_thunk + * JMP retbleed_return_thunk * * Executing the TEST instruction has a side effect of evicting any BTB * prediction (potentially attacker controlled) attached to the RET, as - * __x86_return_thunk + 1 isn't an instruction boundary at the moment. + * retbleed_return_thunk + 1 isn't an instruction boundary at the moment. */ .byte 0xf6 /* - * As executed from __x86_return_thunk, this is a plain RET. + * As executed from retbleed_return_thunk, this is a plain RET. 
* * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8. * @@ -154,13 +185,13 @@ SYM_FUNC_START_NOALIGN(zen_untrain_ret); * With SMT enabled and STIBP active, a sibling thread cannot poison * RET's prediction to a type of its choice, but can evict the * prediction due to competitive sharing. If the prediction is - * evicted, __x86_return_thunk will suffer Straight Line Speculation + * evicted, retbleed_return_thunk will suffer Straight Line Speculation * which will be contained safely by the INT3. */ -SYM_INNER_LABEL(__ret, SYM_L_GLOBAL) +SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL) ret int3 -SYM_CODE_END(__ret) +SYM_CODE_END(retbleed_return_thunk) /* * Ensure the TEST decoding / BTB invalidation is complete. @@ -171,16 +202,16 @@ SYM_CODE_END(__ret) * Jump back and execute the RET in the middle of the TEST instruction. * INT3 is for SLS protection. */ - jmp __ret + jmp retbleed_return_thunk int3 -SYM_FUNC_END(zen_untrain_ret) -__EXPORT_THUNK(zen_untrain_ret) +SYM_FUNC_END(retbleed_untrain_ret) +__EXPORT_THUNK(retbleed_untrain_ret) /* - * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret() + * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret() * above. On kernel entry, srso_untrain_ret() is executed which is a * - * movabs $0xccccccc308c48348,%rax + * movabs $0xccccc30824648d48,%rax * * and when the return thunk executes the inner label srso_safe_ret() * later, it is a stack manipulation and a RET which is mispredicted and @@ -191,22 +222,44 @@ __EXPORT_THUNK(zen_untrain_ret) SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) .byte 0x48, 0xb8 +/* + * This forces the function return instruction to speculate into a trap + * (UD2 in srso_return_thunk() below). This RET will then mispredict + * and execution will continue at the return site read from the top of + * the stack. 
+ */ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) - add $8, %_ASM_SP + lea 8(%_ASM_SP), %_ASM_SP ret int3 int3 - int3 + /* end of movabs */ lfence call srso_safe_ret - int3 + ud2 SYM_CODE_END(srso_safe_ret) SYM_FUNC_END(srso_untrain_ret) __EXPORT_THUNK(srso_untrain_ret) -SYM_FUNC_START(__x86_return_thunk) - ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \ - "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS +SYM_CODE_START(srso_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + call srso_safe_ret + ud2 +SYM_CODE_END(srso_return_thunk) + +SYM_FUNC_START(entry_untrain_ret) + ALTERNATIVE_2 "jmp retbleed_untrain_ret", \ + "jmp srso_untrain_ret", X86_FEATURE_SRSO, \ + "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS +SYM_FUNC_END(entry_untrain_ret) +__EXPORT_THUNK(entry_untrain_ret) + +SYM_CODE_START(__x86_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + ANNOTATE_UNRET_SAFE + ret int3 SYM_CODE_END(__x86_return_thunk) EXPORT_SYMBOL(__x86_return_thunk) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index a4749b6c3d73..cbbed43baf05 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -6412,6 +6412,7 @@ err_init_binder_device_failed: err_alloc_device_names_failed: debugfs_remove_recursive(binder_debugfs_dir_entry_root); + binder_alloc_shrinker_exit(); return ret; } diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index db01c5d423e6..54cee2b31c8e 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -1091,6 +1091,12 @@ int binder_alloc_shrinker_init(void) return ret; } +void binder_alloc_shrinker_exit(void) +{ + unregister_shrinker(&binder_shrinker); + list_lru_destroy(&binder_alloc_lru); +} + /** * check_buffer() - verify that buffer/offset is safe to access * @alloc: binder_alloc for this proc diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index 7dea57a84c79..399f2b269f2c 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -131,6 +131,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, int pid); extern void binder_alloc_init(struct binder_alloc *alloc); extern int binder_alloc_shrinker_init(void); +extern void binder_alloc_shrinker_exit(void); extern void binder_alloc_vma_close(struct binder_alloc *alloc); extern struct binder_buffer * binder_alloc_prepare_to_free(struct binder_alloc *alloc, diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index 54292cdd7808..922ed457db19 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -25,8 +25,11 @@ extern u64 pm_runtime_active_time(struct device *dev); #define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0) #define WAKE_IRQ_DEDICATED_MANAGED BIT(1) +#define WAKE_IRQ_DEDICATED_REVERSE BIT(2) #define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \ - WAKE_IRQ_DEDICATED_MANAGED) + WAKE_IRQ_DEDICATED_MANAGED | \ + WAKE_IRQ_DEDICATED_REVERSE) +#define WAKE_IRQ_DEDICATED_ENABLED BIT(3) struct wake_irq { struct device *dev; @@ -39,7 +42,8 @@ extern void dev_pm_arm_wake_irq(struct wake_irq *wirq); extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq); extern void dev_pm_enable_wake_irq_check(struct device *dev, bool can_change_status); -extern void dev_pm_disable_wake_irq_check(struct device *dev); +extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable); +extern void dev_pm_enable_wake_irq_complete(struct device *dev); #ifdef CONFIG_PM_SLEEP diff --git 
a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index c1142a7a4fe6..5824d41a0b74 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -673,6 +673,8 @@ static int rpm_suspend(struct device *dev, int rpmflags) if (retval) goto fail; + dev_pm_enable_wake_irq_complete(dev); + no_callback: __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_deactivate_timer(dev); @@ -718,7 +720,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) return retval; fail: - dev_pm_disable_wake_irq_check(dev); + dev_pm_disable_wake_irq_check(dev, true); __update_runtime_status(dev, RPM_ACTIVE); dev->power.deferred_resume = false; wake_up_all(&dev->power.wait_queue); @@ -901,7 +903,7 @@ static int rpm_resume(struct device *dev, int rpmflags) callback = RPM_GET_CALLBACK(dev, runtime_resume); - dev_pm_disable_wake_irq_check(dev); + dev_pm_disable_wake_irq_check(dev, false); retval = rpm_callback(callback, dev); if (retval) { __update_runtime_status(dev, RPM_SUSPENDED); diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index b91a3a9bf9f6..6f2cdd8643af 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -142,24 +142,7 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq) return IRQ_HANDLED; } -/** - * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt - * @dev: Device entry - * @irq: Device wake-up interrupt - * - * Unless your hardware has separate wake-up interrupts in addition - * to the device IO interrupts, you don't need this. - * - * Sets up a threaded interrupt handler for a device that has - * a dedicated wake-up interrupt in addition to the device IO - * interrupt. - * - * The interrupt starts disabled, and needs to be managed for - * the device by the bus code or the device driver using - * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq() - * functions. - */ -int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) +static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag) { struct wake_irq *wirq; int err; @@ -197,7 +180,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) if (err) goto err_free_irq; - wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED; + wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag; return err; @@ -210,9 +193,58 @@ err_free: return err; } + + +/** + * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt + * @dev: Device entry + * @irq: Device wake-up interrupt + * + * Unless your hardware has separate wake-up interrupts in addition + * to the device IO interrupts, you don't need this. + * + * Sets up a threaded interrupt handler for a device that has + * a dedicated wake-up interrupt in addition to the device IO + * interrupt. + * + * The interrupt starts disabled, and needs to be managed for + * the device by the bus code or the device driver using + * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*() + * functions. + */ +int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) +{ + return __dev_pm_set_dedicated_wake_irq(dev, irq, 0); +} EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq); /** + * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt + * with reverse enable ordering + * @dev: Device entry + * @irq: Device wake-up interrupt + * + * Unless your hardware has separate wake-up interrupts in addition + * to the device IO interrupts, you don't need this. 
+ * + * Sets up a threaded interrupt handler for a device that has a dedicated + * wake-up interrupt in addition to the device IO interrupt. It sets + * the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend() + * to enable dedicated wake-up interrupt after running the runtime suspend + * callback for @dev. + * + * The interrupt starts disabled, and needs to be managed for + * the device by the bus code or the device driver using + * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*() + * functions. + */ +int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq) +{ + return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE); +} +EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse); + +/** * dev_pm_enable_wake_irq - Enable device wake-up interrupt * @dev: Device * @@ -282,25 +314,56 @@ void dev_pm_enable_wake_irq_check(struct device *dev, return; enable: - enable_irq(wirq->irq); + if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) { + enable_irq(wirq->irq); + wirq->status |= WAKE_IRQ_DEDICATED_ENABLED; + } } /** * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt * @dev: Device + * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE * * Disables wake-up interrupt conditionally based on status. * Should be only called from rpm_suspend() and rpm_resume() path. */ -void dev_pm_disable_wake_irq_check(struct device *dev) +void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable) { struct wake_irq *wirq = dev->power.wakeirq; if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK)) return; - if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) + if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) + return; + + if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) { + wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED; disable_irq_nosync(wirq->irq); + } +} + +/** + * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before + * @dev: Device using the wake IRQ + * + * Enable wake IRQ conditionally based on status, mainly used if want to + * enable wake IRQ after running ->runtime_suspend() which depends on + * WAKE_IRQ_DEDICATED_REVERSE. + * + * Should be only called from rpm_suspend() path. 
+ */ +void dev_pm_enable_wake_irq_complete(struct device *dev) +{ + struct wake_irq *wirq = dev->power.wakeirq; + + if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK)) + return; + + if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED && + wirq->status & WAKE_IRQ_DEDICATED_REVERSE) + enable_irq(wirq->irq); } /** @@ -317,7 +380,7 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq) if (device_may_wakeup(wirq->dev)) { if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && - !pm_runtime_status_suspended(wirq->dev)) + !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED)) enable_irq(wirq->irq); enable_irq_wake(wirq->irq); @@ -340,7 +403,7 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq) disable_irq_wake(wirq->irq); if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && - !pm_runtime_status_suspended(wirq->dev)) + !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED)) disable_irq_nosync(wirq->irq); } } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index c269c552a43a..fe8bdbf4616b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3677,7 +3677,7 @@ static int rbd_lock(struct rbd_device *rbd_dev) ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie, RBD_LOCK_TAG, "", 0); - if (ret) + if (ret && ret != -EEXIST) return ret; __rbd_lock(rbd_dev, cookie); @@ -3880,7 +3880,7 @@ static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev) &rbd_dev->header_oloc, RBD_LOCK_NAME, &lock_type, &lock_tag, &lockers, &num_lockers); if (ret) { - rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret); + rbd_warn(rbd_dev, "failed to get header lockers: %d", ret); return ERR_PTR(ret); } @@ -3942,8 +3942,10 @@ static int find_watcher(struct rbd_device *rbd_dev, ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, &watchers, &num_watchers); - if (ret) + if (ret) { + rbd_warn(rbd_dev, "failed to get watchers: %d", ret); return ret; + } sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie); for (i = 0; i < num_watchers; i++) { @@ -3987,8 +3989,12 @@ static int rbd_try_lock(struct rbd_device *rbd_dev) locker = refreshed_locker = NULL; ret = rbd_lock(rbd_dev); - if (ret != -EBUSY) + if (!ret) + goto out; + if (ret != -EBUSY) { + rbd_warn(rbd_dev, "failed to lock header: %d", ret); goto out; + } /* determine if the current lock holder is still alive */ locker = get_lock_owner_info(rbd_dev); @@ -4091,11 +4097,8 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev) ret = rbd_try_lock(rbd_dev); if (ret < 0) { - rbd_warn(rbd_dev, "failed to lock header: %d", ret); - if (ret == -EBLOCKLISTED) - goto out; - - ret = 1; /* request lock anyway */ + rbd_warn(rbd_dev, "failed to acquire lock: %d", ret); + goto out; } if (ret > 0) { up_write(&rbd_dev->lock_rwsem); @@ -6631,12 +6634,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) cancel_delayed_work_sync(&rbd_dev->lock_dwork); if (!ret) ret = -ETIMEDOUT; - } - if (ret) { - rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret); - return ret; + rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret); } + if (ret) + return ret; /* * The lock may have been released by now, unless automatic lock diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 9eb2267bd3a0..15d253325fd8 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -475,6 +475,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | 
BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 7d508f905003..71b541538801 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -2089,6 +2089,8 @@ static int sysc_reset(struct sysc *ddata) sysc_val = sysc_read_sysconfig(ddata); sysc_val |= sysc_mask; sysc_write(ddata, sysc_offset, sysc_val); + /* Flush posted write */ + sysc_val = sysc_read_sysconfig(ddata); } if (ddata->cfg.srst_udelay) diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c index 4fb4fd4b06bd..737aa70e2cb3 100644 --- a/drivers/clk/clk-devres.c +++ b/drivers/clk/clk-devres.c @@ -205,18 +205,19 @@ EXPORT_SYMBOL(devm_clk_put); struct clk *devm_get_clk_from_child(struct device *dev, struct device_node *np, const char *con_id) { - struct clk **ptr, *clk; + struct devm_clk_state *state; + struct clk *clk; - ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) + state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL); + if (!state) return ERR_PTR(-ENOMEM); clk = of_clk_get_by_name(np, con_id); if (!IS_ERR(clk)) { - *ptr = clk; - devres_add(dev, ptr); + state->clk = clk; + devres_add(dev, state); } else { - devres_free(ptr); + devres_free(state); } return clk; diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 348b3a9170fa..7f5ed1aa7a9f 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = { */ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) { + LIST_HEAD(signalled); struct sync_pt *pt, *next; trace_sync_timeline(obj); @@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) if (!timeline_fence_signaled(&pt->base)) break; - list_del_init(&pt->link); + dma_fence_get(&pt->base); + + list_move_tail(&pt->link, &signalled); rb_erase(&pt->node, &obj->pt_tree); - /* - * A signal callback may release the last reference to this - * fence, causing it to be freed. That operation has to be - * last to avoid a use after free inside this loop, and must - * be after we remove the fence from the timeline in order to - * prevent deadlocking on timeline->lock inside - * timeline_fence_release(). 
- */ dma_fence_signal_locked(&pt->base); } spin_unlock_irq(&obj->lock); + + list_for_each_entry_safe(pt, next, &signalled, link) { + list_del_init(&pt->link); + dma_fence_put(&pt->base); + } } /** diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c index e12b754e6398..60d3c5f09ad6 100644 --- a/drivers/dma/mcf-edma.c +++ b/drivers/dma/mcf-edma.c @@ -191,7 +191,13 @@ static int mcf_edma_probe(struct platform_device *pdev) return -EINVAL; } - chans = pdata->dma_channels; + if (!pdata->dma_channels) { + dev_info(&pdev->dev, "setting default channel number to 64"); + chans = 64; + } else { + chans = pdata->dma_channels; + } + len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans; mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); if (!mcf_edma) @@ -203,11 +209,6 @@ static int mcf_edma_probe(struct platform_device *pdev) mcf_edma->drvdata = &mcf_data; mcf_edma->big_endian = 1; - if (!mcf_edma->n_chans) { - dev_info(&pdev->dev, "setting default channel number to 64"); - mcf_edma->n_chans = 64; - } - mutex_init(&mcf_edma->fsl_edma_mutex); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index b9bc82d6a162..ec8a1565630b 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -404,6 +404,12 @@ enum desc_status { */ BUSY, /* + * Pause was called while descriptor was BUSY. Due to hardware + * limitations, only termination is possible for descriptors + * that have been paused. + */ + PAUSED, + /* * Sitting on the channel work_list but xfer done * by PL330 core */ @@ -2041,7 +2047,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch) list_for_each_entry(desc, &pch->work_list, node) { /* If already submitted */ - if (desc->status == BUSY) + if (desc->status == BUSY || desc->status == PAUSED) continue; ret = pl330_submit_req(pch->thread, desc); @@ -2326,6 +2332,7 @@ static int pl330_pause(struct dma_chan *chan) { struct dma_pl330_chan *pch = to_pchan(chan); struct pl330_dmac *pl330 = pch->dmac; + struct dma_pl330_desc *desc; unsigned long flags; pm_runtime_get_sync(pl330->ddma.dev); @@ -2335,6 +2342,10 @@ static int pl330_pause(struct dma_chan *chan) _stop(pch->thread); spin_unlock(&pl330->lock); + list_for_each_entry(desc, &pch->work_list, node) { + if (desc->status == BUSY) + desc->status = PAUSED; + } spin_unlock_irqrestore(&pch->lock, flags); pm_runtime_mark_last_busy(pl330->ddma.dev); pm_runtime_put_autosuspend(pl330->ddma.dev); @@ -2425,7 +2436,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, else if (running && desc == running) transferred = pl330_get_current_xferred_count(pch, desc); - else if (desc->status == BUSY) + else if (desc->status == BUSY || desc->status == PAUSED) /* * Busy but not running means either just enqueued, * or finished and not yet marked done @@ -2442,6 +2453,9 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, case DONE: ret = DMA_COMPLETE; break; + case PAUSED: + ret = DMA_PAUSED; + break; case PREP: case BUSY: ret = DMA_IN_PROGRESS; diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 4c3fd2eed1da..beba0a56bb9a 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -488,7 +488,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net, struct sk_buff *skb, u16 source_node_id, bool is_broadcast, u16 ether_type) { - int status; + int status, len; switch (ether_type) { case ETH_P_ARP: @@ -542,13 +542,15 @@ static int fwnet_finish_incoming_packet(struct net_device *net, } skb->protocol = protocol; } + + len = skb->len; status = 
netif_rx(skb); if (status == NET_RX_DROP) { net->stats.rx_errors++; net->stats.rx_dropped++; } else { net->stats.rx_packets++; - net->stats.rx_bytes += skb->len; + net->stats.rx_bytes += len; } return 0; diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c index ed9b83aee8bd..d1400de17eca 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -106,8 +106,10 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, return -ENOMEM; shmem = of_parse_phandle(cdev->of_node, "shmem", idx); - if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) + if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) { + of_node_put(shmem); return -ENXIO; + } ret = of_address_to_resource(shmem, 0, &res); of_node_put(shmem); diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index 4effecc3bb46..ea1caf70e8df 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -76,8 +76,10 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, return -ENOMEM; np = of_parse_phandle(cdev->of_node, "shmem", 0); - if (!of_device_is_compatible(np, "arm,scmi-shmem")) + if (!of_device_is_compatible(np, "arm,scmi-shmem")) { + of_node_put(np); return -ENXIO; + } ret = of_address_to_resource(np, 0, &res); of_node_put(np); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 2fd4d8ad7e40..4b01188385b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1541,15 +1541,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev, continue; r = dma_fence_wait_timeout(fence, true, timeout); + if (r > 0 && fence->error) + r = fence->error; + dma_fence_put(fence); if (r < 0) return r; if (r == 0) break; - - if (fence->error) - return fence->error; } memset(wait, 0, sizeof(*wait)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 642acfc9f0b1..2b5766d3789b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4066,6 +4066,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) amdgpu_fbdev_set_suspend(adev, 1); cancel_delayed_work_sync(&adev->delayed_init_work); + flush_delayed_work(&adev->gfx.gfx_off_delay_work); amdgpu_ras_suspend(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 5e32906f9819..252712f930f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -579,15 +579,8 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state) { - /* If going to s2idle, no need to wait */ - if (adev->in_s0ix) { - if (!amdgpu_dpm_set_powergating_by_smu(adev, - AMD_IP_BLOCK_TYPE_GFX, true)) - adev->gfx.gfx_off_state = true; - } else { - schedule_delayed_work(&adev->gfx.gfx_off_delay_work, + schedule_delayed_work(&adev->gfx.gfx_off_delay_work, delay); - } } } else { if (adev->gfx.gfx_off_req_count == 0) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 0e4554950e07..788611a50a68 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2260,6 +2260,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); bo_va->ref_count = 1; + 
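+	/*
+	 * dma_fence_get_stub() returns a reference to a global, already
+	 * signalled fence, so code that waits on last_pt_update (or on
+	 * vm->last_update below) needs no special case for "no update
+	 * has happened yet".
+	 */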
bo_va->last_pt_update = dma_fence_get_stub(); INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); @@ -2974,7 +2975,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->update_funcs = &amdgpu_vm_cpu_funcs; else vm->update_funcs = &amdgpu_vm_sdma_funcs; - vm->last_update = NULL; + + vm->last_update = dma_fence_get_stub(); vm->last_unlocked = dma_fence_get_stub(); mutex_init(&vm->eviction_lock); @@ -3117,7 +3119,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->update_funcs = &amdgpu_vm_sdma_funcs; } dma_fence_put(vm->last_update); - vm->last_update = NULL; + vm->last_update = dma_fence_get_stub(); vm->is_compute_context = true; /* Free the shadow bo for compute VM */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7bd38d927b18..4cf33abfb7cc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -8566,27 +8566,55 @@ is_scaling_state_different(const struct dm_connector_state *dm_state, } #ifdef CONFIG_DRM_AMD_DC_HDCP -static bool is_content_protection_different(struct drm_connector_state *state, - const struct drm_connector_state *old_state, - const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) +static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, + struct drm_crtc_state *old_crtc_state, + struct drm_connector_state *new_conn_state, + struct drm_connector_state *old_conn_state, + const struct drm_connector *connector, + struct hdcp_workqueue *hdcp_w) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); - /* Handle: Type0/1 change */ - if (old_state->hdcp_content_type != state->hdcp_content_type && - state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { - state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", + connector->index, connector->status, connector->dpms); + pr_debug("[HDCP_DM] state protection old: %x new: %x\n", + old_conn_state->content_protection, new_conn_state->content_protection); + + if (old_crtc_state) + pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + old_crtc_state->enable, + old_crtc_state->active, + old_crtc_state->mode_changed, + old_crtc_state->active_changed, + old_crtc_state->connectors_changed); + + if (new_crtc_state) + pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + new_crtc_state->enable, + new_crtc_state->active, + new_crtc_state->mode_changed, + new_crtc_state->active_changed, + new_crtc_state->connectors_changed); + + /* hdcp content type change */ + if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && + new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__); return true; } - /* CP is being re enabled, ignore this - * - * Handles: ENABLED -> DESIRED - */ - if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && - state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { - state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; + /* CP is being re enabled, ignore this */ + if (old_conn_state->content_protection == 
DRM_MODE_CONTENT_PROTECTION_ENABLED && + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { + if (new_crtc_state && new_crtc_state->mode_changed) { + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__); + return true; + }; + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; + pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); return false; } @@ -8594,9 +8622,9 @@ static bool is_content_protection_different(struct drm_connector_state *state, * * Handles: UNDESIRED -> ENABLED */ - if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && - state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) - state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; /* Stream removed and re-enabled * @@ -8606,10 +8634,12 @@ static bool is_content_protection_different(struct drm_connector_state *state, * * Handles: DESIRED -> DESIRED (Special case) */ - if (!(old_state->crtc && old_state->crtc->enabled) && - state->crtc && state->crtc->enabled && + if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && + new_conn_state->crtc && new_conn_state->crtc->enabled && connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { dm_con_state->update_hdcp = false; + pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", + __func__); return true; } @@ -8621,35 +8651,42 @@ static bool is_content_protection_different(struct drm_connector_state *state, * * Handles: DESIRED -> DESIRED (Special case) */ - if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && - connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { + if (dm_con_state->update_hdcp && + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && + connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { dm_con_state->update_hdcp = false; + pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", + __func__); return true; } - /* - * Handles: UNDESIRED -> UNDESIRED - * DESIRED -> DESIRED - * ENABLED -> ENABLED - */ - if (old_state->content_protection == state->content_protection) + if (old_conn_state->content_protection == new_conn_state->content_protection) { + if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { + if (new_crtc_state && new_crtc_state->mode_changed) { + pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", + __func__); + return true; + }; + pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", + __func__); + return false; + }; + + pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); return false; + } - /* - * Handles: UNDESIRED -> DESIRED - * DESIRED -> UNDESIRED - * ENABLED -> UNDESIRED - */ - if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) + if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { + pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", + __func__); return true; + } - /* - * Handles: DESIRED -> ENABLED - */ + pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", 
		__func__);
 	return false;
 }
-
 #endif
+
 static void remove_stream(struct amdgpu_device *adev,
 			  struct amdgpu_crtc *acrtc,
 			  struct dc_stream_state *stream)
@@ -9597,10 +9634,67 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
+		if (!adev->dm.hdcp_workqueue)
+			continue;
+
+		pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
+
+		if (!connector)
+			continue;
+
+		pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+			connector->index, connector->status, connector->dpms);
+		pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+			old_con_state->content_protection, new_con_state->content_protection);
+
+		if (aconnector->dc_sink) {
+			if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+				aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
+				pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+				aconnector->dc_sink->edid_caps.display_name);
+			}
+		}
+
 		new_crtc_state = NULL;
+		old_crtc_state = NULL;
 
-		if (acrtc)
+		if (acrtc) {
 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+		}
+
+		if (old_crtc_state)
+			pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+				old_crtc_state->enable,
+				old_crtc_state->active,
+				old_crtc_state->mode_changed,
+				old_crtc_state->active_changed,
+				old_crtc_state->connectors_changed);
+
+		if (new_crtc_state)
+			pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+				new_crtc_state->enable,
+				new_crtc_state->active,
+				new_crtc_state->mode_changed,
+				new_crtc_state->active_changed,
+				new_crtc_state->connectors_changed);
+	}
+
+	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+		if (!adev->dm.hdcp_workqueue)
+			continue;
+
+		new_crtc_state = NULL;
+		old_crtc_state = NULL;
+
+		if (acrtc) {
+			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+		}
 
 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -9612,11 +9706,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 			continue;
 		}
 
-		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
+		if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
+						    old_con_state, connector, adev->dm.hdcp_workqueue)) {
+			/* when display is unplugged from mst hub, connector will
+			 * be destroyed within dm_dp_mst_connector_destroy. connector
+			 * hdcp properties, like type, undesired, desired, enabled,
+			 * will be lost. So, save hdcp properties into hdcp_work within
+			 * amdgpu_dm_atomic_commit_tail. If the same display is
+			 * plugged back with same display index, its hdcp properties
+			 * will be retrieved from hdcp_work within dm_dp_mst_get_modes
+			 */
+
+			bool enable_encryption = false;
+
+			if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+				enable_encryption = true;
+
+			if (aconnector->dc_link && aconnector->dc_sink &&
+			    aconnector->dc_link->type == dc_connection_mst_branch) {
+				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+				struct hdcp_workqueue *hdcp_w =
+					&hdcp_work[aconnector->dc_link->link_index];
+
+				hdcp_w->hdcp_content_type[connector->index] =
+					new_con_state->hdcp_content_type;
+				hdcp_w->content_protection[connector->index] =
+					new_con_state->content_protection;
+			}
+
+			if (new_crtc_state && new_crtc_state->mode_changed &&
+			    new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
+				enable_encryption = true;
+
+			DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+
 			hdcp_update_display(
 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
-				new_con_state->hdcp_content_type,
-				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
+				new_con_state->hdcp_content_type, enable_encryption);
+		}
 	}
 #endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 09294ff122fe..bbbf7d0eff82 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -52,6 +52,20 @@ struct hdcp_workqueue {
 	struct mod_hdcp_link link;
 
 	enum mod_hdcp_encryption_status encryption_status;
+
+	/* when display is unplugged from mst hub, connector will be
+	 * destroyed within dm_dp_mst_connector_destroy. connector
+	 * hdcp properties, like type, undesired, desired, enabled,
+	 * will be lost. So, save hdcp properties into hdcp_work within
+	 * amdgpu_dm_atomic_commit_tail. If the same display is
+	 * plugged back with same display index, its hdcp properties
+	 * will be retrieved from hdcp_work within dm_dp_mst_get_modes
+	 */
+	/* un-desired, desired, enabled */
+	unsigned int content_protection[AMDGPU_DM_MAX_DISPLAY_INDEX];
+	/* hdcp1.x, hdcp2.x */
+	unsigned int hdcp_content_type[AMDGPU_DM_MAX_DISPLAY_INDEX];
+
 	uint8_t max_link;
 
 	uint8_t *srm;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 7a3fee71a867..0b58a9386449 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -32,6 +32,10 @@
 #include "amdgpu_dm.h"
 #include "amdgpu_dm_mst_types.h"
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "amdgpu_dm_hdcp.h"
+#endif
+
 #include "dc.h"
 #include "dm_helpers.h"
 
@@ -315,6 +319,32 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 		/* dc_link_add_remote_sink returns a new reference */
 		aconnector->dc_sink = dc_sink;
 
+		/* when display is unplugged from mst hub, connector will be
+		 * destroyed within dm_dp_mst_connector_destroy. connector
+		 * hdcp properties, like type, undesired, desired, enabled,
+		 * will be lost. So, save hdcp properties into hdcp_work within
+		 * amdgpu_dm_atomic_commit_tail. If the same display is
+		 * plugged back with same display index, its hdcp properties
+		 * will be retrieved from hdcp_work within dm_dp_mst_get_modes
+		 */
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+		if (aconnector->dc_sink && connector->state) {
+			struct drm_device *dev = connector->dev;
+			struct amdgpu_device *adev = drm_to_adev(dev);
+
+			if (adev->dm.hdcp_workqueue) {
+				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+				struct hdcp_workqueue *hdcp_w =
+					&hdcp_work[aconnector->dc_link->link_index];
+
+				connector->state->hdcp_content_type =
+					hdcp_w->hdcp_content_type[connector->index];
+				connector->state->content_protection =
+					hdcp_w->content_protection[connector->index];
+			}
+		}
+#endif
+
 		if (aconnector->dc_sink) {
 			amdgpu_dm_update_freesync_caps(
 					connector, aconnector->edid);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 73457c32f3e7..aa5a1fa68da0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3142,7 +3142,9 @@ void dcn10_wait_for_mpcc_disconnect(
 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
 
-			res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+			if (pipe_ctx->stream_res.tg &&
+			    pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
 			hubp->funcs->set_blank(hubp, true);
 		}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 23a52d47e61c..0601c17426af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -355,8 +355,11 @@ void dpp3_set_cursor_attributes(
 	int cur_rom_en = 0;
 
 	if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
-		color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
-		cur_rom_en = 1;
+		color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
+		if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+			cur_rom_en = 1;
+		}
+	}
 
 	REG_UPDATE_3(CURSOR0_CONTROL,
 			CUR0_MODE, color_format,
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 0c2968052b66..54f1ab3071f9 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -591,7 +591,13 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
 	int ret;
 
 	if (obj->import_attach) {
+		/* Reset both vm_ops and vm_private_data, so we don't end up with
+		 * vm_ops pointing to our implementation if the dma-buf backend
+		 * doesn't set those fields.
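+		 * dma_buf_mmap() forwards the mapping to the exporter, which
+		 * is then expected to install its own vm_ops, so no stale
+		 * pointer can be left behind.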
+		 */
 		vma->vm_private_data = NULL;
+		vma->vm_ops = NULL;
+
 		ret = dma_buf_mmap(obj->dma_buf, vma, 0);
 
 		/* Drop the reference drm_gem_mmap_obj() acquired.*/
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 2f640b9fdf4a..3034ce392ac1 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -447,8 +447,11 @@ int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
 		}
 	} while (unlikely(is_barrier(active)));
 
-	if (!__i915_active_fence_set(active, fence))
+	fence = __i915_active_fence_set(active, fence);
+	if (!fence)
 		__i915_active_acquire(ref);
+	else
+		dma_fence_put(fence);
 
 out:
 	i915_active_release(ref);
@@ -467,13 +470,9 @@ __i915_active_set_fence(struct i915_active *ref,
 		return NULL;
 	}
 
-	rcu_read_lock();
 	prev = __i915_active_fence_set(active, fence);
-	if (prev)
-		prev = dma_fence_get_rcu(prev);
-	else
+	if (!prev)
 		__i915_active_acquire(ref);
-	rcu_read_unlock();
 
 	return prev;
 }
@@ -1040,10 +1039,11 @@ void i915_request_add_active_barriers(struct i915_request *rq)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
- * fence onto this one. Returns the previous fence (if not already completed),
- * which the caller must ensure is executed before the new fence. To ensure
- * that the order of fences within the timeline of the i915_active_fence is
- * understood, it should be locked by the caller.
+ * fence onto this one. Gets and returns a reference to the previous fence
+ * (if not already completed), which the caller must put after making sure
+ * that it is executed before the new fence. To ensure that the order of
+ * fences within the timeline of the i915_active_fence is understood, it
+ * should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
@@ -1052,7 +1052,23 @@ __i915_active_fence_set(struct i915_active_fence *active,
 	struct dma_fence *prev;
 	unsigned long flags;
 
-	if (fence == rcu_access_pointer(active->fence))
+	/*
+	 * In case of fences embedded in i915_requests, their memory is
+	 * SLAB_TYPESAFE_BY_RCU, so it can be reused right after release
+	 * by new requests. Then, there is a risk of passing back a pointer
+	 * to a new, completely unrelated fence that reuses the same memory
+	 * while tracked under a different active tracker. Combined with i915
+	 * perf open/close operations that build await dependencies between
+	 * engine kernel context requests and user requests from different
+	 * timelines, this can lead to dependency loops and infinite waits.
+	 *
+	 * As a countermeasure, we try to get a reference to the active->fence
+	 * first, so if we succeed and pass it back to our user then it is not
+	 * released and potentially reused by an unrelated request before the
+	 * user has a chance to set up an await dependency on it.
+	 */
+	prev = i915_active_fence_get(active);
+	if (fence == prev)
 		return fence;
 
 	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
@@ -1061,27 +1077,56 @@ __i915_active_fence_set(struct i915_active_fence *active,
 	 * Consider that we have two threads arriving (A and B), with
 	 * C already resident as the active->fence.
 	 *
-	 * A does the xchg first, and so it sees C or NULL depending
-	 * on the timing of the interrupt handler. If it is NULL, the
-	 * previous fence must have been signaled and we know that
-	 * we are first on the timeline. If it is still present,
-	 * we acquire the lock on that fence and serialise with the interrupt
-	 * handler, in the process removing it from any future interrupt
-	 * callback. A will then wait on C before executing (if present).
-	 *
-	 * As B is second, it sees A as the previous fence and so waits for
-	 * it to complete its transition and takes over the occupancy for
-	 * itself -- remembering that it needs to wait on A before executing.
+	 * Both A and B have got a reference to C or NULL, depending on the
+	 * timing of the interrupt handler. Let's assume that if A has got C
+	 * then it has locked C first (before B).
 	 *
 	 * Note the strong ordering of the timeline also provides consistent
 	 * nesting rules for the fence->lock; the inner lock is always the
 	 * older lock.
 	 */
 	spin_lock_irqsave(fence->lock, flags);
-	prev = xchg(__active_fence_slot(active), fence);
-	if (prev) {
-		GEM_BUG_ON(prev == fence);
+	if (prev)
 		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+
+	/*
+	 * A does the cmpxchg first, and so it sees C or NULL, as before, or
+	 * something else, depending on the timing of other threads and/or
+	 * interrupt handler. If not the same as before then A unlocks C if
+	 * applicable and retries, starting from an attempt to get a new
+	 * active->fence. Meanwhile, B follows the same path as A.
+	 * Once A succeeds with cmpxchg, B fails again, retries, gets A from
+	 * active->fence, locks it as soon as A completes, and possibly
+	 * succeeds with cmpxchg.
+	 */
+	while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
+		if (prev) {
+			spin_unlock(prev->lock);
+			dma_fence_put(prev);
+		}
+		spin_unlock_irqrestore(fence->lock, flags);
+
+		prev = i915_active_fence_get(active);
+		GEM_BUG_ON(prev == fence);
+
+		spin_lock_irqsave(fence->lock, flags);
+		if (prev)
+			spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+	}
+
+	/*
+	 * If prev is NULL then the previous fence must have been signaled
+	 * and we know that we are first on the timeline. If it is still
+	 * present then, having the lock on that fence already acquired, we
+	 * serialise with the interrupt handler, in the process of removing it
+	 * from any future interrupt callback. A will then wait on C before
+	 * executing (if present).
+	 *
+	 * As B is second, it sees A as the previous fence and so waits for
+	 * it to complete its transition and takes over the occupancy for
+	 * itself -- remembering that it needs to wait on A before executing.
+	 */
+	if (prev) {
 		__list_del_entry(&active->cb.node);
 		spin_unlock(prev->lock); /* serialise with prev->cb_list */
 	}
@@ -1098,11 +1143,7 @@ int i915_active_fence_set(struct i915_active_fence *active,
 	int err = 0;
 
 	/* Must maintain timeline ordering wrt previous active requests */
-	rcu_read_lock();
 	fence = __i915_active_fence_set(active, &rq->fence);
-	if (fence) /* but the previous fence may not belong to that timeline!
*/ - fence = dma_fence_get_rcu(fence); - rcu_read_unlock(); if (fence) { err = i915_request_await_dma_fence(rq, fence); dma_fence_put(fence); diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 79da5eca60af..bd85113ad150 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1596,6 +1596,8 @@ __i915_request_add_to_timeline(struct i915_request *rq) &rq->dep, 0); } + if (prev) + i915_request_put(prev); /* * Make sure that no request gazumped us - if it was allocated after diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c index cf35c2e10097..665204d39b30 100644 --- a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c @@ -312,7 +312,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc) dev_warn(ipu_crtc->dev, "8-pixel align hactive %d -> %d\n", sig_cfg.mode.hactive, new_hactive); - sig_cfg.mode.hfront_porch = new_hactive - sig_cfg.mode.hactive; + sig_cfg.mode.hfront_porch -= new_hactive - sig_cfg.mode.hactive; sig_cfg.mode.hactive = new_hactive; } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 7f8607b97707..fe6c650d23ce 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -966,7 +966,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) /* Determine display colour depth for everything except LVDS now, * DP requires this before mode_valid() is called. */ - if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode) + if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) nouveau_connector_detect_depth(connector); /* Find the native mode if this is a digital panel, if we didn't diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h index 32bbddc0993e..679aff79f4d6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h @@ -123,6 +123,7 @@ void gk104_grctx_generate_r418800(struct gf100_gr *); extern const struct gf100_grctx_func gk110_grctx; void gk110_grctx_generate_r419eb0(struct gf100_gr *); +void gk110_grctx_generate_r419f78(struct gf100_gr *); extern const struct gf100_grctx_func gk110b_grctx; extern const struct gf100_grctx_func gk208_grctx; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c index 304e9d268bad..f894f8254824 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c @@ -916,7 +916,9 @@ static void gk104_grctx_generate_r419f78(struct gf100_gr *gr) { struct nvkm_device *device = gr->base.engine.subdev.device; - nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000); + + /* bit 3 set disables loads in fp helper invocations, we need it enabled */ + nvkm_mask(device, 0x419f78, 0x00000009, 0x00000000); } void diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c index 86547cfc38dc..e88740d4e54d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c @@ -820,6 +820,15 @@ gk110_grctx_generate_r419eb0(struct gf100_gr *gr) nvkm_mask(device, 0x419eb0, 0x00001000, 0x00001000); } +void +gk110_grctx_generate_r419f78(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + + /* 
bit 3 set disables loads in fp helper invocations, we need it enabled */ + nvkm_mask(device, 0x419f78, 0x00000008, 0x00000000); +} + const struct gf100_grctx_func gk110_grctx = { .main = gf100_grctx_generate_main, @@ -852,4 +861,5 @@ gk110_grctx = { .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r418800 = gk104_grctx_generate_r418800, .r419eb0 = gk110_grctx_generate_r419eb0, + .r419f78 = gk110_grctx_generate_r419f78, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c index ebb947bd1446..086e4d49e112 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c @@ -101,4 +101,5 @@ gk110b_grctx = { .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r418800 = gk104_grctx_generate_r418800, .r419eb0 = gk110_grctx_generate_r419eb0, + .r419f78 = gk110_grctx_generate_r419f78, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c index 4d40512b5c99..0bf438c3f7cb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c @@ -566,4 +566,5 @@ gk208_grctx = { .dist_skip_table = gf117_grctx_generate_dist_skip_table, .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r418800 = gk104_grctx_generate_r418800, + .r419f78 = gk110_grctx_generate_r419f78, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c index 0b3964e6b36e..acdf0932a99e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c @@ -991,4 +991,5 @@ gm107_grctx = { .r406500 = gm107_grctx_generate_r406500, .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r419e00 = gm107_grctx_generate_r419e00, + .r419f78 = gk110_grctx_generate_r419f78, }; diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index ab50018813bc..8fdd04e7ef59 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1258,21 +1258,21 @@ static const struct panel_desc auo_g104sn02 = { .connector_type = DRM_MODE_CONNECTOR_LVDS, }; -static const struct drm_display_mode auo_g121ean01_mode = { - .clock = 66700, - .hdisplay = 1280, - .hsync_start = 1280 + 58, - .hsync_end = 1280 + 58 + 8, - .htotal = 1280 + 58 + 8 + 70, - .vdisplay = 800, - .vsync_start = 800 + 6, - .vsync_end = 800 + 6 + 4, - .vtotal = 800 + 6 + 4 + 10, +static const struct display_timing auo_g121ean01_timing = { + .pixelclock = { 60000000, 74400000, 90000000 }, + .hactive = { 1280, 1280, 1280 }, + .hfront_porch = { 20, 50, 100 }, + .hback_porch = { 20, 50, 100 }, + .hsync_len = { 30, 100, 200 }, + .vactive = { 800, 800, 800 }, + .vfront_porch = { 2, 10, 25 }, + .vback_porch = { 2, 10, 25 }, + .vsync_len = { 4, 18, 50 }, }; static const struct panel_desc auo_g121ean01 = { - .modes = &auo_g121ean01_mode, - .num_modes = 1, + .timings = &auo_g121ean01_timing, + .num_timings = 1, .bpc = 8, .size = { .width = 261, diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 359266d9e860..f0f512d58497 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -318,7 +318,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev, u32 domain, size_t size, struct qxl_surface *surf, - struct qxl_bo **qobj, + struct drm_gem_object **gobj, uint32_t *handle); void qxl_gem_object_free(struct drm_gem_object *gobj); int qxl_gem_object_open(struct 
drm_gem_object *obj, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index d636ba685451..17df5c7ccf69 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -34,6 +34,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
 {
 	struct qxl_device *qdev = to_qxl(dev);
 	struct qxl_bo *qobj;
+	struct drm_gem_object *gobj;
 	uint32_t handle;
 	int r;
 	struct qxl_surface surf;
@@ -62,11 +63,13 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
 
 	r = qxl_gem_object_create_with_handle(qdev, file_priv,
 					      QXL_GEM_DOMAIN_CPU,
-					      args->size, &surf, &qobj,
+					      args->size, &surf, &gobj,
 					      &handle);
 	if (r)
 		return r;
+	qobj = gem_to_qxl_bo(gobj);
 	qobj->is_dumb = true;
+	drm_gem_object_put(gobj);
 	args->pitch = pitch;
 	args->handle = handle;
 	return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index a08da0bd9098..fc5e3763c359 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -72,32 +72,41 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
 	return 0;
 }
 
+/*
+ * If the caller passed a valid gobj pointer, it is responsible for calling
+ * drm_gem_object_put() when it no longer needs to access the object.
+ *
+ * If gobj is NULL, it is handled internally.
+ */
 int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
 				      struct drm_file *file_priv,
 				      u32 domain,
 				      size_t size,
 				      struct qxl_surface *surf,
-				      struct qxl_bo **qobj,
+				      struct drm_gem_object **gobj,
 				      uint32_t *handle)
 {
-	struct drm_gem_object *gobj;
 	int r;
+	struct drm_gem_object *local_gobj;
 
-	BUG_ON(!qobj);
 	BUG_ON(!handle);
 
 	r = qxl_gem_object_create(qdev, size, 0,
 				  domain,
 				  false, false, surf,
-				  &gobj);
+				  &local_gobj);
 	if (r)
 		return -ENOMEM;
-	r = drm_gem_handle_create(file_priv, gobj, handle);
+	r = drm_gem_handle_create(file_priv, local_gobj, handle);
 	if (r)
 		return r;
-	/* drop reference from allocate - handle holds it now */
-	*qobj = gem_to_qxl_bo(gobj);
-	drm_gem_object_put(gobj);
+
+	if (gobj)
+		*gobj = local_gobj;
+	else
+		/* drop reference from allocate - handle holds it now */
+		drm_gem_object_put(local_gobj);
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 38aabcbe2238..4066499ca79e 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -39,7 +39,6 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
 	struct qxl_device *qdev = to_qxl(dev);
 	struct drm_qxl_alloc *qxl_alloc = data;
 	int ret;
-	struct qxl_bo *qobj;
 	uint32_t handle;
 	u32 domain = QXL_GEM_DOMAIN_VRAM;
 
@@ -51,7 +50,7 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
 						domain,
 						qxl_alloc->size,
 						NULL,
-						&qobj, &handle);
+						NULL, &handle);
 	if (ret) {
 		DRM_ERROR("%s: failed to create gem ret=%d\n",
 			  __func__, ret);
@@ -393,7 +392,6 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
 {
 	struct qxl_device *qdev = to_qxl(dev);
 	struct drm_qxl_alloc_surf *param = data;
-	struct qxl_bo *qobj;
 	int handle;
 	int ret;
 	int size, actual_stride;
@@ -413,7 +411,7 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
 						QXL_GEM_DOMAIN_SURFACE,
 						size,
 						&surf,
-						&qobj, &handle);
+						NULL, &handle);
 	if (ret) {
 		DRM_ERROR("%s: failed to create gem ret=%d\n",
 			  __func__, ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 26a3ed142b6b..cfe13b203b89 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -820,12 +820,12 @@ static
int vop_plane_atomic_check(struct drm_plane *plane, * need align with 2 pixel. */ if (fb->format->is_yuv && ((new_plane_state->src.x1 >> 16) % 2)) { - DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n"); + DRM_DEBUG_KMS("Invalid Source: Yuv format not support odd xpos\n"); return -EINVAL; } if (fb->format->is_yuv && new_plane_state->rotation & DRM_MODE_REFLECT_Y) { - DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n"); + DRM_DEBUG_KMS("Invalid Source: Yuv format does not support this rotation\n"); return -EINVAL; } @@ -833,7 +833,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane, struct vop *vop = to_vop(crtc); if (!vop->data->afbc) { - DRM_ERROR("vop does not support AFBC\n"); + DRM_DEBUG_KMS("vop does not support AFBC\n"); return -EINVAL; } @@ -842,15 +842,16 @@ static int vop_plane_atomic_check(struct drm_plane *plane, return ret; if (new_plane_state->src.x1 || new_plane_state->src.y1) { - DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n", - new_plane_state->src.x1, - new_plane_state->src.y1, fb->offsets[0]); + DRM_DEBUG_KMS("AFBC does not support offset display, " \ + "xpos=%d, ypos=%d, offset=%d\n", + new_plane_state->src.x1, new_plane_state->src.y1, + fb->offsets[0]); return -EINVAL; } if (new_plane_state->rotation && new_plane_state->rotation != DRM_MODE_ROTATE_0) { - DRM_ERROR("No rotation support in AFBC, rotation=%d\n", - new_plane_state->rotation); + DRM_DEBUG_KMS("No rotation support in AFBC, rotation=%d\n", + new_plane_state->rotation); return -EINVAL; } } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 4d0ef5ab2531..391ed462f7fb 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -606,7 +606,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo, if (bo->pin_count) { *locked = false; - *busy = false; + if (busy) + *busy = false; return false; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 288e883177be..7bb7a69321d3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1685,4 +1685,16 @@ static inline bool vmw_has_fences(struct vmw_private *vmw) return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0; } +static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model, + u32 shader_type) +{ + SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX; + + if (shader_model >= VMW_SM_5) + max_allowed = SVGA3D_SHADERTYPE_MAX; + else if (shader_model >= VMW_SM_4) + max_allowed = SVGA3D_SHADERTYPE_DX10_MAX; + return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed; +} + #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 9144e8f88c81..ed75622bf708 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -2003,7 +2003,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, cmd = container_of(header, typeof(*cmd), header); - if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) { + if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) { VMW_DEBUG_USER("Illegal shader type %u.\n", (unsigned int) cmd->body.type); return -EINVAL; @@ -2125,8 +2125,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer); - SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ? 
- SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10; struct vmw_resource *res = NULL; struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); @@ -2143,6 +2141,14 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; + if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) || + cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { + VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n", + (unsigned int) cmd->body.type, + (unsigned int) cmd->body.slot); + return -EINVAL; + } + binding.bi.ctx = ctx_node->ctx; binding.bi.res = res; binding.bi.bt = vmw_ctx_binding_cb; @@ -2151,14 +2157,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, binding.size = cmd->body.sizeInBytes; binding.slot = cmd->body.slot; - if (binding.shader_slot >= max_shader_num || - binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { - VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n", - (unsigned int) cmd->body.type, - (unsigned int) binding.slot); - return -EINVAL; - } - vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, binding.slot); @@ -2179,15 +2177,13 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv, { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) = container_of(header, typeof(*cmd), header); - SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ? - SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX; u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dShaderResourceViewId); if ((u64) cmd->body.startView + (u64) num_sr_view > (u64) SVGA3D_DX_MAX_SRVIEWS || - cmd->body.type >= max_allowed) { + !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) { VMW_DEBUG_USER("Invalid shader binding.\n"); return -EINVAL; } @@ -2211,8 +2207,6 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader); - SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ? 
- SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX; struct vmw_resource *res = NULL; struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); struct vmw_ctx_bindinfo_shader binding; @@ -2223,8 +2217,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, cmd = container_of(header, typeof(*cmd), header); - if (cmd->body.type >= max_allowed || - cmd->body.type < SVGA3D_SHADERTYPE_MIN) { + if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) { VMW_DEBUG_USER("Illegal shader type %u.\n", (unsigned int) cmd->body.type); return -EINVAL; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 5daec769df7a..5fceefb3c707 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -593,6 +593,7 @@ #define USB_DEVICE_ID_UGCI_FIGHTING 0x0030 #define USB_VENDOR_ID_HP 0x03f0 +#define USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A 0x464a #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index c61da859cd3c..0ac67dd76574 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -4377,6 +4377,8 @@ static const struct hid_device_id hidpp_devices[] = { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC086) }, { /* Logitech G903 Hero Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC091) }, + { /* Logitech G915 TKL Keyboard over USB */ + HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC343) }, { /* Logitech G920 Wheel over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL), .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS}, @@ -4392,6 +4394,8 @@ static const struct hid_device_id hidpp_devices[] = { { /* MX5500 keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, + { /* Logitech G915 TKL keyboard over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb35f) }, { /* M-RCQ142 V470 Cordless Laser Mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) }, { /* MX Master mouse over Bluetooth */ diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index c7c06aa958c4..96ca7d981ee2 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -96,6 +96,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL }, diff --git a/drivers/hwmon/pmbus/bel-pfe.c b/drivers/hwmon/pmbus/bel-pfe.c index 4100eefb7ac3..61c195f8fd3b 100644 --- a/drivers/hwmon/pmbus/bel-pfe.c +++ b/drivers/hwmon/pmbus/bel-pfe.c @@ -17,12 +17,13 @@ enum chips {pfe1100, pfe3000}; 
/*
- * Disable status check for pfe3000 devices, because some devices report
- * communication error (invalid command) for VOUT_MODE command (0x20)
- * although correct VOUT_MODE (0x16) is returned: it leads to incorrect
- * exponent in linear mode.
+ * Disable status check because some devices report communication error
+ * (invalid command) for VOUT_MODE command (0x20) although the correct
+ * VOUT_MODE (0x16) is returned: it leads to incorrect exponent in linear
+ * mode.
+ * This affects both pfe3000 and pfe1100.
 */
-static struct pmbus_platform_data pfe3000_plat_data = {
+static struct pmbus_platform_data pfe_plat_data = {
 	.flags = PMBUS_SKIP_STATUS_CHECK,
 };
 
@@ -94,16 +95,15 @@ static int pfe_pmbus_probe(struct i2c_client *client)
 	int model;
 
 	model = (int)i2c_match_id(pfe_device_id, client)->driver_data;
+	client->dev.platform_data = &pfe_plat_data;
 
 	/*
 	 * PFE3000-12-069RA devices may not stay in page 0 during device
 	 * probe which leads to probe failure (read status word failed).
 	 * So let's set the device to page 0 at the beginning.
 	 */
-	if (model == pfe3000) {
-		client->dev.platform_data = &pfe3000_plat_data;
+	if (model == pfe3000)
 		i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
-	}
 
 	return pmbus_do_probe(client, &pfe_driver_info[model]);
 }
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 6304d1dd2dd6..ec6571b82fff 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -243,13 +243,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
 				   u32 offset)
 {
 	u32 val;
+	unsigned long flags;
 
 	if (iproc_i2c->idm_base) {
-		spin_lock(&iproc_i2c->idm_lock);
+		spin_lock_irqsave(&iproc_i2c->idm_lock, flags);
 		writel(iproc_i2c->ape_addr_mask,
 		       iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
 		val = readl(iproc_i2c->base + offset);
-		spin_unlock(&iproc_i2c->idm_lock);
+		spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags);
 	} else {
 		val = readl(iproc_i2c->base + offset);
 	}
@@ -260,12 +261,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
 static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
 				    u32 offset, u32 val)
 {
+	unsigned long flags;
+
 	if (iproc_i2c->idm_base) {
-		spin_lock(&iproc_i2c->idm_lock);
+		spin_lock_irqsave(&iproc_i2c->idm_lock, flags);
 		writel(iproc_i2c->ape_addr_mask,
 		       iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
 		writel(val, iproc_i2c->base + offset);
-		spin_unlock(&iproc_i2c->idm_lock);
+		spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags);
 	} else {
 		writel(val, iproc_i2c->base + offset);
 	}
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index e0559eff8928..b79e1380ff68 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -525,9 +525,21 @@ i2c_dw_read(struct dw_i2c_dev *dev)
 		u32 flags = msgs[dev->msg_read_idx].flags;
 
 		regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
+		tmp &= DW_IC_DATA_CMD_DAT;
 		/* Ensure length byte is a valid value */
-		if (flags & I2C_M_RECV_LEN &&
-		    (tmp & DW_IC_DATA_CMD_DAT) <= I2C_SMBUS_BLOCK_MAX && tmp > 0) {
+		if (flags & I2C_M_RECV_LEN) {
+			/*
+			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set (which cannot
+			 * be detected from the registers), the controller can
+			 * only be disabled once the STOP bit has been set. In
+			 * the I2C_FUNC_SMBUS_BLOCK_DATA case the STOP bit is
+			 * only set after the block data response length has
+			 * been received, so when that length is invalid we
+			 * need to read one more byte with the STOP bit set to
+			 * complete the transaction.
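			 * For example, a returned length byte of 0, or
			 * anything above I2C_SMBUS_BLOCK_MAX (32), is invalid;
			 * tmp is then forced to 1 so that exactly one more
			 * byte is read with the STOP bit set and the transfer
			 * terminates cleanly.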
			 */
+			if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
+				tmp = 1;
+
 			len = i2c_dw_recv_len(dev, tmp);
 		}
 		*buf++ = tmp;
diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
index 1f406e6f4ece..6bdebe51ea11 100644
--- a/drivers/i2c/busses/i2c-hisi.c
+++ b/drivers/i2c/busses/i2c-hisi.c
@@ -329,6 +329,14 @@ static irqreturn_t hisi_i2c_irq(int irq, void *context)
 	struct hisi_i2c_controller *ctlr = context;
 	u32 int_stat;
 
+	/*
+	 * Don't handle the interrupt if ctlr->completion is NULL. We may
+	 * reach here because the interrupt is spurious or the transfer is
+	 * started by another port (e.g. firmware) rather than us.
+	 */
+	if (!ctlr->completion)
+		return IRQ_NONE;
+
 	int_stat = readl(ctlr->iobase + HISI_I2C_INT_MSTAT);
 	hisi_i2c_clear_int(ctlr, int_stat);
 	if (!(int_stat & HISI_I2C_INT_ALL))
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 2334ad249b46..4fb4321a72cb 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -70,6 +70,7 @@ config IIO_TRIGGERED_EVENT
 
 source "drivers/iio/accel/Kconfig"
 source "drivers/iio/adc/Kconfig"
+source "drivers/iio/addac/Kconfig"
 source "drivers/iio/afe/Kconfig"
 source "drivers/iio/amplifiers/Kconfig"
 source "drivers/iio/cdc/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 65e39bd4f934..8d48c70fee4d 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o
 obj-y += accel/
 obj-y += adc/
+obj-y += addac/
 obj-y += afe/
 obj-y += amplifiers/
 obj-y += buffer/
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 9d713ce25263..6269728c9075 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -1011,22 +1011,6 @@ config STMPE_ADC
 	  Say yes here to build support for ST Microelectronics STMPE
 	  built-in ADC block (stmpe811).
 
-config STX104
-	tristate "Apex Embedded Systems STX104 driver"
-	depends on PC104 && X86
-	select ISA_BUS_API
-	select GPIOLIB
-	help
-	  Say yes here to build support for the Apex Embedded Systems STX104
-	  integrated analog PC/104 card.
-
-	  This driver supports the 16 channels of single-ended (8 channels of
-	  differential) analog inputs, 2 channels of analog output, 4 digital
-	  inputs, and 4 digital outputs provided by the STX104.
-
-	  The base port addresses for the devices may be configured via the base
-	  array module parameter.
- config SUN4I_GPADC tristate "Support for the Allwinner SoCs GPADC" depends on IIO diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile index 6eeb7a4749d0..58b0ae0e37cf 100644 --- a/drivers/iio/adc/Makefile +++ b/drivers/iio/adc/Makefile @@ -87,7 +87,6 @@ obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o obj-$(CONFIG_RZG2L_ADC) += rzg2l_adc.o obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o obj-$(CONFIG_SPEAR_ADC) += spear_adc.o -obj-$(CONFIG_STX104) += stx104.o obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o obj-$(CONFIG_STM32_ADC) += stm32-adc.o diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index a4b2ff9e0dd5..9403c2604066 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -124,6 +124,7 @@ static const struct regmap_config ina2xx_regmap_config = { enum ina2xx_ids { ina219, ina226 }; struct ina2xx_config { + const char *name; u16 config_default; int calibration_value; int shunt_voltage_lsb; /* nV */ @@ -155,6 +156,7 @@ struct ina2xx_chip_info { static const struct ina2xx_config ina2xx_config[] = { [ina219] = { + .name = "ina219", .config_default = INA219_CONFIG_DEFAULT, .calibration_value = 4096, .shunt_voltage_lsb = 10000, @@ -164,6 +166,7 @@ static const struct ina2xx_config ina2xx_config[] = { .chip_id = ina219, }, [ina226] = { + .name = "ina226", .config_default = INA226_CONFIG_DEFAULT, .calibration_value = 2048, .shunt_voltage_lsb = 2500, @@ -999,7 +1002,7 @@ static int ina2xx_probe(struct i2c_client *client, /* Patch the current config register with default. */ val = chip->config->config_default; - if (id->driver_data == ina226) { + if (type == ina226) { ina226_set_average(chip, INA226_DEFAULT_AVG, &val); ina226_set_int_time_vbus(chip, INA226_DEFAULT_IT, &val); ina226_set_int_time_vshunt(chip, INA226_DEFAULT_IT, &val); @@ -1018,7 +1021,7 @@ static int ina2xx_probe(struct i2c_client *client, } indio_dev->modes = INDIO_DIRECT_MODE; - if (id->driver_data == ina226) { + if (type == ina226) { indio_dev->channels = ina226_channels; indio_dev->num_channels = ARRAY_SIZE(ina226_channels); indio_dev->info = &ina226_info; @@ -1027,7 +1030,7 @@ static int ina2xx_probe(struct i2c_client *client, indio_dev->num_channels = ARRAY_SIZE(ina219_channels); indio_dev->info = &ina219_info; } - indio_dev->name = id->name; + indio_dev->name = id ? id->name : chip->config->name; ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev, INDIO_BUFFER_SOFTWARE, diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig new file mode 100644 index 000000000000..1f598670e84f --- /dev/null +++ b/drivers/iio/addac/Kconfig @@ -0,0 +1,24 @@ +# +# ADC DAC drivers +# +# When adding new entries keep the list in alphabetical order + +menu "Analog to digital and digital to analog converters" + +config STX104 + tristate "Apex Embedded Systems STX104 driver" + depends on PC104 && X86 + select ISA_BUS_API + select GPIOLIB + help + Say yes here to build support for the Apex Embedded Systems STX104 + integrated analog PC/104 card. + + This driver supports the 16 channels of single-ended (8 channels of + differential) analog inputs, 2 channels of analog output, 4 digital + inputs, and 4 digital outputs provided by the STX104. + + The base port addresses for the devices may be configured via the base + array module parameter. 
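The ina2xx hunk above pairs the new name field with a NULL check on the i2c_device_id: a device matched through the devicetree table reaches probe with id == NULL, so the IIO device name must come from the per-chip config instead. A hedged sketch of that fallback (foo_* names are hypothetical):

	static const char *foo_dev_name(const struct i2c_device_id *id,
					const struct foo_config *cfg)
	{
		/* id is NULL for firmware-matched devices, so never
		 * dereference it without a fallback */
		return id ? id->name : cfg->name;
	}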
+ +endmenu diff --git a/drivers/iio/addac/Makefile b/drivers/iio/addac/Makefile new file mode 100644 index 000000000000..862914523354 --- /dev/null +++ b/drivers/iio/addac/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for industrial I/O ADDAC drivers +# + +# When adding new entries keep the list in alphabetical order +obj-$(CONFIG_STX104) += stx104.o diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/addac/stx104.c index 55bd2dc514e9..b658a75d4e3a 100644 --- a/drivers/iio/adc/stx104.c +++ b/drivers/iio/addac/stx104.c @@ -15,7 +15,9 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/mutex.h> #include <linux/spinlock.h> +#include <linux/types.h> #define STX104_OUT_CHAN(chan) { \ .type = IIO_VOLTAGE, \ @@ -45,13 +47,37 @@ module_param_hw_array(base, uint, ioport, &num_stx104, 0); MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses"); /** + * struct stx104_reg - device register structure + * @ssr_ad: Software Strobe Register and ADC Data + * @achan: ADC Channel + * @dio: Digital I/O + * @dac: DAC Channels + * @cir_asr: Clear Interrupts and ADC Status + * @acr: ADC Control + * @pccr_fsh: Pacer Clock Control and FIFO Status MSB + * @acfg: ADC Configuration + */ +struct stx104_reg { + u16 ssr_ad; + u8 achan; + u8 dio; + u16 dac[2]; + u8 cir_asr; + u8 acr; + u8 pccr_fsh; + u8 acfg; +}; + +/** * struct stx104_iio - IIO device private data structure + * @lock: synchronization lock to prevent I/O race conditions * @chan_out_states: channels' output states - * @base: base port address of the IIO device + * @reg: pointer to the device's I/O-mapped registers */ struct stx104_iio { + struct mutex lock; unsigned int chan_out_states[STX104_NUM_OUT_CHAN]; - unsigned int base; + struct stx104_reg __iomem *reg; }; /** @@ -64,7 +90,7 @@ struct stx104_iio { struct stx104_gpio { struct gpio_chip chip; spinlock_t lock; - unsigned int base; + u8 __iomem *base; unsigned int out_state; }; @@ -72,6 +98,7 @@ static int stx104_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { struct stx104_iio *const priv = iio_priv(indio_dev); + struct stx104_reg __iomem *const reg = priv->reg; unsigned int adc_config; int adbu; int gain; @@ -79,7 +106,7 @@ static int stx104_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_HARDWAREGAIN: /* get gain configuration */ - adc_config = inb(priv->base + 11); + adc_config = ioread8(&reg->acfg); gain = adc_config & 0x3; *val = 1 << gain; @@ -90,25 +117,31 @@ static int stx104_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT; } + mutex_lock(&priv->lock); + /* select ADC channel */ - outb(chan->channel | (chan->channel << 4), priv->base + 2); + iowrite8(chan->channel | (chan->channel << 4), &reg->achan); + + /* trigger ADC sample capture by writing to the 8-bit + * Software Strobe Register and wait for completion + */ + iowrite8(0, &reg->ssr_ad); + while (ioread8(&reg->cir_asr) & BIT(7)); - /* trigger ADC sample capture and wait for completion */ - outb(0, priv->base); - while (inb(priv->base + 8) & BIT(7)); + *val = ioread16(&reg->ssr_ad); - *val = inw(priv->base); + mutex_unlock(&priv->lock); return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: /* get ADC bipolar/unipolar configuration */ - adc_config = inb(priv->base + 11); + adc_config = ioread8(&reg->acfg); adbu = !(adc_config & BIT(2)); *val = -32768 * adbu; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: /* get ADC bipolar/unipolar and gain configuration */ - adc_config = inb(priv->base
+ 11); + adc_config = ioread8(&reg->acfg); adbu = !(adc_config & BIT(2)); gain = adc_config & 0x3; @@ -130,16 +163,16 @@ static int stx104_write_raw(struct iio_dev *indio_dev, /* Only four gain states (x1, x2, x4, x8) */ switch (val) { case 1: - outb(0, priv->base + 11); + iowrite8(0, &priv->reg->acfg); break; case 2: - outb(1, priv->base + 11); + iowrite8(1, &priv->reg->acfg); break; case 4: - outb(2, priv->base + 11); + iowrite8(2, &priv->reg->acfg); break; case 8: - outb(3, priv->base + 11); + iowrite8(3, &priv->reg->acfg); break; default: return -EINVAL; @@ -152,9 +185,12 @@ static int stx104_write_raw(struct iio_dev *indio_dev, if ((unsigned int)val > 65535) return -EINVAL; + mutex_lock(&priv->lock); + priv->chan_out_states[chan->channel] = val; - outw(val, priv->base + 4 + 2 * chan->channel); + iowrite16(val, &priv->reg->dac[chan->channel]); + mutex_unlock(&priv->lock); return 0; } return -EINVAL; @@ -222,7 +258,7 @@ static int stx104_gpio_get(struct gpio_chip *chip, unsigned int offset) if (offset >= 4) return -EINVAL; - return !!(inb(stx104gpio->base) & BIT(offset)); + return !!(ioread8(stx104gpio->base) & BIT(offset)); } static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask, @@ -230,7 +266,7 @@ static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask, { struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip); - *bits = inb(stx104gpio->base); + *bits = ioread8(stx104gpio->base); return 0; } @@ -252,7 +288,7 @@ static void stx104_gpio_set(struct gpio_chip *chip, unsigned int offset, else stx104gpio->out_state &= ~mask; - outb(stx104gpio->out_state, stx104gpio->base); + iowrite8(stx104gpio->out_state, stx104gpio->base); spin_unlock_irqrestore(&stx104gpio->lock, flags); } @@ -279,7 +315,7 @@ static void stx104_gpio_set_multiple(struct gpio_chip *chip, stx104gpio->out_state &= ~*mask; stx104gpio->out_state |= *mask & *bits; - outb(stx104gpio->out_state, stx104gpio->base); + iowrite8(stx104gpio->out_state, stx104gpio->base); spin_unlock_irqrestore(&stx104gpio->lock, flags); } @@ -306,11 +342,16 @@ static int stx104_probe(struct device *dev, unsigned int id) return -EBUSY; } + priv = iio_priv(indio_dev); + priv->reg = devm_ioport_map(dev, base[id], STX104_EXTENT); + if (!priv->reg) + return -ENOMEM; + indio_dev->info = &stx104_info; indio_dev->modes = INDIO_DIRECT_MODE; /* determine if differential inputs */ - if (inb(base[id] + 8) & BIT(5)) { + if (ioread8(&priv->reg->cir_asr) & BIT(5)) { indio_dev->num_channels = ARRAY_SIZE(stx104_channels_diff); indio_dev->channels = stx104_channels_diff; } else { @@ -320,18 +361,17 @@ static int stx104_probe(struct device *dev, unsigned int id) indio_dev->name = dev_name(dev); - priv = iio_priv(indio_dev); - priv->base = base[id]; + mutex_init(&priv->lock); /* configure device for software trigger operation */ - outb(0, base[id] + 9); + iowrite8(0, &priv->reg->acr); /* initialize gain setting to x1 */ - outb(0, base[id] + 11); + iowrite8(0, &priv->reg->acfg); /* initialize DAC output to 0V */ - outw(0, base[id] + 4); - outw(0, base[id] + 6); + iowrite16(0, &priv->reg->dac[0]); + iowrite16(0, &priv->reg->dac[1]); stx104gpio->chip.label = dev_name(dev); stx104gpio->chip.parent = dev; @@ -346,7 +386,7 @@ static int stx104_probe(struct device *dev, unsigned int id) stx104gpio->chip.get_multiple = stx104_gpio_get_multiple; stx104gpio->chip.set = stx104_gpio_set; stx104gpio->chip.set_multiple = stx104_gpio_set_multiple; - stx104gpio->base = base[id] + 3; + stx104gpio->base = &priv->reg->dio;
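+	/* dio sits at offset 3 of struct stx104_reg, so this resolves to + * the same I/O port the removed base[id] + 3 arithmetic computed. */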
stx104gpio->out_state = 0x0; spin_lock_init(&stx104gpio->lock); diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index b0c1dc8cc4c5..f529c01ac66b 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -259,7 +259,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, platform_set_drvdata(pdev, indio_dev); state->ec = ec->ec_dev; - state->msg = devm_kzalloc(&pdev->dev, + state->msg = devm_kzalloc(&pdev->dev, sizeof(*state->msg) + max((u16)sizeof(struct ec_params_motion_sense), state->ec->max_response), GFP_KERNEL); if (!state->msg) diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 86d479772fbc..957634eceba8 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -85,6 +85,8 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, dma_addr_t mask; int i; + umem->iova = va = virt; + if (umem->is_odp) { unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift); @@ -100,7 +102,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, */ pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT); - umem->iova = va = virt; /* The best result is the smallest page size that results in the minimum * number of required pages. Compute the largest page size that could * work based on VA address bits that don't change. diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 689921dc3d4a..b69dd618146e 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -12306,6 +12306,7 @@ static void free_cntrs(struct hfi1_devdata *dd) if (dd->synth_stats_timer.function) del_timer_sync(&dd->synth_stats_timer); + cancel_work_sync(&dd->update_cntr_work); ppd = (struct hfi1_pportdata *)(dd + 1); for (i = 0; i < dd->num_pports; i++, ppd++) { kfree(ppd->cntrs); diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c index 8844eacf2380..e508c0753dd3 100644 --- a/drivers/infiniband/hw/mlx5/qpc.c +++ b/drivers/infiniband/hw/mlx5/qpc.c @@ -297,8 +297,7 @@ int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp) MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); MLX5_SET(destroy_qp_in, in, uid, qp->uid); - mlx5_cmd_exec_in(dev->mdev, destroy_qp, in); - return 0; + return mlx5_cmd_exec_in(dev->mdev, destroy_qp, in); } int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, @@ -548,14 +547,14 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn) return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in); } -static void destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid) +static int destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid) { u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {}; MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ); MLX5_SET(destroy_rq_in, in, rqn, rqn); MLX5_SET(destroy_rq_in, in, uid, uid); - mlx5_cmd_exec_in(dev->mdev, destroy_rq, in); + return mlx5_cmd_exec_in(dev->mdev, destroy_rq, in); } int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen, @@ -586,8 +585,7 @@ int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev, struct mlx5_core_qp *rq) { destroy_resource_common(dev, rq); - destroy_rq_tracked(dev, rq->qpn, rq->uid); - return 0; + return destroy_rq_tracked(dev, rq->qpn, rq->uid); } static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 
uid) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 890d88c6b534..c8bc1e43285f 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -897,6 +897,12 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu, struct arm_smmu_cmdq_batch *cmds, struct arm_smmu_cmdq_ent *cmd) { + if (cmds->num == CMDQ_BATCH_ENTRIES - 1 && + (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) { + arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true); + cmds->num = 0; + } + if (cmds->num == CMDQ_BATCH_ENTRIES) { arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false); cmds->num = 0; @@ -3470,6 +3476,44 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) return 0; } +#define IIDR_IMPLEMENTER_ARM 0x43b +#define IIDR_PRODUCTID_ARM_MMU_600 0x483 +#define IIDR_PRODUCTID_ARM_MMU_700 0x487 + +static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu) +{ + u32 reg; + unsigned int implementer, productid, variant, revision; + + reg = readl_relaxed(smmu->base + ARM_SMMU_IIDR); + implementer = FIELD_GET(IIDR_IMPLEMENTER, reg); + productid = FIELD_GET(IIDR_PRODUCTID, reg); + variant = FIELD_GET(IIDR_VARIANT, reg); + revision = FIELD_GET(IIDR_REVISION, reg); + + switch (implementer) { + case IIDR_IMPLEMENTER_ARM: + switch (productid) { + case IIDR_PRODUCTID_ARM_MMU_600: + /* Arm erratum 1076982 */ + if (variant == 0 && revision <= 2) + smmu->features &= ~ARM_SMMU_FEAT_SEV; + /* Arm erratum 1209401 */ + if (variant < 2) + smmu->features &= ~ARM_SMMU_FEAT_NESTING; + break; + case IIDR_PRODUCTID_ARM_MMU_700: + /* Arm erratum 2812531 */ + smmu->features &= ~ARM_SMMU_FEAT_BTM; + smmu->options |= ARM_SMMU_OPT_CMDQ_FORCE_SYNC; + /* Arm errata 2268618, 2812531 */ + smmu->features &= ~ARM_SMMU_FEAT_NESTING; + break; + } + break; + } +} + static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) { u32 reg; @@ -3675,6 +3719,12 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) smmu->ias = max(smmu->ias, smmu->oas); + if ((smmu->features & ARM_SMMU_FEAT_TRANS_S1) && + (smmu->features & ARM_SMMU_FEAT_TRANS_S2)) + smmu->features |= ARM_SMMU_FEAT_NESTING; + + arm_smmu_device_iidr_probe(smmu); + if (arm_smmu_sva_supported(smmu)) smmu->features |= ARM_SMMU_FEAT_SVA; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index 4cb136f07914..c594a9b46999 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -69,6 +69,12 @@ #define IDR5_VAX GENMASK(11, 10) #define IDR5_VAX_52_BIT 1 +#define ARM_SMMU_IIDR 0x18 +#define IIDR_PRODUCTID GENMASK(31, 20) +#define IIDR_VARIANT GENMASK(19, 16) +#define IIDR_REVISION GENMASK(15, 12) +#define IIDR_IMPLEMENTER GENMASK(11, 0) + #define ARM_SMMU_CR0 0x20 #define CR0_ATSCHK (1 << 4) #define CR0_CMDQEN (1 << 3) @@ -640,11 +646,13 @@ struct arm_smmu_device { #define ARM_SMMU_FEAT_BTM (1 << 16) #define ARM_SMMU_FEAT_SVA (1 << 17) #define ARM_SMMU_FEAT_E2H (1 << 18) +#define ARM_SMMU_FEAT_NESTING (1 << 19) u32 features; #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) #define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1) #define ARM_SMMU_OPT_MSIPOLL (1 << 2) +#define ARM_SMMU_OPT_CMDQ_FORCE_SYNC (1 << 3) u32 options; struct arm_smmu_cmdq cmdq; diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index eba58b99cd29..d6cf01c32a33 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ 
b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -839,7 +839,7 @@ hfcpci_fill_fifo(struct bchannel *bch) *z1t = cpu_to_le16(new_z1); /* now send data */ if (bch->tx_idx < bch->tx_skb->len) return; - dev_kfree_skb(bch->tx_skb); + dev_kfree_skb_any(bch->tx_skb); if (get_next_bframe(bch)) goto next_t_frame; return; @@ -895,7 +895,7 @@ hfcpci_fill_fifo(struct bchannel *bch) } bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */ bz->f1 = new_f1; /* next frame */ - dev_kfree_skb(bch->tx_skb); + dev_kfree_skb_any(bch->tx_skb); get_next_bframe(bch); } @@ -1119,7 +1119,7 @@ tx_birq(struct bchannel *bch) if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len) hfcpci_fill_fifo(bch); else { - dev_kfree_skb(bch->tx_skb); + dev_kfree_skb_any(bch->tx_skb); if (get_next_bframe(bch)) hfcpci_fill_fifo(bch); } @@ -2277,7 +2277,7 @@ _hfcpci_softirq(struct device *dev, void *unused) return 0; if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) { - spin_lock(&hc->lock); + spin_lock_irq(&hc->lock); bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1); if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */ main_rec_hfcpci(bch); @@ -2288,7 +2288,7 @@ _hfcpci_softirq(struct device *dev, void *unused) main_rec_hfcpci(bch); tx_birq(bch); } - spin_unlock(&hc->lock); + spin_unlock_irq(&hc->lock); } return 0; } diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h index fa09d511a8ed..baf31258f5c9 100644 --- a/drivers/isdn/mISDN/dsp.h +++ b/drivers/isdn/mISDN/dsp.h @@ -247,7 +247,7 @@ extern void dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp); extern int dsp_cmx_conf(struct dsp *dsp, u32 conf_id); extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb); extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb); -extern void dsp_cmx_send(void *arg); +extern void dsp_cmx_send(struct timer_list *arg); extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb); extern int dsp_cmx_del_conf_member(struct dsp *dsp); extern int dsp_cmx_del_conf(struct dsp_conf *conf); diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c index 6d2088fbaf69..1b73af501397 100644 --- a/drivers/isdn/mISDN/dsp_cmx.c +++ b/drivers/isdn/mISDN/dsp_cmx.c @@ -1625,7 +1625,7 @@ static u16 dsp_count; /* last sample count */ static int dsp_count_valid; /* if we have last sample count */ void -dsp_cmx_send(void *arg) +dsp_cmx_send(struct timer_list *arg) { struct dsp_conf *conf; struct dsp_conf_member *member; diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 386084530c2f..fae95f166688 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -1195,7 +1195,7 @@ static int __init dsp_init(void) } /* set sample timer */ - timer_setup(&dsp_spl_tl, (void *)dsp_cmx_send, 0); + timer_setup(&dsp_spl_tl, dsp_cmx_send, 0); dsp_spl_tl.expires = jiffies + dsp_tics; dsp_spl_jiffies = dsp_spl_tl.expires; add_timer(&dsp_spl_tl); diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c index d97a6765693f..389ac3d1f344 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c @@ -733,6 +733,8 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq, return -EINVAL; if (*nplanes) { + if (*nplanes != q_data->fmt->num_planes) + return -EINVAL; for (i = 0; i < *nplanes; i++) if (sizes[i] < q_data->sizeimage[i]) return -EINVAL; diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c index 7f1647da0ade..af59cc52fdd7 100644 
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c +++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c @@ -562,15 +562,17 @@ static int load_requested_vpu(struct mtk_vpu *vpu, int vpu_load_firmware(struct platform_device *pdev) { struct mtk_vpu *vpu; - struct device *dev = &pdev->dev; + struct device *dev; struct vpu_run *run; int ret; if (!pdev) { - dev_err(dev, "VPU platform device is invalid\n"); + pr_err("VPU platform device is invalid\n"); return -EINVAL; } + dev = &pdev->dev; + vpu = platform_get_drvdata(pdev); run = &vpu->run; diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c index 4bcfbc9afbac..0f106d700625 100644 --- a/drivers/misc/cardreader/rts5227.c +++ b/drivers/misc/cardreader/rts5227.c @@ -171,7 +171,7 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr) else rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x00); - if (option->force_clkreq_0) + if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG) rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); else diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c index ffc128278613..282a03520cf5 100644 --- a/drivers/misc/cardreader/rts5228.c +++ b/drivers/misc/cardreader/rts5228.c @@ -427,17 +427,10 @@ static void rts5228_init_from_cfg(struct rtsx_pcr *pcr) option->ltr_enabled = false; } } - - if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN - | PM_L1_1_EN | PM_L1_2_EN)) - option->force_clkreq_0 = false; - else - option->force_clkreq_0 = true; } static int rts5228_extra_init_hw(struct rtsx_pcr *pcr) { - struct rtsx_cr_option *option = &pcr->option; rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1, CD_RESUME_EN_MASK, CD_RESUME_EN_MASK); @@ -468,17 +461,6 @@ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr) else rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00); - /* - * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced - * to drive low, and we forcibly request clock. - */ - if (option->force_clkreq_0) - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); - else - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); - rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB); rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00); rtsx_pci_write_register(pcr, RTS5228_REG_PME_FORCE_CTL, diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c index 53f3a1f45c4a..6b5e4bdf209d 100644 --- a/drivers/misc/cardreader/rts5249.c +++ b/drivers/misc/cardreader/rts5249.c @@ -302,12 +302,11 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr) } } - /* * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced * to drive low, and we forcibly request clock. 
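 * (Gated below on pcr->aspm_mode == ASPM_MODE_CFG, so this override now * applies only when ASPM is controlled through PCI config space.)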
*/ - if (option->force_clkreq_0) + if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG) rtsx_pci_write_register(pcr, PETXCFG, FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); else diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c index 9b42b20a3e5a..79b18f6f73a8 100644 --- a/drivers/misc/cardreader/rts5260.c +++ b/drivers/misc/cardreader/rts5260.c @@ -517,17 +517,10 @@ static void rts5260_init_from_cfg(struct rtsx_pcr *pcr) option->ltr_enabled = false; } } - - if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN - | PM_L1_1_EN | PM_L1_2_EN)) - option->force_clkreq_0 = false; - else - option->force_clkreq_0 = true; } static int rts5260_extra_init_hw(struct rtsx_pcr *pcr) { - struct rtsx_cr_option *option = &pcr->option; /* Set mcu_cnt to 7 to ensure data can be sampled properly */ rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07); @@ -546,17 +539,6 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr) rts5260_init_hw(pcr); - /* - * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced - * to drive low, and we forcibly request clock. - */ - if (option->force_clkreq_0) - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); - else - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); - rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00); return 0; diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c index 1fd4e0e50730..2a97eeb0e509 100644 --- a/drivers/misc/cardreader/rts5261.c +++ b/drivers/misc/cardreader/rts5261.c @@ -468,17 +468,10 @@ static void rts5261_init_from_cfg(struct rtsx_pcr *pcr) option->ltr_enabled = false; } } - - if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN - | PM_L1_1_EN | PM_L1_2_EN)) - option->force_clkreq_0 = false; - else - option->force_clkreq_0 = true; } static int rts5261_extra_init_hw(struct rtsx_pcr *pcr) { - struct rtsx_cr_option *option = &pcr->option; u32 val; rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1, @@ -524,17 +517,6 @@ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr) else rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00); - /* - * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced - * to drive low, and we forcibly request clock. - */ - if (option->force_clkreq_0) - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); - else - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); - rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB); rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00); rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL, diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 62fdbbd55e74..c0bf747305e2 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -1400,8 +1400,11 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) return err; } - if (pcr->aspm_mode == ASPM_MODE_REG) + if (pcr->aspm_mode == ASPM_MODE_REG) { rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30); + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + } /* No CD interrupt if probing driver with card inserted. * So we need to initialize pcr->card_exist here. 
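Taken together, the five card-reader hunks above consolidate the CLKREQ# handling: rts5228, rts5260 and rts5261 drop their local PETXCFG writes, rts5227 and rts5249 honour force_clkreq_0 only in ASPM_MODE_CFG, and the common rtsx_pci_init_hw() parks CLKREQ# high whenever ASPM is register-controlled. All of these writes go through the same masked-write primitive, which touches only the bits named in the mask. A generic sketch of that read-modify-write idiom (illustrative only, assuming a plain MMIO register; the real rtsx_pci_write_register() issues a command to the reader instead):

	static void reg_update_bits(void __iomem *addr, u8 mask, u8 val)
	{
		u8 tmp = readb(addr);

		/* clear the masked bits, then merge in the new value */
		tmp = (tmp & ~mask) | (val & mask);
		writeb(tmp, addr);
	}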
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 0b72096f10e6..965b44a09507 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2081,14 +2081,14 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, mmc_blk_urgent_bkops(mq, mqrq); } -static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) +static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type) { unsigned long flags; bool put_card; spin_lock_irqsave(&mq->lock, flags); - mq->in_flight[mmc_issue_type(mq, req)] -= 1; + mq->in_flight[issue_type] -= 1; put_card = (mmc_tot_in_flight(mq) == 0); @@ -2100,6 +2100,7 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) { + enum mmc_issue_type issue_type = mmc_issue_type(mq, req); struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); struct mmc_request *mrq = &mqrq->brq.mrq; struct mmc_host *host = mq->card->host; @@ -2115,7 +2116,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) else if (likely(!blk_should_fake_timeout(req->q))) blk_mq_complete_request(req); - mmc_blk_mq_dec_in_flight(mq, req); + mmc_blk_mq_dec_in_flight(mq, issue_type); } void mmc_blk_mq_recovery(struct mmc_queue *mq) diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 8c2361e66277..985079943be7 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1413,8 +1413,8 @@ static int bcm2835_probe(struct platform_device *pdev) host->max_clk = clk_get_rate(clk); host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) { - ret = -EINVAL; + if (host->irq < 0) { + ret = host->irq; goto err; } diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index 52ed30f2d9f4..94e9a08bc90e 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -338,13 +338,7 @@ static void moxart_transfer_pio(struct moxart_host *host) return; } for (len = 0; len < remain && len < host->fifo_width;) { - /* SCR data must be read in big endian. 
*/ - if (data->mrq->cmd->opcode == SD_APP_SEND_SCR) - *sgp = ioread32be(host->base + - REG_DATA_WINDOW); - else - *sgp = ioread32(host->base + - REG_DATA_WINDOW); + *sgp = ioread32(host->base + REG_DATA_WINDOW); sgp++; len += 4; } diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index 6c4f43e11282..7ede74bf3723 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -26,9 +26,16 @@ struct f_sdhost_priv { bool enable_cmd_dat_delay; }; +static void *sdhci_f_sdhost_priv(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return sdhci_pltfm_priv(pltfm_host); +} + static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host) { - struct f_sdhost_priv *priv = sdhci_priv(host); + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); u32 ctrl = 0; usleep_range(2500, 3000); @@ -61,7 +68,7 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host) static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask) { - struct f_sdhost_priv *priv = sdhci_priv(host); + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); u32 ctl; if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0) @@ -85,30 +92,32 @@ static const struct sdhci_ops sdhci_f_sdh30_ops = { .set_uhs_signaling = sdhci_set_uhs_signaling, }; +static const struct sdhci_pltfm_data sdhci_f_sdh30_pltfm_data = { + .ops = &sdhci_f_sdh30_ops, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC + | SDHCI_QUIRK_INVERTED_WRITE_PROTECT, + .quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE + | SDHCI_QUIRK2_TUNING_WORK_AROUND, +}; + static int sdhci_f_sdh30_probe(struct platform_device *pdev) { struct sdhci_host *host; struct device *dev = &pdev->dev; - int irq, ctrl = 0, ret = 0; + int ctrl = 0, ret = 0; struct f_sdhost_priv *priv; + struct sdhci_pltfm_host *pltfm_host; u32 reg = 0; - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv)); + host = sdhci_pltfm_init(pdev, &sdhci_f_sdh30_pltfm_data, + sizeof(struct f_sdhost_priv)); if (IS_ERR(host)) return PTR_ERR(host); - priv = sdhci_priv(host); + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); priv->dev = dev; - host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | - SDHCI_QUIRK_INVERTED_WRITE_PROTECT; - host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE | - SDHCI_QUIRK2_TUNING_WORK_AROUND; - priv->enable_cmd_dat_delay = device_property_read_bool(dev, "fujitsu,cmd-dat-delay-select"); @@ -116,18 +125,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) if (ret) goto err; - platform_set_drvdata(pdev, host); - - host->hw_name = "f_sdh30"; - host->ops = &sdhci_f_sdh30_ops; - host->irq = irq; - - host->ioaddr = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(host->ioaddr)) { - ret = PTR_ERR(host->ioaddr); - goto err; - } - if (dev_of_node(dev)) { sdhci_get_of_property(pdev); @@ -182,23 +179,22 @@ err_add_host: err_clk: clk_disable_unprepare(priv->clk_iface); err: - sdhci_free_host(host); + sdhci_pltfm_free(pdev); + return ret; } static int sdhci_f_sdh30_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); - struct f_sdhost_priv *priv = sdhci_priv(host); - - sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) == - 0xffffffff); + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); + struct clk *clk_iface = priv->clk_iface; + struct clk *clk = priv->clk; - clk_disable_unprepare(priv->clk_iface); - clk_disable_unprepare(priv->clk); + 
sdhci_pltfm_unregister(pdev); - sdhci_free_host(host); - platform_set_drvdata(pdev, NULL); + clk_disable_unprepare(clk_iface); + clk_disable_unprepare(clk); return 0; } diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 032f2c03e8fb..3c213816db78 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1341,8 +1341,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, return ret; host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) { - ret = -EINVAL; + if (host->irq < 0) { + ret = host->irq; goto error_disable_mmc; } diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index 7c7ec8d10232..b5b1a42ca25e 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -1705,8 +1705,6 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma, wbsd_release_resources(host); wbsd_free_mmc(dev); - - mmc_free_host(mmc); return ret; } diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index b3cc427100a2..636e65328bb3 100644 --- a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -135,7 +135,7 @@ static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op, unsigned int i; int ret; - if (op->cs > NAND_MAX_CHIPS) + if (op->cs >= NAND_MAX_CHIPS) return -EINVAL; if (check_only) diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index bb256a3bb9be..9d441965321a 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -1180,7 +1180,6 @@ static int meson_nand_attach_chip(struct nand_chip *nand) struct meson_nfc *nfc = nand_get_controller_data(nand); struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); struct mtd_info *mtd = nand_to_mtd(nand); - int nsectors = mtd->writesize / 1024; int ret; if (!mtd->name) { @@ -1198,7 +1197,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand) nand->options |= NAND_NO_SUBPAGE_WRITE; ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps, - mtd->oobsize - 2 * nsectors); + mtd->oobsize - 2); if (ret) { dev_err(nfc->dev, "failed to ECC init\n"); return -EINVAL; diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index 2b21ce04b3ec..1a48347be3fe 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -177,17 +177,17 @@ static void elm_load_syndrome(struct elm_info *info, switch (info->bch_type) { case BCH8_ECC: /* syndrome fragment 0 = ecc[9-12B] */ - val = cpu_to_be32(*(u32 *) &ecc[9]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]); elm_write_reg(info, offset, val); /* syndrome fragment 1 = ecc[5-8B] */ offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[5]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[5]); elm_write_reg(info, offset, val); /* syndrome fragment 2 = ecc[1-4B] */ offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[1]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[1]); elm_write_reg(info, offset, val); /* syndrome fragment 3 = ecc[0B] */ @@ -197,35 +197,35 @@ static void elm_load_syndrome(struct elm_info *info, break; case BCH4_ECC: /* syndrome fragment 0 = ecc[20-52b] bits */ - val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) | + val = ((__force u32)cpu_to_be32(*(u32 *)&ecc[3]) >> 4) | ((ecc[2] & 0xf) << 28); elm_write_reg(info, offset, val); /* syndrome fragment 1 = ecc[0-20b] bits */ offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12; + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 12; elm_write_reg(info, offset, val); break; case BCH16_ECC: - val = 
cpu_to_be32(*(u32 *) &ecc[22]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[22]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[18]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[18]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[14]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[14]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[10]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[10]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[6]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[6]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[2]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[2]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16; + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 16; elm_write_reg(info, offset, val); break; default: diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c index b5405bc7ca3a..99242bd68437 100644 --- a/drivers/mtd/nand/raw/rockchip-nand-controller.c +++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c @@ -562,9 +562,10 @@ static int rk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf, * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3 * * The rk_nfc_ooblayout_free() function already has reserved - * these 4 bytes with: + * these 4 bytes together with 2 bytes for BBM + * by reducing its length: * - * oob_region->offset = NFC_SYS_DATA_SIZE + 2; + * oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2; */ if (!i) memcpy(rk_nfc_oob_ptr(chip, i), @@ -597,7 +598,7 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, int pages_per_blk = mtd->erasesize / mtd->writesize; int ret = 0, i, boot_rom_mode = 0; dma_addr_t dma_data, dma_oob; - u32 reg; + u32 tmp; u8 *oob; nand_prog_page_begin_op(chip, page, 0, NULL, 0); @@ -624,6 +625,13 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, * * 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ... * + * The code here just swaps the first 4 bytes with the last + * 4 bytes without losing any data. + * + * The chip->oob_poi data layout: + * + * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3 + * + * Configure the ECC algorithm supported by the boot ROM.
*/ if ((page < (pages_per_blk * rknand->boot_blks)) && @@ -634,21 +642,17 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, } for (i = 0; i < ecc->steps; i++) { - if (!i) { - reg = 0xFFFFFFFF; - } else { + if (!i) + oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE; + else oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; - reg = oob[0] | oob[1] << 8 | oob[2] << 16 | - oob[3] << 24; - } - if (!i && boot_rom_mode) - reg = (page & (pages_per_blk - 1)) * 4; + tmp = oob[0] | oob[1] << 8 | oob[2] << 16 | oob[3] << 24; if (nfc->cfg->type == NFC_V9) - nfc->oob_buf[i] = reg; + nfc->oob_buf[i] = tmp; else - nfc->oob_buf[i * (oob_step / 4)] = reg; + nfc->oob_buf[i * (oob_step / 4)] = tmp; } dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf, @@ -811,12 +815,17 @@ static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on, goto timeout_err; } - for (i = 1; i < ecc->steps; i++) { - oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; + for (i = 0; i < ecc->steps; i++) { + if (!i) + oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE; + else + oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; + if (nfc->cfg->type == NFC_V9) tmp = nfc->oob_buf[i]; else tmp = nfc->oob_buf[i * (oob_step / 4)]; + *oob++ = (u8)tmp; *oob++ = (u8)(tmp >> 8); *oob++ = (u8)(tmp >> 16); @@ -935,12 +944,8 @@ static int rk_nfc_ooblayout_free(struct mtd_info *mtd, int section, if (section) return -ERANGE; - /* - * The beginning of the OOB area stores the reserved data for the NFC, - * the size of the reserved data is NFC_SYS_DATA_SIZE bytes. - */ oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2; - oob_region->offset = NFC_SYS_DATA_SIZE + 2; + oob_region->offset = 2; return 0; } diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c index 7380b1ebaccd..a80427c13121 100644 --- a/drivers/mtd/nand/spi/toshiba.c +++ b/drivers/mtd/nand/spi/toshiba.c @@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand, { struct nand_device *nand = spinand_to_nand(spinand); u8 mbf = 0; - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf); + struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf); switch (status & STATUS_ECC_MASK) { case STATUS_ECC_NO_BITFLIPS: @@ -92,7 +92,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand, if (spi_mem_exec_op(spinand->spimem, &op)) return nanddev_get_ecc_conf(nand)->strength; - mbf >>= 4; + mbf = *(spinand->scratchbuf) >> 4; if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf)) return nanddev_get_ecc_conf(nand)->strength; diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index a6a70b872ac4..b29393831a30 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -657,10 +657,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) return NULL; arp = (struct arp_pkt *)skb_network_header(skb); - /* Don't modify or load balance ARPs that do not originate locally - * (e.g.,arrive via a bridge). + /* Don't modify or load balance ARPs that do not originate + * from the bond itself or a VLAN directly above the bond. 
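+	 * The source MAC is matched against the bond's slaves under RCU, + * hence the _rcu flavour of the helper.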
*/ - if (!bond_slave_has_mac_rx(bond, arp->mac_src)) + if (!bond_slave_has_mac_rcu(bond, arp->mac_src)) return NULL; if (arp->op_code == htons(ARPOP_REPLY)) { diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 69cc36c0840f..e64c652b78f0 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5491,7 +5491,9 @@ void bond_setup(struct net_device *bond_dev) bond_dev->hw_features = BOND_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_FILTER; bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; bond_dev->features |= bond_dev->hw_features; diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index be5566168d0f..afd9060c5421 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -179,12 +179,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, nla_peer = data[VXCAN_INFO_PEER]; ifmp = nla_data(nla_peer); - err = rtnl_nla_parse_ifla(peer_tb, - nla_data(nla_peer) + - sizeof(struct ifinfomsg), - nla_len(nla_peer) - - sizeof(struct ifinfomsg), - NULL); + err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack); if (err < 0) return err; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index d76b2377d66e..773d751ef169 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1422,7 +1422,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); - clk_prepare_enable(priv->clk); + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv"); if (IS_ERR(priv->clk_mdiv)) { @@ -1430,7 +1432,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) goto out_clk; } - clk_prepare_enable(priv->clk_mdiv); + ret = clk_prepare_enable(priv->clk_mdiv); + if (ret) + goto out_clk; ret = bcm_sf2_sw_rst(priv); if (ret) { diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 84e1c50ffc4d..836a88e9b700 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2591,6 +2591,14 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip) /* If there is a GPIO connected to the reset pin, toggle it */ if (gpiod) { + /* If the switch has just been reset and has not yet finished + * loading its EEPROM, toggling the reset line now may interrupt + * the EEPROM I2C transaction mid-byte, causing the first EEPROM + * read after the reset to come from the wrong location and + * leaving the switch booted into the wrong mode, inoperable.
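+	 * Waiting for the EEPROM-done flag first avoids cutting that + * load short.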
+ */ + mv88e6xxx_g1_wait_eeprom_done(chip); + gpiod_set_value_cansleep(gpiod, 1); usleep_range(10000, 20000); gpiod_set_value_cansleep(gpiod, 0); diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index a9c99ac81730..c691635cf4eb 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1448,7 +1448,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac) int err; phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); - if (!phy_dev || IS_ERR(phy_dev)) { + if (IS_ERR(phy_dev)) { dev_err(bgmac->dev, "Failed to register fixed PHY device\n"); return -ENODEV; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 8c800d9c11b7..bfe90cacbd07 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -571,7 +571,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) }; phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); - if (!phydev || IS_ERR(phydev)) { + if (IS_ERR(phydev)) { dev_err(kdev, "failed to register fixed PHY device\n"); return -ENODEV; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index bfdc021f4a19..847ebb31d470 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -70,6 +70,8 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev); static void hclge_restore_hw_table(struct hclge_dev *hdev); static void hclge_sync_promisc_mode(struct hclge_dev *hdev); static void hclge_sync_fd_table(struct hclge_dev *hdev); +static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, + int wait_cnt); static struct hnae3_ae_algo ae_algo; @@ -7656,6 +7658,8 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) { +#define HCLGE_LINK_STATUS_WAIT_CNT 3 + struct hclge_desc desc; struct hclge_config_mac_mode_cmd *req = (struct hclge_config_mac_mode_cmd *)desc.data; @@ -7680,9 +7684,15 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) + if (ret) { dev_err(&hdev->pdev->dev, "mac enable fail, ret =%d.\n", ret); + return; + } + + if (!enable) + hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN, + HCLGE_LINK_STATUS_WAIT_CNT); } static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, @@ -7745,10 +7755,9 @@ static void hclge_phy_link_status_wait(struct hclge_dev *hdev, } while (++i < HCLGE_PHY_LINK_STATUS_NUM); } -static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) +static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, + int wait_cnt) { -#define HCLGE_MAC_LINK_STATUS_NUM 100 - int link_status; int i = 0; int ret; @@ -7761,13 +7770,15 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) return 0; msleep(HCLGE_LINK_STATUS_MS); - } while (++i < HCLGE_MAC_LINK_STATUS_NUM); + } while (++i < wait_cnt); return -EBUSY; } static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, bool is_phy) { +#define HCLGE_MAC_LINK_STATUS_NUM 100 + int link_ret; link_ret = en ? 
HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; @@ -7775,7 +7786,8 @@ static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, if (is_phy) hclge_phy_link_status_wait(hdev, link_ret); - return hclge_mac_link_status_wait(hdev, link_ret); + return hclge_mac_link_status_wait(hdev, link_ret, + HCLGE_MAC_LINK_STATUS_NUM); } static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 77d8db9b8a1d..05759f690e1f 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -196,7 +196,7 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length) unsigned long offset; for (offset = 0; offset < length; offset += SMP_CACHE_BYTES) - asm("dcbfl %0,%1" :: "b" (addr), "r" (offset)); + asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset)); } /* replenish the buffers for a pool. note that we don't need to diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 450b4fd9aa7f..890e27b986e2 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1007,12 +1007,22 @@ static int ibmvnic_login(struct net_device *netdev) static void release_login_buffer(struct ibmvnic_adapter *adapter) { + if (!adapter->login_buf) + return; + + dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token, + adapter->login_buf_sz, DMA_TO_DEVICE); kfree(adapter->login_buf); adapter->login_buf = NULL; } static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter) { + if (!adapter->login_rsp_buf) + return; + + dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token, + adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); kfree(adapter->login_rsp_buf); adapter->login_rsp_buf = NULL; } @@ -4220,11 +4230,14 @@ static int send_login(struct ibmvnic_adapter *adapter) if (rc) { adapter->login_pending = false; netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc); - goto buf_rsp_map_failed; + goto buf_send_failed; } return 0; +buf_send_failed: + dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size, + DMA_FROM_DEVICE); buf_rsp_map_failed: kfree(login_rsp_buffer); adapter->login_rsp_buf = NULL; @@ -4788,6 +4801,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, int num_tx_pools; int num_rx_pools; u64 *size_array; + u32 rsp_len; int i; /* CHECK: Test/set of login_pending does not need to be atomic @@ -4799,11 +4813,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, } adapter->login_pending = false; - dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, - DMA_TO_DEVICE); - dma_unmap_single(dev, adapter->login_rsp_buf_token, - adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); - /* If the number of queues requested can't be allocated by the * server, the login response will return with code 1. We will need * to resend the login buffer with fewer queues requested. 
@@ -4839,6 +4848,23 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, ibmvnic_reset(adapter, VNIC_RESET_FATAL); return -EIO; } + + rsp_len = be32_to_cpu(login_rsp->len); + if (be32_to_cpu(login->login_rsp_len) < rsp_len || + rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) || + rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) || + rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) || + rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) { + /* This can happen if a login request times out and there are + * 2 outstanding login requests sent; the LOGIN_RSP crq + * could have been for the older login request, so we are + * parsing the newer response buffer, which may be incomplete + */ + dev_err(dev, "FATAL: Login rsp offsets/lengths invalid\n"); + ibmvnic_reset(adapter, VNIC_RESET_FATAL); + return -EIO; + } + size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); /* variable buffer sizes are not supported, so just read the diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 82af180cc5ee..b7556a6c2758 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -210,11 +210,11 @@ read_nvm_exit: * @hw: pointer to the HW structure. * @module_pointer: module pointer location in words from the NVM beginning * @offset: offset in words from module start - * @words: number of words to write - * @data: buffer with words to write to the Shadow RAM + * @words: number of words to read + * @data: buffer for the words read from the Shadow RAM * @last_command: tells the AdminQ that this is the last command * - * Writes a 16 bit words buffer to the Shadow RAM using the admin command. + * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
**/ static int i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, u32 offset, @@ -234,18 +234,18 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw, */ if ((offset + words) > hw->nvm.sr_size) i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: offset %d beyond Shadow RAM limit %d\n", + "NVM read error: offset %d beyond Shadow RAM limit %d\n", (offset + words), hw->nvm.sr_size); else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) - /* We can write only up to 4KB (one sector), in one AQ write */ + /* We can read only up to 4KB (one sector), in one AQ write */ i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write fail error: tried to write %d words, limit is %d.\n", + "NVM read fail error: tried to read %d words, limit is %d.\n", words, I40E_SR_SECTOR_SIZE_IN_WORDS); else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) - /* A single write cannot spread over two sectors */ + /* A single read cannot spread over two sectors */ i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", + "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n", offset, words); else ret_code = i40e_aq_read_nvm(hw, module_pointer, diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 5af3ae68b7a1..a9a7453d969c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -1275,6 +1275,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc; fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst; fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos; + fltr->ip_ver = 4; break; case AH_V4_FLOW: case ESP_V4_FLOW: @@ -1286,6 +1287,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst; fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi; fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos; + fltr->ip_ver = 4; break; case IPV4_USER_FLOW: fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src; @@ -1298,6 +1300,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes; fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos; fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto; + fltr->ip_ver = 4; break; case TCP_V6_FLOW: case UDP_V6_FLOW: @@ -1316,6 +1319,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc; fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst; fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass; + fltr->ip_ver = 6; break; case AH_V6_FLOW: case ESP_V6_FLOW: @@ -1331,6 +1335,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe sizeof(struct in6_addr)); fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi; fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass; + fltr->ip_ver = 6; break; case IPV6_USER_FLOW: memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src, @@ -1347,6 +1352,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes; fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass; fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto; + fltr->ip_ver = 6; break; case ETHER_FLOW: fltr->eth_data.etype = 
fsp->h_u.ether_spec.h_proto; @@ -1357,6 +1363,10 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe return -EINVAL; } + err = iavf_validate_fdir_fltr_masks(adapter, fltr); + if (err) + return err; + if (iavf_fdir_is_dup_fltr(adapter, fltr)) return -EEXIST; @@ -1387,14 +1397,15 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx if (fsp->flow_type & FLOW_MAC_EXT) return -EINVAL; + spin_lock_bh(&adapter->fdir_fltr_lock); if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) { + spin_unlock_bh(&adapter->fdir_fltr_lock); dev_err(&adapter->pdev->dev, "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n", IAVF_MAX_FDIR_FILTERS); return -ENOSPC; } - spin_lock_bh(&adapter->fdir_fltr_lock); if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) { dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n"); spin_unlock_bh(&adapter->fdir_fltr_lock); @@ -1767,7 +1778,9 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, case ETHTOOL_GRXCLSRLCNT: if (!FDIR_FLTR_SUPPORT(adapter)) break; + spin_lock_bh(&adapter->fdir_fltr_lock); cmd->rule_cnt = adapter->fdir_active_fltr; + spin_unlock_bh(&adapter->fdir_fltr_lock); cmd->data = IAVF_MAX_FDIR_FILTERS; ret = 0; break; diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c index 6146203efd84..03e774bd2a5b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c @@ -18,6 +18,79 @@ static const struct in6_addr ipv6_addr_full_mask = { } }; +static const struct in6_addr ipv6_addr_zero_mask = { + .in6_u = { + .u6_addr8 = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } + } +}; + +/** + * iavf_validate_fdir_fltr_masks - validate Flow Director filter fields masks + * @adapter: pointer to the VF adapter structure + * @fltr: Flow Director filter data structure + * + * Returns 0 if all masks of packet fields are either full or empty. Returns + * error on at least one partial mask. 
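+ * A mask counts as "full" when every bit is set (e.g. htons(U16_MAX) + * for a port field) and "empty" when all bits are clear; anything in + * between is rejected as unsupported.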
+ */ +int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter, + struct iavf_fdir_fltr *fltr) +{ + if (fltr->eth_mask.etype && fltr->eth_mask.etype != htons(U16_MAX)) + goto partial_mask; + + if (fltr->ip_ver == 4) { + if (fltr->ip_mask.v4_addrs.src_ip && + fltr->ip_mask.v4_addrs.src_ip != htonl(U32_MAX)) + goto partial_mask; + + if (fltr->ip_mask.v4_addrs.dst_ip && + fltr->ip_mask.v4_addrs.dst_ip != htonl(U32_MAX)) + goto partial_mask; + + if (fltr->ip_mask.tos && fltr->ip_mask.tos != U8_MAX) + goto partial_mask; + } else if (fltr->ip_ver == 6) { + if (memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_zero_mask, + sizeof(struct in6_addr)) && + memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) + goto partial_mask; + + if (memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_zero_mask, + sizeof(struct in6_addr)) && + memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) + goto partial_mask; + + if (fltr->ip_mask.tclass && fltr->ip_mask.tclass != U8_MAX) + goto partial_mask; + } + + if (fltr->ip_mask.proto && fltr->ip_mask.proto != U8_MAX) + goto partial_mask; + + if (fltr->ip_mask.src_port && fltr->ip_mask.src_port != htons(U16_MAX)) + goto partial_mask; + + if (fltr->ip_mask.dst_port && fltr->ip_mask.dst_port != htons(U16_MAX)) + goto partial_mask; + + if (fltr->ip_mask.spi && fltr->ip_mask.spi != htonl(U32_MAX)) + goto partial_mask; + + if (fltr->ip_mask.l4_header && + fltr->ip_mask.l4_header != htonl(U32_MAX)) + goto partial_mask; + + return 0; + +partial_mask: + dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, partial masks are not supported\n"); + return -EOPNOTSUPP; +} + /** * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload * @fltr: Flow Director filter data structure @@ -263,8 +336,6 @@ iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr, VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST); } - fltr->ip_ver = 4; - return 0; } @@ -309,8 +380,6 @@ iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr, VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST); } - fltr->ip_ver = 6; - return 0; } @@ -722,7 +791,9 @@ void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *f bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) { struct iavf_fdir_fltr *tmp; + bool ret = false; + spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry(tmp, &adapter->fdir_list_head, list) { if (tmp->flow_type != fltr->flow_type) continue; @@ -732,11 +803,14 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr * !memcmp(&tmp->ip_data, &fltr->ip_data, sizeof(fltr->ip_data)) && !memcmp(&tmp->ext_data, &fltr->ext_data, - sizeof(fltr->ext_data))) - return true; + sizeof(fltr->ext_data))) { + ret = true; + break; + } } + spin_unlock_bh(&adapter->fdir_fltr_lock); - return false; + return ret; } /** diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h index 33c55c366315..9eb9f73f6adf 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h @@ -110,6 +110,8 @@ struct iavf_fdir_fltr { struct virtchnl_fdir_add vc_add_msg; }; +int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter, + struct iavf_fdir_fltr *fltr); int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); bool iavf_fdir_is_dup_fltr(struct iavf_adapter 
*adapter, struct iavf_fdir_fltr *fltr); diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 533a953f15ac..09525dbeccfe 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -359,7 +359,8 @@ static int ice_setup_rx_ctx(struct ice_ring *ring) /* Receive Packet Data Buffer Size. * The Packet Data Buffer Size is defined in 128 byte units. */ - rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; + rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len, + BIT_ULL(ICE_RLAN_CTX_DBUF_S)); /* use 32 byte descriptors */ rlan_ctx.dsize = 1; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 0011b15e678c..9cdb7a856ab6 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -1260,18 +1260,6 @@ void igb_ptp_init(struct igb_adapter *adapter) return; } - spin_lock_init(&adapter->tmreg_lock); - INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); - - if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) - INIT_DELAYED_WORK(&adapter->ptp_overflow_work, - igb_ptp_overflow_check); - - adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; - adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; - - igb_ptp_reset(adapter); - adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, &adapter->pdev->dev); if (IS_ERR(adapter->ptp_clock)) { @@ -1281,6 +1269,18 @@ void igb_ptp_init(struct igb_adapter *adapter) dev_info(&adapter->pdev->dev, "added PHC on %s\n", adapter->netdev->name); adapter->ptp_flags |= IGB_PTP_ENABLED; + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + INIT_DELAYED_WORK(&adapter->ptp_overflow_work, + igb_ptp_overflow_check); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + igb_ptp_reset(adapter); } } diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h index ce530f5fd7bd..52849f5e8048 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.h +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -85,8 +85,13 @@ union igc_adv_rx_desc { #define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */ /* SRRCTL bit definitions */ -#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ -#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ -#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define IGC_SRRCTL_BSIZEPKT_MASK GENMASK(6, 0) +#define IGC_SRRCTL_BSIZEPKT(x) FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \ + (x) / 1024) /* in 1 KB resolution */ +#define IGC_SRRCTL_BSIZEHDR_MASK GENMASK(13, 8) +#define IGC_SRRCTL_BSIZEHDR(x) FIELD_PREP(IGC_SRRCTL_BSIZEHDR_MASK, \ + (x) / 64) /* in 64 bytes resolution */ +#define IGC_SRRCTL_DESCTYPE_MASK GENMASK(27, 25) +#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF FIELD_PREP(IGC_SRRCTL_DESCTYPE_MASK, 1) #endif /* _IGC_BASE_H */ diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 60d0ca69ceca..703b62c5f79b 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -539,7 +539,7 @@ #define IGC_PTM_CTRL_START_NOW BIT(29) /* Start PTM Now */ #define IGC_PTM_CTRL_EN BIT(30) /* Enable PTM */ #define IGC_PTM_CTRL_TRIG BIT(31) /* PTM Cycle trigger */ -#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x2f) << 2) +#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2) 
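
The IGC_PTM_CTRL_SHRT_CYC change just above widens the operand mask from 0x2f to 0x3f. 0x2f is binary 101111, so bit 4 of the microsecond value was silently cleared before the register write: any short-cycle interval in 16..31 or 48..63 usec was programmed wrong. A minimal user-space sketch (not kernel code; the two macro bodies are copied from the hunk) makes the corruption visible:

/* Compare the buggy and fixed IGC_PTM_CTRL_SHRT_CYC encodings. */
#include <stdio.h>

#define SHRT_CYC_OLD(usec)	(((usec) & 0x2f) << 2)	/* 0x2f = 0b101111: bit 4 lost */
#define SHRT_CYC_NEW(usec)	(((usec) & 0x3f) << 2)	/* 0x3f: full 6-bit field */

int main(void)
{
	unsigned int usec;

	for (usec = 0; usec < 64; usec++)
		if (SHRT_CYC_OLD(usec) != SHRT_CYC_NEW(usec))
			printf("usec=%2u old=0x%02x new=0x%02x\n",
			       usec, SHRT_CYC_OLD(usec), SHRT_CYC_NEW(usec));
	return 0;
}

The neighbouring SRRCTL hunk attacks the same bug class structurally: GENMASK()/FIELD_PREP() state each field's position and width exactly once, instead of hiding them in hand-written shift constants.
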
#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) #define IGC_PTM_SHORT_CYC_DEFAULT 10 /* Default Short/interrupted cycle interval */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index a47dce10d3a7..a8c24a1c12b4 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -674,8 +674,11 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter, else buf_size = IGC_RXBUFFER_2048; - srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT; - srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT; + srrctl = rd32(IGC_SRRCTL(reg_idx)); + srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK | + IGC_SRRCTL_DESCTYPE_MASK); + srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN); + srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size); srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; wr32(IGC_SRRCTL(reg_idx), srrctl); diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index df9a8eefa007..916e85039b61 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -1301,11 +1301,10 @@ static int korina_probe(struct platform_device *pdev) else if (of_get_ethdev_address(pdev->dev.of_node, dev) < 0) eth_hw_addr_random(dev); - clk = devm_clk_get_optional(&pdev->dev, "mdioclk"); + clk = devm_clk_get_optional_enabled(&pdev->dev, "mdioclk"); if (IS_ERR(clk)) return PTR_ERR(clk); if (clk) { - clk_prepare_enable(clk); lp->mii_clock_freq = clk_get_rate(clk); } else { lp->mii_clock_freq = 200000000; /* max possible input clk */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index dee2f2086bb5..f5922d63e33e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -4013,9 +4013,10 @@ rx_frscfg: if (link < 0) return NIX_AF_ERR_RX_LINK_INVALID; - nix_find_link_frs(rvu, req, pcifunc); linkcfg: + nix_find_link_frs(rvu, req, pcifunc); + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); if (req->update_minlen) diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c index a8d7b889ebee..6bef633aa633 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c @@ -645,7 +645,8 @@ pick_fw_ver: err = request_firmware_direct(&fw->bin, fw_path, fw->dev.dev); if (err) { - if (ver_maj == PRESTERA_SUPP_FW_MAJ_VER) { + if (ver_maj != PRESTERA_PREV_FW_MAJ_VER || + ver_min != PRESTERA_PREV_FW_MIN_VER) { ver_maj = PRESTERA_PREV_FW_MAJ_VER; ver_min = PRESTERA_PREV_FW_MIN_VER; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index b56fea142c24..4590d19c25cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -121,7 +121,9 @@ static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x) trailer_len = alen + plen + 2; - pskb_trim(skb, skb->len - trailer_len); + ret = pskb_trim(skb, skb->len - trailer_len); + if (unlikely(ret)) + return ret; if (skb->protocol == htons(ETH_P_IP)) { ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len); ip_send_check(ipv4hdr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c 
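
The mlx5e IPsec hunk above stops discarding the return value of pskb_trim(): trimming can fail (it may need to reallocate when the skb is cloned), and on failure the headers must not be rewritten for a length the skb never reached. The control flow generalizes beyond skbs; a minimal sketch with hypothetical stand-ins (struct buf, trim(), remove_trailer()) in place of the skb machinery:

#include <errno.h>

struct buf { unsigned int len; };

/* May fail, like pskb_trim() on a cloned skb that needs reallocation. */
static int trim(struct buf *b, unsigned int new_len)
{
	if (new_len > b->len)
		return -EINVAL;
	b->len = new_len;
	return 0;
}

static int remove_trailer(struct buf *b, unsigned int trailer_len)
{
	int ret;

	ret = trim(b, b->len - trailer_len);
	if (ret)	/* before the fix, this result was silently dropped */
		return ret;

	/* Only now is it safe to fix up headers that encode the new,
	 * shorter length, as the patch does for the IPv4/IPv6 header. */
	return 0;
}

int main(void)
{
	struct buf b = { .len = 64 };

	return remove_trailer(&b, 8) ? 1 : 0;
}
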
index 605c8ecc3610..ccccbac04428 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -981,7 +981,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) mutex_lock(&table->lock); /* sync with create/destroy_async_eq */ if (!mlx5_core_is_sf(dev)) clear_rmap(dev); - mlx5_irq_table_destroy(dev); + mlx5_irq_table_free_irqs(dev); mutex_unlock(&table->lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index cb3f9de3d00b..161ad2ae4019 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -802,7 +802,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root, struct fs_node *iter = list_entry(start, struct fs_node, list); struct mlx5_flow_table *ft = NULL; - if (!root || root->type == FS_TYPE_PRIO_CHAINS) + if (!root) return NULL; list_for_each_advance_continue(iter, &root->children, reverse) { @@ -818,20 +818,42 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root, return ft; } -/* If reverse is false then return the first flow table in next priority of - * prio in the tree, else return the last flow table in the previous priority - * of prio in the tree. +static struct fs_node *find_prio_chains_parent(struct fs_node *parent, + struct fs_node **child) +{ + struct fs_node *node = NULL; + + while (parent && parent->type != FS_TYPE_PRIO_CHAINS) { + node = parent; + parent = parent->parent; + } + + if (child) + *child = node; + + return parent; +} + +/* If reverse is false then return the first flow table next to the passed node + * in the tree, else return the last flow table before the node in the tree. + * If skip is true, skip the flow tables in the same prio_chains prio. */ -static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse) +static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool reverse, + bool skip) { + struct fs_node *prio_chains_parent = NULL; struct mlx5_flow_table *ft = NULL; struct fs_node *curr_node; struct fs_node *parent; - parent = prio->node.parent; - curr_node = &prio->node; + if (skip) + prio_chains_parent = find_prio_chains_parent(node, NULL); + parent = node->parent; + curr_node = node; while (!ft && parent) { - ft = find_closest_ft_recursive(parent, &curr_node->list, reverse); + if (parent != prio_chains_parent) + ft = find_closest_ft_recursive(parent, &curr_node->list, + reverse); curr_node = parent; parent = curr_node->parent; } @@ -839,15 +861,15 @@ static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool revers } /* Assuming all the tree is locked by mutex chain lock */ -static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio) +static struct mlx5_flow_table *find_next_chained_ft(struct fs_node *node) { - return find_closest_ft(prio, false); + return find_closest_ft(node, false, true); } /* Assuming all the tree is locked by mutex chain lock */ -static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio) +static struct mlx5_flow_table *find_prev_chained_ft(struct fs_node *node) { - return find_closest_ft(prio, true); + return find_closest_ft(node, true, true); } static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft, @@ -859,7 +881,7 @@ static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft, next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS; fs_get_obj(prio, next_ns ? 
ft->ns->node.parent : ft->node.parent); - return find_next_chained_ft(prio); + return find_next_chained_ft(&prio->node); } static int connect_fts_in_prio(struct mlx5_core_dev *dev, @@ -883,21 +905,55 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev, return 0; } +static struct mlx5_flow_table *find_closet_ft_prio_chains(struct fs_node *node, + struct fs_node *parent, + struct fs_node **child, + bool reverse) +{ + struct mlx5_flow_table *ft; + + ft = find_closest_ft(node, reverse, false); + + if (ft && parent == find_prio_chains_parent(&ft->node, child)) + return ft; + + return NULL; +} + /* Connect flow tables from previous priority of prio to ft */ static int connect_prev_fts(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, struct fs_prio *prio) { + struct fs_node *prio_parent, *parent = NULL, *child, *node; struct mlx5_flow_table *prev_ft; + int err = 0; + + prio_parent = find_prio_chains_parent(&prio->node, &child); + + /* return directly if not under the first sub ns of prio_chains prio */ + if (prio_parent && !list_is_first(&child->list, &prio_parent->children)) + return 0; - prev_ft = find_prev_chained_ft(prio); - if (prev_ft) { + prev_ft = find_prev_chained_ft(&prio->node); + while (prev_ft) { struct fs_prio *prev_prio; fs_get_obj(prev_prio, prev_ft->node.parent); - return connect_fts_in_prio(dev, prev_prio, ft); + err = connect_fts_in_prio(dev, prev_prio, ft); + if (err) + break; + + if (!parent) { + parent = find_prio_chains_parent(&prev_prio->node, &child); + if (!parent) + break; + } + + node = child; + prev_ft = find_closet_ft_prio_chains(node, parent, &child, true); } - return 0; + return err; } static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio @@ -1036,7 +1092,7 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table if (err) return err; - next_ft = first_ft ? first_ft : find_next_chained_ft(prio); + next_ft = first_ft ? first_ft : find_next_chained_ft(&prio->node); err = connect_fwd_rules(dev, ft, next_ft); if (err) return err; @@ -1111,7 +1167,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table); next_ft = unmanaged ? 
ft_attr->next_ft : - find_next_chained_ft(fs_prio); + find_next_chained_ft(&fs_prio->node); ft->def_miss_action = ns->def_miss_action; ft->ns = ns; err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft); @@ -2080,13 +2136,20 @@ EXPORT_SYMBOL(mlx5_del_flow_rules); /* Assuming prio->node.children(flow tables) is sorted by level */ static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft) { + struct fs_node *prio_parent, *child; struct fs_prio *prio; fs_get_obj(prio, ft->node.parent); if (!list_is_last(&ft->node.list, &prio->node.children)) return list_next_entry(ft, node.list); - return find_next_chained_ft(prio); + + prio_parent = find_prio_chains_parent(&prio->node, &child); + + if (prio_parent && list_is_first(&child->list, &prio_parent->children)) + return find_closest_ft(&prio->node, false, false); + + return find_next_chained_ft(&prio->node); } static int update_root_ft_destroy(struct mlx5_flow_table *ft) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 8490c0cf80a8..6fece284de0f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -189,10 +189,15 @@ static void mlx5_timestamp_overflow(struct work_struct *work) clock = container_of(timer, struct mlx5_clock, timer); mdev = container_of(clock, struct mlx5_core_dev, clock); + if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + goto out; + write_seqlock_irqsave(&clock->lock, flags); timecounter_read(&timer->tc); mlx5_update_clock_info_page(mdev); write_sequnlock_irqrestore(&clock->lock, flags); + +out: schedule_delayed_work(&timer->overflow_work, timer->overflow_period); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h index abd024173c42..8cf40a3658d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h @@ -16,6 +16,7 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev); void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev); int mlx5_irq_table_create(struct mlx5_core_dev *dev); void mlx5_irq_table_destroy(struct mlx5_core_dev *dev); +void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev); int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table); int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table); struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 11f3649fdaab..df16dc35bb04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -550,6 +550,24 @@ static void irq_pools_destroy(struct mlx5_irq_table *table) irq_pool_free(table->pf_pool); } +static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool) +{ + struct mlx5_irq *irq; + unsigned long index; + + xa_for_each(&pool->irqs, index, irq) + free_irq(irq->irqn, &irq->nh); +} + +static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table) +{ + if (table->sf_ctrl_pool) { + mlx5_irq_pool_free_irqs(table->sf_comp_pool); + mlx5_irq_pool_free_irqs(table->sf_ctrl_pool); + } + mlx5_irq_pool_free_irqs(table->pf_pool); +} + /* irq_table API */ int mlx5_irq_table_init(struct mlx5_core_dev *dev) @@ -630,6 +648,17 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev) pci_free_irq_vectors(dev->pdev); } +void mlx5_irq_table_free_irqs(struct mlx5_core_dev 
*dev) +{ + struct mlx5_irq_table *table = dev->priv.irq_table; + + if (mlx5_core_is_sf(dev)) + return; + + mlx5_irq_pools_free_irqs(table); + pci_free_irq_vectors(dev->pdev); +} + int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table) { if (table->sf_comp_pool) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index e8185b69ac6c..373d3d4bf3a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -256,8 +256,7 @@ static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev) host_total_vfs = MLX5_GET(query_esw_functions_out, out, host_params_context.host_total_vfs); kvfree(out); - if (host_total_vfs) - return host_total_vfs; + return host_total_vfs; } done: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index fcf705ce421f..aa003a75946b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -528,11 +528,12 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev, err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); if (err) - return err; + goto err_free_in; *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id); - kvfree(in); +err_free_in: + kvfree(in); return err; } diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index d58e021614cd..b656408b9d70 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -877,12 +877,13 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type); /** - * @brief qed_concrete_to_sw_fid - get the sw function id from - * the concrete value. + * qed_concrete_to_sw_fid(): Get the sw function id from + * the concrete value. * - * @param concrete_fid + * @cdev: Qed dev pointer. + * @concrete_fid: Concrete fid. * - * @return inline u8 + * Return: inline u8. */ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, u32 concrete_fid) diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 8adb7ed0c12d..d31196db7bdd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -28,24 +28,23 @@ struct qed_tid_mem { }; /** - * @brief qedo_cid_get_cxt_info - Returns the context info for a specific cid + * qed_cxt_get_cid_info(): Returns the context info for a specific cidi. * + * @p_hwfn: HW device data. + * @p_info: In/out. * - * @param p_hwfn - * @param p_info in/out - * - * @return int + * Return: Int. */ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info); /** - * @brief qed_cxt_get_tid_mem_info + * qed_cxt_get_tid_mem_info(): Returns the tid mem info. * - * @param p_hwfn - * @param p_info + * @p_hwfn: HW device data. + * @p_info: in/out. * - * @return int + * Return: int. */ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, struct qed_tid_mem *p_info); @@ -64,142 +63,155 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *vf_cid); /** - * @brief qed_cxt_set_pf_params - Set the PF params for cxt init + * qed_cxt_set_pf_params(): Set the PF params for cxt init. + * + * @p_hwfn: HW device data. + * @rdma_tasks: Requested maximum. * - * @param p_hwfn - * @param rdma_tasks - requested maximum - * @return int + * Return: int. 
*/ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks); /** - * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters + * qed_cxt_cfg_ilt_compute(): Compute ILT init parameters. * - * @param p_hwfn - * @param last_line + * @p_hwfn: HW device data. + * @last_line: Last_line. * - * @return int + * Return: Int */ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line); /** - * @brief qed_cxt_cfg_ilt_compute_excess - how many lines can be decreased + * qed_cxt_cfg_ilt_compute_excess(): How many lines can be decreased. + * + * @p_hwfn: HW device data. + * @used_lines: Used lines. * - * @param p_hwfn - * @param used_lines + * Return: Int. */ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines); /** - * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct + * qed_cxt_mngr_alloc(): Allocate and init the context manager struct. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_mngr_free + * qed_cxt_mngr_free() - Context manager free. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map + * qed_cxt_tables_alloc(): Allocate ILT shadow, Searcher T2, acquired map. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_mngr_setup - Reset the acquired CIDs + * qed_cxt_mngr_setup(): Reset the acquired CIDs. * - * @param p_hwfn + * @p_hwfn: HW device data. */ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_hw_init_common - Initailze ILT and DQ, common phase, per path. - * + * qed_cxt_hw_init_common(): Initailze ILT and DQ, common phase, per path. * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path. + * qed_cxt_hw_init_pf(): Initailze ILT and DQ, PF phase, per path. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_qm_init_pf - Initailze the QM PF phase, per path + * qed_qm_init_pf(): Initailze the QM PF phase, per path. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @is_pf_loading: Is pf pending. * - * @param p_hwfn - * @param p_ptt - * @param is_pf_loading + * Return: Void. */ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool is_pf_loading); /** - * @brief Reconfigures QM pf on the fly + * qed_qm_reconf(): Reconfigures QM pf on the fly. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int + * Return: Int. */ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); #define QED_CXT_PF_CID (0xff) /** - * @brief qed_cxt_release - Release a cid + * qed_cxt_release_cid(): Release a cid. * - * @param p_hwfn - * @param cid + * @p_hwfn: HW device data. + * @cid: Cid. + * + * Return: Void. */ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid); /** - * @brief qed_cxt_release - Release a cid belonging to a vf-queue + * _qed_cxt_release_cid(): Release a cid belonging to a vf-queue. + * + * @p_hwfn: HW device data. + * @cid: Cid. 
+ * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF. * - * @param p_hwfn - * @param cid - * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF + * Return: Void. */ void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid); /** - * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type + * qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type. * - * @param p_hwfn - * @param type - * @param p_cid + * @p_hwfn: HW device data. + * @type: Type. + * @p_cid: Pointer cid. * - * @return int + * Return: Int. */ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *p_cid); /** - * @brief _qed_cxt_acquire - Acquire a new cid of a specific protocol type - * for a vf-queue + * _qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type + * for a vf-queue. * - * @param p_hwfn - * @param type - * @param p_cid - * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF + * @p_hwfn: HW device data. + * @type: Type. + * @p_cid: Pointer cid. + * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF. * - * @return int + * Return: Int. */ int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *p_cid, u8 vfid); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index d3c1f3879be8..a0a766a1723c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -15,44 +15,52 @@ #include "qed_int.h" /** - * @brief qed_init_dp - initialize the debug level + * qed_init_dp(): Initialize the debug level. * - * @param cdev - * @param dp_module - * @param dp_level + * @cdev: Qed dev pointer. + * @dp_module: Module debug parameter. + * @dp_level: Module debug level. + * + * Return: Void. */ void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level); /** - * @brief qed_init_struct - initialize the device structure to - * its defaults + * qed_init_struct(): Initialize the device structure to + * its defaults. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_init_struct(struct qed_dev *cdev); /** - * @brief qed_resc_free - + * qed_resc_free: Free device resources. * - * @param cdev + * @cdev: Qed dev pointer. + * + * Return: Void. */ void qed_resc_free(struct qed_dev *cdev); /** - * @brief qed_resc_alloc - + * qed_resc_alloc(): Alloc device resources. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_resc_alloc(struct qed_dev *cdev); /** - * @brief qed_resc_setup - + * qed_resc_setup(): Setup device resources. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_resc_setup(struct qed_dev *cdev); @@ -105,94 +113,113 @@ struct qed_hw_init_params { }; /** - * @brief qed_hw_init - + * qed_hw_init(): Init Qed hardware. * - * @param cdev - * @param p_params + * @cdev: Qed dev pointer. + * @p_params: Pointers to params. * - * @return int + * Return: Int. */ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params); /** - * @brief qed_hw_timers_stop_all - stop the timers HW block + * qed_hw_timers_stop_all(): Stop the timers HW block. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return void + * Return: void. */ void qed_hw_timers_stop_all(struct qed_dev *cdev); /** - * @brief qed_hw_stop - + * qed_hw_stop(): Stop Qed hardware. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: int. 
*/ int qed_hw_stop(struct qed_dev *cdev); /** - * @brief qed_hw_stop_fastpath -should be called incase - * slowpath is still required for the device, - * but fastpath is not. + * qed_hw_stop_fastpath(): Should be called incase + * slowpath is still required for the device, + * but fastpath is not. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_hw_stop_fastpath(struct qed_dev *cdev); /** - * @brief qed_hw_start_fastpath -restart fastpath traffic, - * only if hw_stop_fastpath was called + * qed_hw_start_fastpath(): Restart fastpath traffic, + * only if hw_stop_fastpath was called. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn); /** - * @brief qed_hw_prepare - + * qed_hw_prepare(): Prepare Qed hardware. * - * @param cdev - * @param personality - personality to initialize + * @cdev: Qed dev pointer. + * @personality: Personality to initialize. * - * @return int + * Return: Int. */ int qed_hw_prepare(struct qed_dev *cdev, int personality); /** - * @brief qed_hw_remove - + * qed_hw_remove(): Remove Qed hardware. * - * @param cdev + * @cdev: Qed dev pointer. + * + * Return: Void. */ void qed_hw_remove(struct qed_dev *cdev); /** - * @brief qed_ptt_acquire - Allocate a PTT window + * qed_ptt_acquire(): Allocate a PTT window. * - * Should be called at the entry point to the driver (at the beginning of an - * exported function) + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: struct qed_ptt. * - * @return struct qed_ptt + * Should be called at the entry point to the driver (at the beginning of an + * exported function). */ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_release - Release PTT Window + * qed_ptt_acquire_context(): Allocate a PTT window honoring the context + * atomicy. * - * Should be called at the end of a flow - at the end of the function that - * acquired the PTT. + * @p_hwfn: HW device data. + * @is_atomic: Hint from the caller - if the func can sleep or not. + * + * Context: The function should not sleep in case is_atomic == true. + * Return: struct qed_ptt. + * + * Should be called at the entry point to the driver + * (at the beginning of an exported function). + */ +struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, + bool is_atomic); + +/** + * qed_ptt_release(): Release PTT Window. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * + * Return: Void. * - * @param p_hwfn - * @param p_ptt + * Should be called at the end of a flow - at the end of the function that + * acquired the PTT. */ void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -205,15 +232,17 @@ enum qed_dmae_address_type_t { }; /** - * @brief qed_dmae_host2grc - copy data from source addr to - * dmae registers using the given ptt + * qed_dmae_host2grc(): Copy data from source addr to + * dmae registers using the given ptt. * - * @param p_hwfn - * @param p_ptt - * @param source_addr - * @param grc_addr (dmae_data_offset) - * @param size_in_dwords - * @param p_params (default parameters will be used in case of NULL) + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @source_addr: Source address. + * @grc_addr: GRC address (dmae_data_offset). + * @size_in_dwords: Size. + * @p_params: (default parameters will be used in case of NULL). + * + * Return: Int. 
*/ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, @@ -224,29 +253,34 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn, struct qed_dmae_params *p_params); /** - * @brief qed_dmae_grc2host - Read data from dmae data offset - * to source address using the given ptt + * qed_dmae_grc2host(): Read data from dmae data offset + * to source address using the given ptt. + * + * @p_ptt: P_ptt. + * @grc_addr: GRC address (dmae_data_offset). + * @dest_addr: Destination Address. + * @size_in_dwords: Size. + * @p_params: (default parameters will be used in case of NULL). * - * @param p_ptt - * @param grc_addr (dmae_data_offset) - * @param dest_addr - * @param size_in_dwords - * @param p_params (default parameters will be used in case of NULL) + * Return: Int. */ int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords, struct qed_dmae_params *p_params); /** - * @brief qed_dmae_host2host - copy data from to source address - * to a destination adress (for SRIOV) using the given ptt + * qed_dmae_host2host(): Copy data from to source address + * to a destination adrress (for SRIOV) using the given + * ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @source_addr: Source address. + * @dest_addr: Destination address. + * @size_in_dwords: size. + * @p_params: (default parameters will be used in case of NULL). * - * @param p_hwfn - * @param p_ptt - * @param source_addr - * @param dest_addr - * @param size_in_dwords - * @param p_params (default parameters will be used in case of NULL) + * Return: Int. */ int qed_dmae_host2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -259,51 +293,51 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain); /** - * @@brief qed_fw_l2_queue - Get absolute L2 queue ID + * qed_fw_l2_queue(): Get absolute L2 queue ID. * - * @param p_hwfn - * @param src_id - relative to p_hwfn - * @param dst_id - absolute per engine + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. * - * @return int + * Return: Int. */ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id); /** - * @@brief qed_fw_vport - Get absolute vport ID + * qed_fw_vport(): Get absolute vport ID. * - * @param p_hwfn - * @param src_id - relative to p_hwfn - * @param dst_id - absolute per engine + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. * - * @return int + * Return: Int. */ int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id); /** - * @@brief qed_fw_rss_eng - Get absolute RSS engine ID + * qed_fw_rss_eng(): Get absolute RSS engine ID. * - * @param p_hwfn - * @param src_id - relative to p_hwfn - * @param dst_id - absolute per engine + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. * - * @return int + * Return: Int. */ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id); /** - * @brief qed_llh_get_num_ppfid - Return the allocated number of LLH filter - * banks that are allocated to the PF. + * qed_llh_get_num_ppfid(): Return the allocated number of LLH filter + * banks that are allocated to the PF. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return u8 - Number of LLH filter banks + * Return: u8 Number of LLH filter banks. 
*/ u8 qed_llh_get_num_ppfid(struct qed_dev *cdev); @@ -314,45 +348,50 @@ enum qed_eng { }; /** - * @brief qed_llh_set_ppfid_affinity - Set the engine affinity for the given - * LLH filter bank. + * qed_llh_set_ppfid_affinity(): Set the engine affinity for the given + * LLH filter bank. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param eng + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @eng: Engine. * - * @return int + * Return: Int. */ int qed_llh_set_ppfid_affinity(struct qed_dev *cdev, u8 ppfid, enum qed_eng eng); /** - * @brief qed_llh_set_roce_affinity - Set the RoCE engine affinity + * qed_llh_set_roce_affinity(): Set the RoCE engine affinity. * - * @param cdev - * @param eng + * @cdev: Qed dev pointer. + * @eng: Engine. * - * @return int + * Return: Int. */ int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng); /** - * @brief qed_llh_add_mac_filter - Add a LLH MAC filter into the given filter - * bank. + * qed_llh_add_mac_filter(): Add a LLH MAC filter into the given filter + * bank. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param mac_addr - MAC to add + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @mac_addr: MAC to add. + * + * Return: Int. */ int qed_llh_add_mac_filter(struct qed_dev *cdev, u8 ppfid, u8 mac_addr[ETH_ALEN]); /** - * @brief qed_llh_remove_mac_filter - Remove a LLH MAC filter from the given - * filter bank. + * qed_llh_remove_mac_filter(): Remove a LLH MAC filter from the given + * filter bank. + * + * @cdev: Qed dev pointer. + * @ppfid: Ppfid. + * @mac_addr: MAC to remove * - * @param p_ptt - * @param p_filter - MAC to remove + * Return: Void. */ void qed_llh_remove_mac_filter(struct qed_dev *cdev, u8 ppfid, u8 mac_addr[ETH_ALEN]); @@ -368,15 +407,16 @@ enum qed_llh_prot_filter_type_t { }; /** - * @brief qed_llh_add_protocol_filter - Add a LLH protocol filter into the - * given filter bank. + * qed_llh_add_protocol_filter(): Add a LLH protocol filter into the + * given filter bank. + * + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @type: Type of filters and comparing. + * @source_port_or_eth_type: Source port or ethertype to add. + * @dest_port: Destination port to add. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param type - type of filters and comparing - * @param source_port_or_eth_type - source port or ethertype to add - * @param dest_port - destination port to add - * @param type - type of filters and comparing + * Return: Int. */ int qed_llh_add_protocol_filter(struct qed_dev *cdev, @@ -385,14 +425,14 @@ qed_llh_add_protocol_filter(struct qed_dev *cdev, u16 source_port_or_eth_type, u16 dest_port); /** - * @brief qed_llh_remove_protocol_filter - Remove a LLH protocol filter from - * the given filter bank. + * qed_llh_remove_protocol_filter(): Remove a LLH protocol filter from + * the given filter bank. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param type - type of filters and comparing - * @param source_port_or_eth_type - source port or ethertype to add - * @param dest_port - destination port to add + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). 
+ * @type: Type of filters and comparing. + * @source_port_or_eth_type: Source port or ethertype to add. + * @dest_port: Destination port to add. */ void qed_llh_remove_protocol_filter(struct qed_dev *cdev, @@ -401,31 +441,31 @@ qed_llh_remove_protocol_filter(struct qed_dev *cdev, u16 source_port_or_eth_type, u16 dest_port); /** - * *@brief Cleanup of previous driver remains prior to load + * qed_final_cleanup(): Cleanup of previous driver remains prior to load. * - * @param p_hwfn - * @param p_ptt - * @param id - For PF, engine-relative. For VF, PF-relative. - * @param is_vf - true iff cleanup is made for a VF. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @id: For PF, engine-relative. For VF, PF-relative. + * @is_vf: True iff cleanup is made for a VF. * - * @return int + * Return: Int. */ int qed_final_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 id, bool is_vf); /** - * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue. + * qed_get_queue_coalesce(): Retrieve coalesce value for a given queue. * - * @param p_hwfn - * @param p_coal - store coalesce value read from the hardware. - * @param p_handle + * @p_hwfn: HW device data. + * @coal: Store coalesce value read from the hardware. + * @handle: P_handle. * - * @return int + * Return: Int. **/ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); /** - * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and + * qed_set_queue_coalesce(): Configure coalesce parameters for Rx and * Tx queue. The fact that we can configure coalescing to up to 511, but on * varying accuracy [the bigger the value the less accurate] up to a mistake * of 3usec for the highest values. @@ -433,37 +473,38 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] * otherwise configuration would break. * + * @rx_coal: Rx Coalesce value in micro seconds. + * @tx_coal: TX Coalesce value in micro seconds. + * @p_handle: P_handle. * - * @param rx_coal - Rx Coalesce value in micro seconds. - * @param tx_coal - TX Coalesce value in micro seconds. - * @param p_handle - * - * @return int + * Return: Int. **/ int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle); /** - * @brief qed_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER + * qed_pglueb_set_pfid_enable(): Enable or disable PCI BUS MASTER. * - * @param p_hwfn - * @param p_ptt - * @param b_enable - true/false + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @b_enable: True/False. * - * @return int + * Return: Int. */ int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_enable); /** - * @brief db_recovery_add - add doorbell information to the doorbell - * recovery mechanism. + * qed_db_recovery_add(): add doorbell information to the doorbell + * recovery mechanism. + * + * @cdev: Qed dev pointer. + * @db_addr: Doorbell address. + * @db_data: Address of where db_data is stored. + * @db_width: Doorbell is 32b pr 64b. + * @db_space: Doorbell recovery addresses are user or kernel space. * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address of where db_data is stored - * @param db_width - doorbell is 32b pr 64b - * @param db_space - doorbell recovery addresses are user or kernel space + * Return: Int. 
*/ int qed_db_recovery_add(struct qed_dev *cdev, void __iomem *db_addr, @@ -472,13 +513,15 @@ int qed_db_recovery_add(struct qed_dev *cdev, enum qed_db_rec_space db_space); /** - * @brief db_recovery_del - remove doorbell information from the doorbell + * qed_db_recovery_del() - remove doorbell information from the doorbell * recovery mechanism. db_data serves as key (db_addr is not unique). * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address where db_data is stored. Serves as key for the + * @cdev: Qed dev pointer. + * @db_addr: doorbell address. + * @db_data: address where db_data is stored. Serves as key for the * entry to delete. + * + * Return: Int. */ int qed_db_recovery_del(struct qed_dev *cdev, void __iomem *db_addr, void *db_data); diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index b768f0698170..0c55249b3a35 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -694,13 +694,14 @@ static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn, } static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn, - struct qed_fcoe_stats *p_stats) + struct qed_fcoe_stats *p_stats, + bool is_atomic) { struct qed_ptt *p_ptt; memset(p_stats, 0, sizeof(*p_stats)); - p_ptt = qed_ptt_acquire(p_hwfn); + p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic); if (!p_ptt) { DP_ERR(p_hwfn, "Failed to acquire ptt\n"); @@ -974,19 +975,27 @@ static int qed_fcoe_destroy_conn(struct qed_dev *cdev, QED_SPQ_MODE_EBLOCK, NULL); } +static int qed_fcoe_stats_context(struct qed_dev *cdev, + struct qed_fcoe_stats *stats, + bool is_atomic) +{ + return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic); +} + static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats) { - return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats); + return qed_fcoe_stats_context(cdev, stats, false); } void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, - struct qed_mcp_fcoe_stats *stats) + struct qed_mcp_fcoe_stats *stats, + bool is_atomic) { struct qed_fcoe_stats proto_stats; /* Retrieve FW statistics */ memset(&proto_stats, 0, sizeof(proto_stats)); - if (qed_fcoe_stats(cdev, &proto_stats)) { + if (qed_fcoe_stats_context(cdev, &proto_stats, is_atomic)) { DP_VERBOSE(cdev, QED_MSG_STORAGE, "Failed to collect FCoE statistics\n"); return; diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h index 19c85adf4ceb..214e8299ecb4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h @@ -28,8 +28,20 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn); void qed_fcoe_setup(struct qed_hwfn *p_hwfn); void qed_fcoe_free(struct qed_hwfn *p_hwfn); +/** + * qed_get_protocol_stats_fcoe(): Fills provided statistics + * struct with statistics. + * + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * @is_atomic: Hint from the caller - if the func can sleep or not. + * + * Context: The function should not sleep in case is_atomic == true. + * Return: Void. 
+ */ void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, - struct qed_mcp_fcoe_stats *stats); + struct qed_mcp_fcoe_stats *stats, + bool is_atomic); #else /* CONFIG_QED_FCOE */ static inline int qed_fcoe_alloc(struct qed_hwfn *p_hwfn) { @@ -40,7 +52,8 @@ static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn) {} static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn) {} static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, - struct qed_mcp_fcoe_stats *stats) + struct qed_mcp_fcoe_stats *stats, + bool is_atomic) { } #endif /* CONFIG_QED_FCOE */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index fb1baa2da2d0..744c82a10875 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -3012,96 +3012,102 @@ struct iro { /***************************** Public Functions *******************************/ /** - * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug - * arrays. + * qed_dbg_set_bin_ptr(): Sets a pointer to the binary data with debug + * arrays. * - * @param p_hwfn - HW device data - * @param bin_ptr - a pointer to the binary data with debug arrays. + * @p_hwfn: HW device data. + * @bin_ptr: A pointer to the binary data with debug arrays. + * + * Return: enum dbg status. */ enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, const u8 * const bin_ptr); /** - * @brief qed_read_regs - Reads registers into a buffer (using GRC). + * qed_read_regs(): Reads registers into a buffer (using GRC). + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf: Destination buffer. + * @addr: Source GRC address in dwords. + * @len: Number of registers to read. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf - Destination buffer. - * @param addr - Source GRC address in dwords. - * @param len - Number of registers to read. + * Return: Void. */ void qed_read_regs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len); /** - * @brief qed_read_fw_info - Reads FW info from the chip. + * qed_read_fw_info(): Reads FW info from the chip. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @fw_info: (Out) a pointer to write the FW info into. + * + * Return: True if the FW info was read successfully from one of the Storms, + * or false if all Storms are in reset. * * The FW info contains FW-related information, such as the FW version, * FW image (main/L2B/kuku), FW timestamp, etc. * The FW info is read from the internal RAM of the first Storm that is not in * reset. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param fw_info - Out: a pointer to write the FW info into. - * - * @return true if the FW info was read successfully from one of the Storms, - * or false if all Storms are in reset. */ bool qed_read_fw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct fw_info *fw_info); /** - * @brief qed_dbg_grc_config - Sets the value of a GRC parameter. + * qed_dbg_grc_config(): Sets the value of a GRC parameter. * - * @param p_hwfn - HW device data - * @param grc_param - GRC parameter - * @param val - Value to set. + * @p_hwfn: HW device data. + * @grc_param: GRC parameter. + * @val: Value to set. 
* - * @return error if one of the following holds: - * - the version wasn't set - * - grc_param is invalid - * - val is outside the allowed boundaries + * Return: Error if one of the following holds: + * - The version wasn't set. + * - Grc_param is invalid. + * - Val is outside the allowed boundaries. */ enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn, enum dbg_grc_params grc_param, u32 val); /** - * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their - * default value. + * qed_dbg_grc_set_params_default(): Reverts all GRC parameters to their + * default value. + * + * @p_hwfn: HW device data. * - * @param p_hwfn - HW device data + * Return: Void. */ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn); /** - * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for - * GRC Dump. + * qed_dbg_grc_get_dump_buf_size(): Returns the required buffer size for + * GRC Dump. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump - * data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) required buffer size (in dwords) for the GRC Dump + * data. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. */ enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); /** - * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the collected GRC data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified dump buffer is too small - * Otherwise, returns ok. + * qed_dbg_grc_dump(): Dumps GRC data into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the collected GRC data into. + * @buf_size_in_dwords:Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified dump buffer is too small. + * Otherwise, returns ok. */ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3110,36 +3116,36 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, u32 *num_dumped_dwords); /** - * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size - * for idle check results. + * qed_dbg_idle_chk_get_dump_buf_size(): Returns the required buffer size + * for idle check results. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for the idle check - * data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) required buffer size (in dwords) for the idle check + * data. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. 
+ * return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. */ enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); /** - * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results - * into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the idle check data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * Otherwise, returns ok. + * qed_dbg_idle_chk_dump: Performs idle check and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the idle check data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * Otherwise, returns ok. */ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3148,42 +3154,42 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, u32 *num_dumped_dwords); /** - * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size - * for mcp trace results. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the trace data in MCP scratchpad contain an invalid signature - * - the bundle ID in NVRAM is invalid - * - the trace meta data cannot be found (in NVRAM or image file) - * Otherwise, returns ok. + * qed_dbg_mcp_trace_get_dump_buf_size(): Returns the required buffer size + * for mcp trace results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for mcp trace data. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The trace data in MCP scratchpad contain an invalid signature. + * - The bundle ID in NVRAM is invalid. + * - The trace meta data cannot be found (in NVRAM or image file). + * Otherwise, returns ok. */ enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); /** - * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results - * into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the mcp trace data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. 
- * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - the trace data in MCP scratchpad contain an invalid signature - * - the bundle ID in NVRAM is invalid - * - the trace meta data cannot be found (in NVRAM or image file) - * - the trace meta data cannot be read (from NVRAM or image file) - * Otherwise, returns ok. + * qed_dbg_mcp_trace_dump(): Performs mcp trace and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the mcp trace data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - The trace data in MCP scratchpad contain an invalid signature. + * - The bundle ID in NVRAM is invalid. + * - The trace meta data cannot be found (in NVRAM or image file). + * - The trace meta data cannot be read (from NVRAM or image file). + * Otherwise, returns ok. */ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3192,36 +3198,36 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, u32 *num_dumped_dwords); /** - * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size - * for grc trace fifo results. + * qed_dbg_reg_fifo_get_dump_buf_size(): Returns the required buffer size + * for grc trace fifo results. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for reg fifo data. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. */ enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); /** - * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into - * the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the reg fifo data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - DMAE transaction failed - * Otherwise, returns ok. + * qed_dbg_reg_fifo_dump(): Reads the reg fifo and writes the results into + * the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the reg fifo data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - DMAE transaction failed. + * Otherwise, returns ok. 
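+ *
+ * Example (editor's sketch, not part of the original patch; assumes the
+ * DBG_STATUS_OK value from this file and vzalloc() as used elsewhere in
+ * this driver). The usual pattern is to query the size first:
+ *
+ *	u32 size, dumped;
+ *
+ *	if (qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt, &size) ==
+ *	    DBG_STATUS_OK) {
+ *		u32 *buf = vzalloc(size * sizeof(u32));
+ *
+ *		if (buf)
+ *			qed_dbg_reg_fifo_dump(p_hwfn, p_ptt, buf, size,
+ *					      &dumped);
+ *	}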
*/ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3230,37 +3236,37 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, u32 *num_dumped_dwords); /** - * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size - * for the IGU fifo results. + * qed_dbg_igu_fifo_get_dump_buf_size(): Returns the required buffer size + * for the IGU fifo results. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo - * data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for the IGU fifo + * data. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. */ enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); /** - * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into - * the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the IGU fifo data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - DMAE transaction failed - * Otherwise, returns ok. + * qed_dbg_igu_fifo_dump(): Reads the IGU fifo and writes the results into + * the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the IGU fifo data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * - The specified buffer is too small + * - DMAE transaction failed + * Otherwise, returns ok. */ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3269,37 +3275,37 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, u32 *num_dumped_dwords); /** - * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required - * buffer size for protection override window results. + * qed_dbg_protection_override_get_dump_buf_size(): Returns the required + * buffer size for protection override window results. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for protection - * override data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for protection + * override data. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. 
*/ enum dbg_status qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); /** - * @brief qed_dbg_protection_override_dump - Reads protection override window - * entries and writes the results into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the protection override data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - DMAE transaction failed - * Otherwise, returns ok. + * qed_dbg_protection_override_dump(): Reads protection override window + * entries and writes the results into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the protection override data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - DMAE transaction failed. + * Otherwise, returns ok. */ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3307,34 +3313,34 @@ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, u32 buf_size_in_dwords, u32 *num_dumped_dwords); /** - * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer - * size for FW Asserts results. + * qed_dbg_fw_asserts_get_dump_buf_size(): Returns the required buffer + * size for FW Asserts results. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for FW Asserts data. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. */ enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); /** - * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results - * into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the FW Asserts data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * Otherwise, returns ok. + * qed_dbg_fw_asserts_dump(): Reads the FW Asserts and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the FW Asserts data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small.
+ * Otherwise, returns ok. */ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3343,19 +3349,19 @@ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, u32 *num_dumped_dwords); /** - * @brief qed_dbg_read_attn - Reads the attention registers of the specified + * qed_dbg_read_attn(): Reads the attention registers of the specified * block and type, and writes the results into the specified buffer. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param block - Block ID. - * @param attn_type - Attention type. - * @param clear_status - Indicates if the attention status should be cleared. - * @param results - OUT: Pointer to write the read results into + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @block: Block ID. + * @attn_type: Attention type. + * @clear_status: Indicates if the attention status should be cleared. + * @results: (OUT) Pointer to write the read results into. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. */ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3365,15 +3371,15 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, struct dbg_attn_block_result *results); /** - * @brief qed_dbg_print_attn - Prints attention registers values in the - * specified results struct. + * qed_dbg_print_attn(): Prints attention registers values in the + * specified results struct. * - * @param p_hwfn - * @param results - Pointer to the attention read results + * @p_hwfn: HW device data. + * @results: Pointer to the attention read results * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. */ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, struct dbg_attn_block_result *results); @@ -3420,60 +3426,64 @@ struct dbg_tools_user_data { /***************************** Public Functions *******************************/ /** - * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with - * debug arrays. + * qed_dbg_user_set_bin_ptr(): Sets a pointer to the binary data with + * debug arrays. * - * @param p_hwfn - HW device data - * @param bin_ptr - a pointer to the binary data with debug arrays. + * @p_hwfn: HW device data. + * @bin_ptr: a pointer to the binary data with debug arrays. + * + * Return: dbg_status. */ enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn, const u8 * const bin_ptr); /** - * @brief qed_dbg_alloc_user_data - Allocates user debug data. + * qed_dbg_alloc_user_data(): Allocates user debug data. + * + * @p_hwfn: HW device data. + * @user_data_ptr: (OUT) a pointer to the allocated memory. * - * @param p_hwfn - HW device data - * @param user_data_ptr - OUT: a pointer to the allocated memory. + * Return: dbg_status. */ enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn, void **user_data_ptr); /** - * @brief qed_dbg_get_status_str - Returns a string for the specified status. + * qed_dbg_get_status_str(): Returns a string for the specified status. * - * @param status - a debug status code. + * @status: A debug status code. * - * @return a string for the specified status + * Return: A string for the specified status. 
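+ *
+ * Example (editor's sketch, not part of the original patch; DP_NOTICE()
+ * is the logging macro used elsewhere in this driver):
+ *
+ *	enum dbg_status rc = qed_dbg_grc_config(p_hwfn, grc_param, val);
+ *
+ *	if (rc != DBG_STATUS_OK)
+ *		DP_NOTICE(p_hwfn, "%s\n", qed_dbg_get_status_str(rc));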
*/ const char *qed_dbg_get_status_str(enum dbg_status status); /** - * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size - * for idle check results (in bytes). + * qed_get_idle_chk_results_buf_size(): Returns the required buffer size + * for idle check results (in bytes). * - * @param p_hwfn - HW device data - * @param dump_buf - idle check dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. + * @p_hwfn: HW device data. + * @dump_buf: idle check dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); /** - * @brief qed_print_idle_chk_results - Prints idle check results + * qed_print_idle_chk_results(): Prints idle check results * - * @param p_hwfn - HW device data - * @param dump_buf - idle check dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the idle check results. - * @param num_errors - OUT: number of errors found in idle check. - * @param num_warnings - OUT: number of warnings found in idle check. + * @p_hwfn: HW device data. + * @dump_buf: idle check dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf: buffer for printing the idle check results. + * @num_errors: (OUT) number of errors found in idle check. + * @num_warnings: (OUT) number of warnings found in idle check. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3483,28 +3493,30 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, u32 *num_warnings); /** - * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data. + * qed_dbg_mcp_trace_set_meta_data(): Sets the MCP Trace meta data. + * + * @p_hwfn: HW device data. + * @meta_buf: Meta buffer. + * + * Return: Void. * * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to * no NVRAM access). - * - * @param data - pointer to MCP Trace meta data - * @param size - size of MCP Trace meta data in dwords */ void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, const u32 *meta_buf); /** - * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size - * for MCP Trace results (in bytes). + * qed_get_mcp_trace_results_buf_size(): Returns the required buffer size + * for MCP Trace results (in bytes). * - * @param p_hwfn - HW device data - * @param dump_buf - MCP Trace dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. + * @p_hwfn: HW device data. + * @dump_buf: MCP Trace dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise.
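+ *
+ * Example (editor's sketch, not part of the original patch): size the
+ * output buffer before parsing with qed_print_mcp_trace_results():
+ *
+ *	u32 bytes;
+ *
+ *	if (qed_get_mcp_trace_results_buf_size(p_hwfn, dump_buf,
+ *					       num_dumped_dwords,
+ *					       &bytes) == DBG_STATUS_OK) {
+ *		char *out = vzalloc(bytes);
+ *
+ *		if (out)
+ *			qed_print_mcp_trace_results(p_hwfn, dump_buf,
+ *						    num_dumped_dwords, out);
+ *	}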
*/ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3512,14 +3524,14 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, u32 *results_buf_size); /** - * @brief qed_print_mcp_trace_results - Prints MCP Trace results + * qed_print_mcp_trace_results(): Prints MCP Trace results * - * @param p_hwfn - HW device data - * @param dump_buf - mcp trace dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the mcp trace results. + * @p_hwfn: HW device data. + * @dump_buf: MCP trace dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the mcp trace results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3527,30 +3539,30 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, char *results_buf); /** - * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and + * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and * keeps the MCP trace meta data allocated, to support continuous MCP Trace * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should * be called to free the meta data. * - * @param p_hwfn - HW device data - * @param dump_buf - mcp trace dump buffer, starting from the header. - * @param results_buf - buffer for printing the mcp trace results. + * @p_hwfn: HW device data. + * @dump_buf: MCP trace dump buffer, starting from the header. + * @results_buf: Buffer for printing the mcp trace results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, u32 *dump_buf, char *results_buf); /** - * @brief print_mcp_trace_line - Prints MCP Trace results for a single line + * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line * - * @param p_hwfn - HW device data - * @param dump_buf - mcp trace dump buffer, starting from the header. - * @param num_dumped_bytes - number of bytes that were dumped. - * @param results_buf - buffer for printing the mcp trace results. + * @p_hwfn: HW device data. + * @dump_buf: MCP trace dump buffer, starting from the header. + * @num_dumped_bytes: Number of bytes that were dumped. + * @results_buf: Buffer for printing the mcp trace results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, u8 *dump_buf, @@ -3558,24 +3570,26 @@ enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, char *results_buf); /** - * @brief mcp_trace_free_meta_data - Frees the MCP Trace meta data. + * qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data. * Should be called after continuous MCP Trace parsing. * - * @param p_hwfn - HW device data + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn); /** - * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size - * for reg_fifo results (in bytes). + * qed_get_reg_fifo_results_buf_size(): Returns the required buffer size + * for reg_fifo results (in bytes).
* - * @param p_hwfn - HW device data - * @param dump_buf - reg fifo dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. + * @p_hwfn: HW device data. + * @dump_buf: Reg fifo dump buffer. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3583,14 +3597,14 @@ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 *results_buf_size); /** - * @brief qed_print_reg_fifo_results - Prints reg fifo results + * qed_print_reg_fifo_results(): Prints reg fifo results. * - * @param p_hwfn - HW device data - * @param dump_buf - reg fifo dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the reg fifo results. + * @p_hwfn: HW device data. + * @dump_buf: Reg fifo dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the reg fifo results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3598,16 +3612,16 @@ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, char *results_buf); /** - * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size - * for igu_fifo results (in bytes). + * qed_get_igu_fifo_results_buf_size(): Returns the required buffer size + * for igu_fifo results (in bytes). * - * @param p_hwfn - HW device data - * @param dump_buf - IGU fifo dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. + * @p_hwfn: HW device data. + * @dump_buf: IGU fifo dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3615,14 +3629,14 @@ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 *results_buf_size); /** - * @brief qed_print_igu_fifo_results - Prints IGU fifo results + * qed_print_igu_fifo_results(): Prints IGU fifo results * - * @param p_hwfn - HW device data - * @param dump_buf - IGU fifo dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the IGU fifo results. + * @p_hwfn: HW device data. + * @dump_buf: IGU fifo dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the IGU fifo results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. 
*/ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3630,16 +3644,16 @@ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, char *results_buf); /** - * @brief qed_get_protection_override_results_buf_size - Returns the required - * buffer size for protection override results (in bytes). + * qed_get_protection_override_results_buf_size(): Returns the required + * buffer size for protection override results (in bytes). * - * @param p_hwfn - HW device data - * @param dump_buf - protection override dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. + * @p_hwfn: HW device data. + * @dump_buf: Protection override dump buffer. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, @@ -3648,15 +3662,15 @@ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, u32 *results_buf_size); /** - * @brief qed_print_protection_override_results - Prints protection override - * results. + * qed_print_protection_override_results(): Prints protection override + * results. * - * @param p_hwfn - HW device data - * @param dump_buf - protection override dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the reg fifo results. + * @p_hwfn: HW device data. + * @dump_buf: Protection override dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the reg fifo results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3664,16 +3678,16 @@ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, char *results_buf); /** - * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size - * for FW Asserts results (in bytes). + * qed_get_fw_asserts_results_buf_size(): Returns the required buffer size + * for FW Asserts results (in bytes). * - * @param p_hwfn - HW device data - * @param dump_buf - FW Asserts dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. + * @p_hwfn: HW device data. + * @dump_buf: FW Asserts dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3681,14 +3695,14 @@ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, u32 *results_buf_size); /** - * @brief qed_print_fw_asserts_results - Prints FW Asserts results + * qed_print_fw_asserts_results(): Prints FW Asserts results. * - * @param p_hwfn - HW device data - * @param dump_buf - FW Asserts dump buffer, starting from the header. 
- * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the FW Asserts results. + * @p_hwfn: HW device data. + * @dump_buf: FW Asserts dump buffer, starting from the header. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf: buffer for printing the FW Asserts results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3696,15 +3710,15 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, char *results_buf); /** - * @brief qed_dbg_parse_attn - Parses and prints attention registers values in - * the specified results struct. + * qed_dbg_parse_attn(): Parses and prints attention registers values in + * the specified results struct. * - * @param p_hwfn - HW device data - * @param results - Pointer to the attention read results + * @p_hwfn: HW device data. + * @results: Pointer to the attention read results. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. */ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, struct dbg_attn_block_result *results); @@ -3746,18 +3760,18 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, #define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL /** - * @brief qed_qm_pf_mem_size - prepare QM ILT sizes + * qed_qm_pf_mem_size(): Prepare QM ILT sizes. * - * Returns the required host memory size in 4KB units. - * Must be called before all QM init HSI functions. + * @num_pf_cids: Number of connections used by this PF. + * @num_vf_cids: Number of connections used by VFs of this PF. + * @num_tids: Number of tasks used by this PF. + * @num_pf_pqs: Number of PQs used by this PF. + * @num_vf_pqs: Number of PQs used by VFs of this PF. * - * @param num_pf_cids - number of connections used by this PF - * @param num_vf_cids - number of connections used by VFs of this PF - * @param num_tids - number of tasks used by this PF - * @param num_pf_pqs - number of PQs used by this PF - * @param num_vf_pqs - number of PQs used by VFs of this PF + * Return: The required host memory size in 4KB units. * - * @return The required host memory size in 4KB units. + * Must be called before all QM init HSI functions. */ u32 qed_qm_pf_mem_size(u32 num_pf_cids, u32 num_vf_cids, @@ -3800,74 +3814,74 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, struct qed_qm_pf_rt_init_params *p_params); /** - * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF + * qed_init_pf_wfq(): Initializes the WFQ weight of the specified PF. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param pf_id - PF ID - * @param pf_wfq - WFQ weight. Must be non-zero. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF ID. + * @pf_wfq: WFQ weight. Must be non-zero. * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error.
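+ *
+ * Example (editor's sketch, not part of the original patch; the weight
+ * value is illustrative and p_hwfn->rel_pf_id is the PF's relative ID):
+ *
+ *	if (qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, 1))
+ *		DP_NOTICE(p_hwfn, "Failed to configure PF WFQ\n");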
*/ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq); /** - * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF + * qed_init_pf_rl(): Initializes the rate limit of the specified PF * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param pf_id - PF ID - * @param pf_rl - rate limit in Mb/sec units + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF ID. + * @pf_rl: rate limit in Mb/sec units * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error. */ int qed_init_pf_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl); /** - * @brief qed_init_vport_wfq Initializes the WFQ weight of the specified VPORT + * qed_init_vport_wfq(): Initializes the WFQ weight of the specified VPORT * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param first_tx_pq_id- An array containing the first Tx PQ ID associated - * with the VPORT for each TC. This array is filled by - * qed_qm_pf_rt_init - * @param vport_wfq - WFQ weight. Must be non-zero. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @first_tx_pq_id: An array containing the first Tx PQ ID associated + * with the VPORT for each TC. This array is filled by + * qed_qm_pf_rt_init + * @wfq: WFQ weight. Must be non-zero. * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error. */ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq); /** - * @brief qed_init_global_rl - Initializes the rate limit of the specified - * rate limiter + * qed_init_global_rl(): Initializes the rate limit of the specified + * rate limiter. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param rl_id - RL ID - * @param rate_limit - rate limit in Mb/sec units + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @rl_id: RL ID. + * @rate_limit: Rate limit in Mb/sec units * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error. */ int qed_init_global_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit); /** - * @brief qed_send_qm_stop_cmd Sends a stop command to the QM + * qed_send_qm_stop_cmd(): Sends a stop command to the QM. * - * @param p_hwfn - * @param p_ptt - * @param is_release_cmd - true for release, false for stop. - * @param is_tx_pq - true for Tx PQs, false for Other PQs. - * @param start_pq - first PQ ID to stop - * @param num_pqs - Number of PQs to stop, starting from start_pq. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @is_release_cmd: true for release, false for stop. + * @is_tx_pq: true for Tx PQs, false for Other PQs. + * @start_pq: first PQ ID to stop + * @num_pqs: Number of PQs to stop, starting from start_pq. * - * @return bool, true if successful, false if timeout occurred while waiting for - * QM command done. + * Return: Bool, true if successful, false if timeout occurred while waiting + * for QM command done. */ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3875,53 +3889,64 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, bool is_tx_pq, u16 start_pq, u16 num_pqs); /** - * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port + * qed_set_vxlan_dest_port(): Initializes vxlan tunnel destination udp port. 
* - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param dest_port - vxlan destination udp port. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dest_port: vxlan destination udp port. + * + * Return: Void. */ void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 dest_port); /** - * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW + * qed_set_vxlan_enable(): Enable or disable VXLAN tunnel in HW. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @vxlan_enable: vxlan enable flag. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param vxlan_enable - vxlan enable flag. + * Return: Void. */ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool vxlan_enable); /** - * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW + * qed_set_gre_enable(): Enable or disable GRE tunnel in HW. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @eth_gre_enable: Eth GRE enable flag. + * @ip_gre_enable: IP GRE enable flag. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param eth_gre_enable - eth GRE enable enable flag. - * @param ip_gre_enable - IP GRE enable enable flag. + * Return: Void. */ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool eth_gre_enable, bool ip_gre_enable); /** - * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port + * qed_set_geneve_dest_port(): Initializes geneve tunnel destination udp port. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param dest_port - geneve destination udp port. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dest_port: Geneve destination udp port. + * + * Return: Void. */ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 dest_port); /** - * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW + * qed_set_geneve_enable(): Enable or disable GENEVE tunnel in HW. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @eth_geneve_enable: Eth GENEVE enable flag. + * @ip_geneve_enable: IP GENEVE enable flag. * - * @param p_ptt - ptt window used for writing the registers. - * @param eth_geneve_enable - eth GENEVE enable enable flag. - * @param ip_geneve_enable - IP GENEVE enable enable flag. + * Return: Void. */ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3931,25 +3956,29 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool enable); /** - * @brief qed_gft_disable - Disable GFT + * qed_gft_disable(): Disable GFT. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF on which to disable GFT. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param pf_id - pf on which to disable GFT. + * Return: Void. */ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id); /** - * @brief qed_gft_config - Enable and configure HW for GFT - * - * @param p_hwfn - HW device data - * @param p_ptt - ptt window used for writing the registers. - * @param pf_id - pf on which to enable GFT. - * @param tcp - set profile tcp packets. - * @param udp - set profile udp packet.
- * @param ipv4 - set profile ipv4 packet. - * @param ipv6 - set profile ipv6 packet. - * @param profile_type - define packet same fields. Use enum gft_profile_type. + * qed_gft_config(): Enable and configure HW for GFT. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF on which to enable GFT. + * @tcp: Set profile tcp packets. + * @udp: Set profile udp packet. + * @ipv4: Set profile ipv4 packet. + * @ipv6: Set profile ipv6 packet. + * @profile_type: Define packet same fields. Use enum gft_profile_type. + * + * Return: Void. */ void qed_gft_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3959,107 +3988,120 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, bool ipv4, bool ipv6, enum gft_profile_type profile_type); /** - * @brief qed_enable_context_validation - Enable and configure context - * validation. + * qed_enable_context_validation(): Enable and configure context + * validation. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. + * Return: Void. */ void qed_enable_context_validation(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_calc_session_ctx_validation - Calcualte validation byte for - * session context. + * qed_calc_session_ctx_validation(): Calculate validation byte for + * session context. * - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - context size. - * @param ctx_type - context type. - * @param cid - context cid. + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Context size. + * @ctx_type: Context type. + * @cid: Context cid. + * + * Return: Void. */ void qed_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 cid); /** - * @brief qed_calc_task_ctx_validation - Calcualte validation byte for task - * context. + * qed_calc_task_ctx_validation(): Calculate validation byte for task + * context. + * + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Context size. + * @ctx_type: Context type. + * @tid: Context tid. * - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - context size. - * @param ctx_type - context type. - * @param tid - context tid. + * Return: Void. */ void qed_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 tid); /** - * @brief qed_memset_session_ctx - Memset session context to 0 while - * preserving validation bytes. + * qed_memset_session_ctx(): Memset session context to 0 while + * preserving validation bytes. + * + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Size to initialize. + * @ctx_type: Context type. * - * @param p_hwfn - - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - size to initialzie. - * @param ctx_type - context type. + * Return: Void. */ void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); /** - * @brief qed_memset_task_ctx - Memset task context to 0 while preserving - * validation bytes. + * qed_memset_task_ctx(): Memset task context to 0 while preserving + * validation bytes. + * + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Size to initialize. + * @ctx_type: Context type. + * + * Return: Void.
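+ *
+ * Example (editor's sketch of the call order implied by the descriptions
+ * above; not part of the original patch). The validation bytes are set
+ * first, and a later clear preserves them:
+ *
+ *	qed_calc_task_ctx_validation(p_ctx_mem, ctx_size, ctx_type, tid);
+ *	qed_memset_task_ctx(p_ctx_mem, ctx_size, ctx_type);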
*/ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); #define NUM_STORMS 6 /** - * @brief qed_set_rdma_error_level - Sets the RDMA assert level. - * If the severity of the error will be - * above the level, the FW will assert. - * @param p_hwfn - HW device data - * @param p_ptt - ptt window used for writing the registers - * @param assert_level - An array of assert levels for each storm. + * qed_set_rdma_error_level(): Sets the RDMA assert level. + * If the severity of the error will be + * above the level, the FW will assert. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @assert_level: An array of assert levels for each storm. * + * Return: Void. */ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 assert_level[NUM_STORMS]); /** - * @brief qed_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory. + * qed_fw_overlay_mem_alloc(): Allocates and fills the FW overlay memory. * - * @param p_hwfn - HW device data - * @param fw_overlay_in_buf - the input FW overlay buffer. - * @param buf_size - the size of the input FW overlay buffer in bytes. - * must be aligned to dwords. - * @param fw_overlay_out_mem - OUT: a pointer to the allocated overlays memory. + * @p_hwfn: HW device data. + * @fw_overlay_in_buf: The input FW overlay buffer. + * @buf_size_in_bytes: The size of the input FW overlay buffer in bytes. + * must be aligned to dwords. * - * @return a pointer to the allocated overlays memory, + * Return: A pointer to the allocated overlays memory, * or NULL in case of failures. */ struct phys_mem_desc * qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, - const u32 * const fw_overlay_in_buf, + const u32 *const fw_overlay_in_buf, u32 buf_size_in_bytes); /** - * @brief qed_fw_overlay_init_ram - Initializes the FW overlay RAM. + * qed_fw_overlay_init_ram(): Initializes the FW overlay RAM. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @fw_overlay_mem: the allocated FW overlay memory. * - * @param p_hwfn - HW device data. - * @param p_ptt - ptt window used for writing the registers. - * @param fw_overlay_mem - the allocated FW overlay memory. + * Return: Void. */ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct phys_mem_desc *fw_overlay_mem); /** - * @brief qed_fw_overlay_mem_free - Frees the FW overlay memory. + * qed_fw_overlay_mem_free(): Frees the FW overlay memory. + * + * @p_hwfn: HW device data. + * @fw_overlay_mem: The allocated FW overlay memory to free. * - * @param p_hwfn - HW device data. - * @param fw_overlay_mem - the allocated FW overlay memory to free. + * Return: Void. 
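+ *
+ * Example (editor's sketch of the alloc/init/free lifecycle implied by
+ * these helpers; not part of the original patch):
+ *
+ *	struct phys_mem_desc *mem;
+ *
+ *	mem = qed_fw_overlay_mem_alloc(p_hwfn, in_buf, size_in_bytes);
+ *	if (!mem)
+ *		return;
+ *	qed_fw_overlay_init_ram(p_hwfn, p_ptt, mem);
+ *	(once the overlays are no longer needed)
+ *	qed_fw_overlay_mem_free(p_hwfn, mem);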
*/ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, struct phys_mem_desc *fw_overlay_mem); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 554f30b0cfd5..6263f847b6b9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -23,7 +23,10 @@ #include "qed_reg_addr.h" #include "qed_sriov.h" -#define QED_BAR_ACQUIRE_TIMEOUT 1000 +#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT 1000 +#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP 1000 +#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT 100000 +#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY 10 /* Invalid values */ #define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1)) @@ -85,11 +88,21 @@ void qed_ptt_pool_free(struct qed_hwfn *p_hwfn) struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn) { + return qed_ptt_acquire_context(p_hwfn, false); +} + +struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, bool is_atomic) +{ struct qed_ptt *p_ptt; - unsigned int i; + unsigned int i, count; + + if (is_atomic) + count = QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT; + else + count = QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT; /* Take the free PTT from the list */ - for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) { + for (i = 0; i < count; i++) { spin_lock_bh(&p_hwfn->p_ptt_pool->lock); if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) { @@ -105,7 +118,12 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn) } spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); - usleep_range(1000, 2000); + + if (is_atomic) + udelay(QED_BAR_ACQUIRE_TIMEOUT_UDELAY); + else + usleep_range(QED_BAR_ACQUIRE_TIMEOUT_USLEEP, + QED_BAR_ACQUIRE_TIMEOUT_USLEEP * 2); } DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index 2734f49956f7..e535983ce21b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -53,85 +53,94 @@ enum _dmae_cmd_crc_mask { #define DMAE_MAX_CLIENTS 32 /** - * @brief qed_gtt_init - Initialize GTT windows + * qed_gtt_init(): Initialize GTT windows. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_gtt_init(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured + * qed_ptt_invalidate(): Forces all ptt entries to be re-configured. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_ptt_invalidate(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool + * qed_ptt_pool_alloc(): Allocate and initialize PTT pool. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return struct _qed_status - success (0), negative - error. + * Return: struct _qed_status - success (0), negative - error. */ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_pool_free - + * qed_ptt_pool_free(): Free PTT pool. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_ptt_pool_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address + * qed_ptt_get_hw_addr(): Get PTT's GRC/HW address. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return u32 + * Return: u32. */ u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_ptt_get_bar_addr - Get PPT's external BAR address + * qed_ptt_get_bar_addr(): Get PTT's external BAR address.
* - * @param p_hwfn - * @param p_ptt + * @p_ptt: P_ptt. * - * @return u32 + * Return: u32. */ u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt); /** - * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address + * qed_ptt_set_win(): Set PTT Window's GRC BAR address. * - * @param p_hwfn - * @param new_hw_addr - * @param p_ptt + * @p_hwfn: HW device data. + * @new_hw_addr: New HW address. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_ptt_set_win(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 new_hw_addr); /** - * @brief qed_get_reserved_ptt - Get a specific reserved PTT + * qed_get_reserved_ptt(): Get a specific reserved PTT. * - * @param p_hwfn - * @param ptt_idx + * @p_hwfn: HW device data. + * @ptt_idx: Ptt Index. * - * @return struct qed_ptt * + * Return: struct qed_ptt *. */ struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn, enum reserved_ptts ptt_idx); /** - * @brief qed_wr - Write value to BAR using the given ptt + * qed_wr(): Write value to BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @val: Value to write. + * @hw_addr: HW address. * - * @param p_hwfn - * @param p_ptt - * @param val - * @param hw_addr + * Return: Void. */ void qed_wr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -139,26 +148,28 @@ void qed_wr(struct qed_hwfn *p_hwfn, u32 val); /** - * @brief qed_rd - Read value from BAR using the given ptt + * qed_rd(): Read value from BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @hw_addr: HW address. * - * @param p_hwfn - * @param p_ptt - * @param val - * @param hw_addr + * Return: u32. */ u32 qed_rd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 hw_addr); /** - * @brief qed_memcpy_from - copy n bytes from BAR using the given - * ptt - * - * @param p_hwfn - * @param p_ptt - * @param dest - * @param hw_addr - * @param n + * qed_memcpy_from(): Copy n bytes from BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @dest: Destination. + * @hw_addr: HW address. + * @n: Number of bytes to copy. + * + * Return: Void. */ void qed_memcpy_from(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -167,14 +178,15 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn, size_t n); /** - * @brief qed_memcpy_to - copy n bytes to BAR using the given - * ptt - * - * @param p_hwfn - * @param p_ptt - * @param hw_addr - * @param src - * @param n + * qed_memcpy_to(): Copy n bytes to BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @hw_addr: HW address. + * @src: Source. + * @n: Number of bytes to copy. + * + * Return: Void. */ void qed_memcpy_to(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -182,83 +194,97 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn, void *src, size_t n); /** - * @brief qed_fid_pretend - pretend to another function when - * accessing the ptt window. There is no way to unpretend - * a function. The only way to cancel a pretend is to - * pretend back to the original function. - * - * @param p_hwfn - * @param p_ptt - * @param fid - fid field of pxp_pretend structure. Can contain - * either pf / vf, port/path fields are don't care. + * qed_fid_pretend(): Pretend to another function when + * accessing the ptt window. There is no way to unpretend + * a function. The only way to cancel a pretend is to + * pretend back to the original function. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @fid: fid field of pxp_pretend structure. Can contain + * either pf / vf, port/path fields are don't care. + * + * Return: Void.
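+ *
+ * Example (editor's sketch, not part of the original patch; vf_fid and
+ * original_fid are hypothetical caller-saved values, since there is no
+ * unpretend):
+ *
+ *	qed_fid_pretend(p_hwfn, p_ptt, vf_fid);
+ *	(accesses through p_ptt now use the VF's view)
+ *	qed_fid_pretend(p_hwfn, p_ptt, original_fid);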
*/ void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid); /** - * @brief qed_port_pretend - pretend to another port when - * accessing the ptt window + * qed_port_pretend(): Pretend to another port when accessing the ptt window. * - * @param p_hwfn - * @param p_ptt - * @param port_id - the port to pretend to + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @port_id: The port to pretend to. + * + * Return: Void. */ void qed_port_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 port_id); /** - * @brief qed_port_unpretend - cancel any previously set port - * pretend + * qed_port_unpretend(): Cancel any previously set port pretend. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. */ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_port_fid_pretend - pretend to another port and another function - * when accessing the ptt window + * qed_port_fid_pretend(): Pretend to another port and another function + * when accessing the ptt window. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @port_id: The port to pretend to. + * @fid: fid field of pxp_pretend structure. Can contain either pf / vf. * - * @param p_hwfn - * @param p_ptt - * @param port_id - the port to pretend to - * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf. + * Return: Void. */ void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 port_id, u16 fid); /** - * @brief qed_vfid_to_concrete - build a concrete FID for a - * given VF ID + * qed_vfid_to_concrete(): Build a concrete FID for a given VF ID. * - * @param p_hwfn - * @param p_ptt - * @param vfid + * @p_hwfn: HW device data. + * @vfid: VFID. + * + * Return: The concrete FID (u32). */ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid); /** - * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd - * this is declared here since other files will require it. - * @param idx + * qed_dmae_idx_to_go_cmd(): Map the idx to dmae cmd. + * This is declared here since other files will require it. + * + * @idx: Index. + * + * Return: u32. */ u32 qed_dmae_idx_to_go_cmd(u8 idx); /** - * @brief qed_dmae_info_alloc - Init the dmae_info structure - * which is part of p_hwfn. - * @param p_hwfn + * qed_dmae_info_alloc(): Init the dmae_info structure + * which is part of p_hwfn. + * + * @p_hwfn: HW device data. + * + * Return: Int. */ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_dmae_info_free - Free the dmae_info structure - * which is part of p_hwfn + * qed_dmae_info_free(): Free the dmae_info structure + * which is part of p_hwfn. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_dmae_info_free(struct qed_hwfn *p_hwfn); @@ -292,14 +318,16 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn, #define QED_HW_ERR_MAX_STR_SIZE 256 /** - * @brief qed_hw_err_notify - Notify upper layer driver and management FW - * about a HW error. - * - * @param p_hwfn - * @param p_ptt - * @param err_type - * @param fmt - debug data buffer to send to the MFW - * @param ... - buffer format args + * qed_hw_err_notify(): Notify upper layer driver and management FW + * about a HW error. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @err_type: Err Type. + * @fmt: Debug data buffer to send to the MFW. + * @...: Buffer format args. + * + * Return: Void.
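+ *
+ * Example (editor's sketch, not part of the original patch; assumes the
+ * QED_HW_ERR_DMAE_FAIL value from enum qed_hw_err_type):
+ *
+ *	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_DMAE_FAIL,
+ *			  "DMAE failed, cmd 0x%08x\n", cmd);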
*/ void __printf(4, 5) __cold qed_hw_err_notify(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h index a573c8921982..1dbc460c9eec 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h @@ -12,23 +12,24 @@ #include "qed.h" /** - * @brief qed_init_iro_array - init iro_arr. + * qed_init_iro_array(): Init iro_arr. * + * @cdev: Qed dev pointer. + * - * @param cdev + * Return: Void. */ void qed_init_iro_array(struct qed_dev *cdev); /** - * @brief qed_init_run - Run the init-sequence. + * qed_init_run(): Run the init-sequence. * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @phase: Phase. + * @phase_id: Phase ID. + * @modes: Mode. * - * @param p_hwfn - * @param p_ptt - * @param phase - * @param phase_id - * @param modes - * @return _qed_status_t + * Return: _qed_status_t. */ int qed_init_run(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -37,30 +38,31 @@ int qed_init_run(struct qed_hwfn *p_hwfn, int modes); /** - * @brief qed_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs. + * qed_init_alloc(): Allocate RT array, Store 'values' ptrs. * + * @p_hwfn: HW device data. * - * @param p_hwfn - * - * @return _qed_status_t + * Return: _qed_status_t. */ int qed_init_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_init_hwfn_deallocate + * qed_init_free(): Frees the resources allocated by qed_init_alloc(). * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_init_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_init_store_rt_reg - Store a configuration value in the RT array. + * qed_init_store_rt_reg(): Store a configuration value in the RT array. * + * @p_hwfn: HW device data. + * @rt_offset: RT offset. + * @val: Value to store. * - * @param p_hwfn - * @param rt_offset - * @param val + * Return: Void. */ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, @@ -72,15 +74,6 @@ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, #define OVERWRITE_RT_REG(hwfn, offset, val) \ qed_init_store_rt_reg(hwfn, offset, val) -/** - * @brief - * - * - * @param p_hwfn - * @param rt_offset - * @param val - * @param size - */ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 *val, @@ -90,11 +83,12 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val)) /** - * @brief - * Initialize GTT global windows and set admin window - * related params of GTT/PTT to default values. + * qed_gtt_init(): Initialize GTT global windows and set admin window + * related params of GTT/PTT to default values. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_gtt_init(struct qed_hwfn *p_hwfn); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index c5550e96bbe1..eb8e0f4242d7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -53,51 +53,54 @@ enum qed_coalescing_fsm { }; /** - * @brief qed_int_igu_enable_int - enable device interrupts + * qed_int_igu_enable_int(): Enable device interrupts. * - * @param p_hwfn - * @param p_ptt - * @param int_mode - interrupt mode to use + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @int_mode: Interrupt mode to use. + * + * Return: Void.
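+ *
+ * Example (editor's sketch, not part of the original patch; assumes the
+ * QED_INT_MODE_MSIX value from enum qed_int_mode):
+ *
+ *	qed_int_igu_enable_int(p_hwfn, p_ptt, QED_INT_MODE_MSIX);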
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index c5550e96bbe1..eb8e0f4242d7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -53,51 +53,54 @@ enum qed_coalescing_fsm { }; /** - * @brief qed_int_igu_enable_int - enable device interrupts + * qed_int_igu_enable_int(): Enable device interrupts. * - * @param p_hwfn - * @param p_ptt - * @param int_mode - interrupt mode to use + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @int_mode: Interrupt mode to use. + * + * Return: Void. */ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_int_mode int_mode); /** - * @brief qed_int_igu_disable_int - disable device interrupts + * qed_int_igu_disable_int(): Disable device interrupts. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. */ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc - * register from igu. + * qed_int_igu_read_sisr_reg(): Reads the single isr multiple dpc + * register from igu. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return u64 + * Return: u64. */ u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn); #define QED_SP_SB_ID 0xffff /** - * @brief qed_int_sb_init - Initializes the sb_info structure. + * qed_int_sb_init(): Initializes the sb_info structure. * - * once the structure is initialized it can be passed to sb related functions. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_info: Points to an uninitialized (but allocated) sb_info structure. + * @sb_virt_addr: SB Virtual address. + * @sb_phy_addr: SB Physical address. + * @sb_id: The sb_id to be used (zero based in driver); + * should use QED_SP_SB_ID for the SP Status block. * - * @param p_hwfn - * @param p_ptt - * @param sb_info points to an uninitialized (but - * allocated) sb_info structure - * @param sb_virt_addr - * @param sb_phy_addr - * @param sb_id the sb_id to be used (zero based in driver) - * should use QED_SP_SB_ID for SP Status block + * Return: int. * - * @return int + * Once the structure is initialized it can be passed to sb related functions. */ int qed_int_sb_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -106,82 +109,91 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, dma_addr_t sb_phy_addr, u16 sb_id); /** - * @brief qed_int_sb_setup - Setup the sb. + * qed_int_sb_setup(): Setup the sb. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_info: Initialized sb_info structure. * - * @param p_hwfn - * @param p_ptt - * @param sb_info initialized sb_info structure + * Return: Void. */ void qed_int_sb_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_sb_info *sb_info); /** - * @brief qed_int_sb_release - releases the sb_info structure. + * qed_int_sb_release(): Releases the sb_info structure. * - * once the structure is released, it's memory can be freed + * @p_hwfn: HW device data. + * @sb_info: Points to an allocated sb_info structure. + * @sb_id: The sb_id to be used (zero based in driver) + * should never be equal to QED_SP_SB_ID + * (SP Status block). * - * @param p_hwfn - * @param sb_info points to an allocated sb_info structure - * @param sb_id the sb_id to be used (zero based in driver) - * should never be equal to QED_SP_SB_ID - * (SP Status block) + * Return: int. * - * @return int + * Once the structure is released, its memory can be freed. */ int qed_int_sb_release(struct qed_hwfn *p_hwfn, struct qed_sb_info *sb_info, u16 sb_id); /** - * @brief qed_int_sp_dpc - To be called when an interrupt is received on the - * default status block. + * qed_int_sp_dpc(): To be called when an interrupt is received on the + * default status block. * - * @param p_hwfn - pointer to hwfn + * @t: Tasklet. + * + * Return: Void. * */ void qed_int_sp_dpc(struct tasklet_struct *t); /** - * @brief qed_int_get_num_sbs - get the number of status - * blocks configured for this funciton in the igu.
+ * qed_int_get_num_sbs(): Get the number of status blocks configured * for this function in the igu. * - * @param p_hwfn - * @param p_sb_cnt_info + * @p_hwfn: HW device data. + * @p_sb_cnt_info: Pointer to SB count info. * - * @return int - number of status blocks configured + * Return: Void. */ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, struct qed_sb_cnt_info *p_sb_cnt_info); /** - * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR + * qed_int_disable_post_isr_release(): Performs the cleanup post ISR * release. The API need to be called after releasing all slowpath IRQs * of the device. * - * @param cdev + * @cdev: Qed dev pointer. * + * Return: Void. */ void qed_int_disable_post_isr_release(struct qed_dev *cdev); /** - * @brief qed_int_attn_clr_enable - sets whether the general behavior is + * qed_int_attn_clr_enable(): Sets whether the general behavior is * preventing attentions from being reasserted, or following the * attributes of the specific attention. * - * @param cdev - * @param clr_enable + * @cdev: Qed dev pointer. + * @clr_enable: Clear enable. + * + * Return: Void. * */ void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable); /** - * @brief - Doorbell Recovery handler. + * qed_db_rec_handler(): Doorbell Recovery handler. * Run doorbell recovery in case of PF overflow (and flush DORQ if * needed). * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. */ int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -223,30 +235,34 @@ struct qed_igu_info { }; /** - * @brief - Make sure the IGU CAM reflects the resources provided by MFW + * qed_int_igu_reset_cam(): Make sure the IGU CAM reflects the resources + * provided by MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Int. */ int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Translate the weakly-defined client sb-id into an IGU sb-id + * qed_get_igu_sb_id(): Translate the weakly-defined client sb-id into + * an IGU sb-id * - * @param p_hwfn - * @param sb_id - user provided sb_id + * @p_hwfn: HW device data. + * @sb_id: User provided sb_id. * - * @return an index inside IGU CAM where the SB resides + * Return: An index inside IGU CAM where the SB resides. */ u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); /** - * @brief return a pointer to an unused valid SB + * qed_get_igu_free_sb(): Return a pointer to an unused valid SB. * - * @param p_hwfn - * @param b_is_pf - true iff we want a SB belonging to a PF + * @p_hwfn: HW device data. + * @b_is_pf: True iff we want a SB belonging to a PF. * - * @return point to an igu_block, NULL if none is available + * Return: Pointer to an igu_block, NULL if none is available. */ struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf); @@ -259,15 +275,15 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn); /** - * @brief qed_int_igu_read_cam - Reads the IGU CAM. + * qed_int_igu_read_cam(): Reads the IGU CAM. * This function needs to be called during hardware * prepare. It reads the info from igu cam to know which * status block is the default / base status block etc. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int + * Return: Int.
*/ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -275,24 +291,22 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn, void *cookie); /** - * @brief qed_int_register_cb - Register callback func for - * slowhwfn statusblock. - * - * Every protocol that uses the slowhwfn status block - * should register a callback function that will be called - * once there is an update of the sp status block. - * - * @param p_hwfn - * @param comp_cb - function to be called when there is an - * interrupt on the sp sb - * - * @param cookie - passed to the callback function - * @param sb_idx - OUT parameter which gives the chosen index - * for this protocol. - * @param p_fw_cons - pointer to the actual address of the - * consumer for this protocol. - * - * @return int + * qed_int_register_cb(): Register callback func for slowhwfn statusblock. + * + * @p_hwfn: HW device data. + * @comp_cb: Function to be called when there is an + * interrupt on the sp sb. + * @cookie: Passed to the callback function. + * @sb_idx: (OUT) parameter which gives the chosen index + * for this protocol. + * @p_fw_cons: Pointer to the actual address of the + * consumer for this protocol. + * + * Return: Int. + * + * Every protocol that uses the slowhwfn status block + * should register a callback function that will be called + * once there is an update of the sp status block. */ int qed_int_register_cb(struct qed_hwfn *p_hwfn, qed_int_comp_cb_t comp_cb, @@ -301,37 +315,40 @@ int qed_int_register_cb(struct qed_hwfn *p_hwfn, __le16 **p_fw_cons); /** - * @brief qed_int_unregister_cb - Unregisters callback - * function from sp sb. - * Partner of qed_int_register_cb -> should be called - * when no longer required. + * qed_int_unregister_cb(): Unregisters callback function from sp sb. + * + * @p_hwfn: HW device data. + * @pi: Producer Index. * - * @param p_hwfn - * @param pi + * Return: Int. * - * @return int + * Partner of qed_int_register_cb -> should be called + * when no longer required. */ int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi); /** - * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id. + * qed_int_get_sp_sb_id(): Get the slowhwfn sb id. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return u16 + * Return: u16. */ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn); /** - * @brief Status block cleanup. Should be called for each status - * block that will be used -> both PF / VF - * - * @param p_hwfn - * @param p_ptt - * @param igu_sb_id - igu status block id - * @param opaque - opaque fid of the sb owner. - * @param b_set - set(1) / clear(0) + * qed_int_igu_init_pure_rt_single(): Status block cleanup. + * Should be called for each status + * block that will be used -> both PF / VF. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @igu_sb_id: IGU status block id. + * @opaque: Opaque fid of the sb owner. + * @b_set: Set(1) / Clear(0). + * + * Return: Void. */ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -340,15 +357,16 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, bool b_set); /** - * @brief qed_int_cau_conf - configure cau for a given status - * block - * - * @param p_hwfn - * @param ptt - * @param sb_phys - * @param igu_sb_id - * @param vf_number - * @param vf_valid + * qed_int_cau_conf_sb(): Configure cau for a given status block. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_phys: SB physical address. + * @igu_sb_id: IGU status block id. + * @vf_number: VF number. + * @vf_valid: VF valid or not. + * + * Return: Void. */ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -358,52 +376,58 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, u8 vf_valid); /** - * @brief qed_int_alloc + * qed_int_alloc(): QED interrupt alloc. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int + * Return: Int. */ int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_int_free + * qed_int_free(): QED interrupt free. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_int_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_int_setup + * qed_int_setup(): QED interrupt setup. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Enable Interrupt & Attention for hw function + * qed_int_igu_enable(): Enable Interrupt & Attention for hw function. * - * @param p_hwfn - * @param p_ptt - * @param int_mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @int_mode: Interrupt mode. * - * @return int + * Return: Int. */ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_int_mode int_mode); /** - * @brief - Initialize CAU status block entry + * qed_init_cau_sb_entry(): Initialize CAU status block entry. + * + * @p_hwfn: HW device data. + * @p_sb_entry: Pointer to SB entry. + * @pf_id: PF number. + * @vf_number: VF number. + * @vf_valid: VF valid or not. * - * @param p_hwfn - * @param p_sb_entry - * @param pf_id - * @param vf_number - * @param vf_valid + * Return: Void. */ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, struct cau_sb_entry *p_sb_entry,
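qed_int_register_cb() is how a protocol subscribes to slowpath status-block updates: the driver returns the chosen index through the OUT parameter, and the partner call qed_int_unregister_cb() documented above takes that index back when no longer required. A minimal sketch under assumed names (struct example_proto and both example_* functions are illustrative):

/* Hypothetical per-protocol state, for illustration. */
struct example_proto {
	u8 sb_idx;
	__le16 *p_fw_cons;
};

static int example_sp_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct example_proto *proto = cookie;

	/* Consume the slowpath status-block update here. */
	return 0;
}

static int example_attach(struct qed_hwfn *p_hwfn,
			  struct example_proto *proto)
{
	/* sb_idx and p_fw_cons are filled in by the driver. */
	return qed_int_register_cb(p_hwfn, example_sp_completion, proto,
				   &proto->sb_idx, &proto->p_fw_cons);
}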
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index db926d8b3033..f11139177277 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -1000,13 +1000,14 @@ static void _qed_iscsi_get_pstats(struct qed_hwfn *p_hwfn, } static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn, - struct qed_iscsi_stats *stats) + struct qed_iscsi_stats *stats, + bool is_atomic) { struct qed_ptt *p_ptt; memset(stats, 0, sizeof(*stats)); - p_ptt = qed_ptt_acquire(p_hwfn); + p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic); if (!p_ptt) { DP_ERR(p_hwfn, "Failed to acquire ptt\n"); return -EAGAIN; @@ -1337,9 +1338,16 @@ static int qed_iscsi_destroy_conn(struct qed_dev *cdev, QED_SPQ_MODE_EBLOCK, NULL); } +static int qed_iscsi_stats_context(struct qed_dev *cdev, + struct qed_iscsi_stats *stats, + bool is_atomic) +{ + return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic); +} + static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats) { - return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats); + return qed_iscsi_stats_context(cdev, stats, false); } static int qed_iscsi_change_mac(struct qed_dev *cdev, @@ -1359,13 +1367,14 @@ static int qed_iscsi_change_mac(struct qed_dev *cdev, } void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, - struct qed_mcp_iscsi_stats *stats) + struct qed_mcp_iscsi_stats *stats, + bool is_atomic) { struct qed_iscsi_stats proto_stats; /* Retrieve FW statistics */ memset(&proto_stats, 0, sizeof(proto_stats)); - if (qed_iscsi_stats(cdev, &proto_stats)) { + if (qed_iscsi_stats_context(cdev, &proto_stats, is_atomic)) { DP_VERBOSE(cdev, QED_MSG_STORAGE, "Failed to collect 
ISCSI statistics\n"); return; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index dab7a5d09f87..974cb8d26608 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -34,13 +34,19 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn); void qed_iscsi_free(struct qed_hwfn *p_hwfn); /** - * @brief - Fills provided statistics struct with statistics. + * qed_get_protocol_stats_iscsi(): Fills provided statistics + * struct with statistics. * - * @param cdev - * @param stats - points to struct that will be filled with statistics. + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * @is_atomic: Hint from the caller - whether the function can sleep or not. + * + * Context: The function should not sleep in case is_atomic == true. + * Return: Void. */ void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, - struct qed_mcp_iscsi_stats *stats); + struct qed_mcp_iscsi_stats *stats, + bool is_atomic); #else /* IS_ENABLED(CONFIG_QED_ISCSI) */ static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn) { @@ -53,7 +59,8 @@ static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn) {} static inline void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, - struct qed_mcp_iscsi_stats *stats) {} + struct qed_mcp_iscsi_stats *stats, + bool is_atomic) {} #endif /* IS_ENABLED(CONFIG_QED_ISCSI) */ #endif
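The functional change threaded through qed_iscsi.c above (and qed_l2.c below) is the new is_atomic hint: management-FW statistics requests can arrive in atomic context, so PTT acquisition must not sleep there. A sketch of the pattern using only the qed_ptt_acquire_context()/qed_ptt_release() helpers; struct example_stats and the function itself are illustrative:

/* Hypothetical stats container, for illustration. */
struct example_stats {
	u64 rx_pkts;
};

static int example_get_stats(struct qed_hwfn *p_hwfn,
			     struct example_stats *stats, bool is_atomic)
{
	struct qed_ptt *p_ptt;

	memset(stats, 0, sizeof(*stats));

	/* May sleep while waiting for a free PTT window only when
	 * is_atomic is false; otherwise acquisition must not block.
	 */
	p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic);
	if (!p_ptt)
		return -EAGAIN;

	/* ... read HW counters through the PTT window ... */

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;
}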
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index bc17bc36d346..6ffa6425a75a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1863,7 +1863,8 @@ static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, } static void _qed_get_vport_stats(struct qed_dev *cdev, - struct qed_eth_stats *stats) + struct qed_eth_stats *stats, + bool is_atomic) { u8 fw_vport = 0; int i; @@ -1872,10 +1873,11 @@ static void _qed_get_vport_stats(struct qed_dev *cdev, for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) - : NULL; + struct qed_ptt *p_ptt; bool b_get_port_stats; + p_ptt = IS_PF(cdev) ? qed_ptt_acquire_context(p_hwfn, is_atomic) + : NULL; if (IS_PF(cdev)) { /* The main vport index is relative first */ if (qed_fw_vport(p_hwfn, 0, &fw_vport)) { @@ -1901,6 +1903,13 @@ out: void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) { + qed_get_vport_stats_context(cdev, stats, false); +} + +void qed_get_vport_stats_context(struct qed_dev *cdev, + struct qed_eth_stats *stats, + bool is_atomic) +{ u32 i; if (!cdev || cdev->recov_in_prog) { @@ -1908,7 +1917,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) return; } - _qed_get_vport_stats(cdev, stats); + _qed_get_vport_stats(cdev, stats, is_atomic); if (!cdev->reset_stats) return; @@ -1960,7 +1969,7 @@ void qed_reset_vport_stats(struct qed_dev *cdev) if (!cdev->reset_stats) { DP_INFO(cdev, "Reset stats not allocated\n"); } else { - _qed_get_vport_stats(cdev, cdev->reset_stats); + _qed_get_vport_stats(cdev, cdev->reset_stats, false); cdev->reset_stats->common.link_change_count = 0; } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 8eceeebb1a7b..602a12a348b2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -92,18 +92,18 @@ struct qed_filter_mcast { }; /** - * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue + * qed_eth_rx_queue_stop(): This ramrod closes an Rx queue. * - * @param p_hwfn - * @param p_rxq Handler of queue to close - * @param eq_completion_only If True completion will be on - * EQe, if False completion will be - * on EQe if p_hwfn opaque - * different from the RXQ opaque - * otherwise on CQe. - * @param cqe_completion If True completion will be - * receive on CQe. - * @return int + * @p_hwfn: HW device data. + * @p_rxq: Handler of queue to close. + * @eq_completion_only: If True, completion will be on + * EQe; if False, completion will be + * on EQe if p_hwfn opaque is + * different from the RXQ opaque, + * otherwise on CQe. + * @cqe_completion: If True, completion will be received on CQe. + * + * Return: Int. */ int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, @@ -111,12 +111,12 @@ qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, bool eq_completion_only, bool cqe_completion); /** - * @brief qed_eth_tx_queue_stop - closes a Tx queue + * qed_eth_tx_queue_stop(): Closes a Tx queue. * - * @param p_hwfn - * @param p_txq - handle to Tx queue needed to be closed + * @p_hwfn: HW device data. + * @p_txq: Handle to the Tx queue to be closed. * - * @return int + * Return: Int. */ int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq); @@ -205,16 +205,15 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_comp_data); /** - * @brief qed_sp_vport_stop - - * - * This ramrod closes a VPort after all its RX and TX queues are terminated. - * An Assert is generated if any queues are left open. + * qed_sp_vport_stop(): This ramrod closes a VPort after all its + * RX and TX queues are terminated. + * An Assert is generated if any queues are left open. + * + * @p_hwfn: HW device data. + * @opaque_fid: Opaque FID. + * @vport_id: VPort ID. * - * @param p_hwfn - * @param opaque_fid - * @param vport_id VPort ID - * - * @return int + * Return: Int. */ int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id); @@ -225,22 +224,21 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_comp_data); /** - * @brief qed_sp_rx_eth_queues_update - - * - * This ramrod updates an RX queue. 
It is used for setting the active state - * of the queue and updating the TPA and SGE parameters. - * - * @note At the moment - only used by non-linux VFs. + * qed_sp_eth_rx_queues_update(): This ramrod updates an RX queue. + * It is used for setting the active state + * of the queue and updating the TPA and + * SGE parameters. + * @p_hwfn: HW device data. + * @pp_rxq_handlers: An array of queue handlers to be updated. + * @num_rxqs: Number of queues to update. + * @complete_cqe_flg: Post completion to the CQE Ring if set. + * @complete_event_flg: Post completion to the Event Ring if set. + * @comp_mode: Comp mode. + * @p_comp_data: Pointer to comp data. * - * @param p_hwfn - * @param pp_rxq_handlers An array of queue handlers to be updated. - * @param num_rxqs number of queues to update. - * @param complete_cqe_flg Post completion to the CQE Ring if set - * @param complete_event_flg Post completion to the Event Ring if set - * @param comp_mode - * @param p_comp_data + * Return: Int. * - * @return int + * Note: At the moment - only used by non-Linux VFs. */ int @@ -252,35 +250,61 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data); +/** + * qed_get_vport_stats(): Fills provided statistics + * struct with statistics. + * + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * + * Return: Void. + */ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); +/** + * qed_get_vport_stats_context(): Fills provided statistics + * struct with statistics. + * + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * @is_atomic: Hint from the caller - whether the function can sleep or not. + * + * Context: The function should not sleep in case is_atomic == true. + * Return: Void. + */ +void qed_get_vport_stats_context(struct qed_dev *cdev, + struct qed_eth_stats *stats, + bool is_atomic); + void qed_reset_vport_stats(struct qed_dev *cdev); /** - * *@brief qed_arfs_mode_configure - - * - **Enable or disable rfs mode. It must accept atleast one of tcp or udp true - **and atleast one of ipv4 or ipv6 true to enable rfs mode. + * qed_arfs_mode_configure(): Enable or disable rfs mode. + * It must accept at least one of tcp or udp true + * and at least one of ipv4 or ipv6 true to enable + * rfs mode. * - **@param p_hwfn - **@param p_ptt - **@param p_cfg_params - arfs mode configuration parameters. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_cfg_params: Arfs mode configuration parameters. * + * Return: Void. */ void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_arfs_config_params *p_cfg_params); /** - * @brief - qed_configure_rfs_ntuple_filter + * qed_configure_rfs_ntuple_filter(): This ramrod should be used to add + * or remove arfs hw filter * - * This ramrod should be used to add or remove arfs hw filter + * @p_hwfn: HW device data. + * @p_cb: Used for QED_SPQ_MODE_CB, where client would initialize + * it with cookie and callback function address, if not + * using this mode then client must pass NULL. + * @p_params: Pointer to params. * - * @params p_hwfn - * @params p_cb - Used for QED_SPQ_MODE_CB,where client would initialize - * it with cookie and callback function address, if not - * using this mode then client must pass NULL. - * @params p_params + * Return: Int. 
*/ int qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, @@ -374,16 +398,17 @@ qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params); /** - * @brief - Starts an Rx queue, when queue_cid is already prepared + * qed_eth_rxq_start_ramrod(): Starts an Rx queue, when queue_cid is + * already prepared. * - * @param p_hwfn - * @param p_cid - * @param bd_max_bytes - * @param bd_chain_phys_addr - * @param cqe_pbl_addr - * @param cqe_pbl_size + * @p_hwfn: HW device data. + * @p_cid: Pointer to CID. + * @bd_max_bytes: Max bytes. + * @bd_chain_phys_addr: Chain physical address. + * @cqe_pbl_addr: PBL address. + * @cqe_pbl_size: PBL size. * - * @return int + * Return: Int. */ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, @@ -393,15 +418,16 @@ qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size); /** - * @brief - Starts a Tx queue, where queue_cid is already prepared + * qed_eth_txq_start_ramrod(): Starts a Tx queue, where queue_cid is + * already prepared. * - * @param p_hwfn - * @param p_cid - * @param pbl_addr - * @param pbl_size - * @param p_pq_params - parameters for choosing the PQ for this Tx queue + * @p_hwfn: HW device data. + * @p_cid: Pointer to CID. + * @pbl_addr: PBL address. + * @pbl_size: PBL size. + * @pq_id: Parameters for choosing the PQ for this Tx queue. * - * @return int + * Return: Int. */ int qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
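qed_get_vport_stats() keeps its historical behavior by delegating with is_atomic == false, while callers that may run in atomic context (qed_get_protocol_stats() in qed_main.c further down passes true) use the _context variant directly. A short usage sketch; both example_* wrappers are illustrative:

/* Sleepable path: the historical entry point is unchanged. */
static void example_query_sleepable(struct qed_dev *cdev,
				    struct qed_eth_stats *stats)
{
	qed_get_vport_stats(cdev, stats);
}

/* Atomic path (e.g. an MFW request handler): pass the hint. */
static void example_query_atomic(struct qed_dev *cdev,
				 struct qed_eth_stats *stats)
{
	qed_get_vport_stats_context(cdev, stats, true);
}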
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index df88d00053a2..f80f7739ff8d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -119,41 +119,41 @@ struct qed_ll2_info { }; extern const struct qed_ll2_ops qed_ll2_ops_pass; /** - * @brief qed_ll2_acquire_connection - allocate resources, - * starts rx & tx (if relevant) queues pair. Provides - * connecion handler as output parameter. + * qed_ll2_acquire_connection(): Allocate resources, + * starts rx & tx (if relevant) queues pair. + * Provides connection handler as output + * parameter. * + * @cxt: Pointer to the hw-function [opaque to some]. + * @data: Describes connection parameters. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param data - describes connection parameters - * @return int + * Return: Int. */ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data); /** - * @brief qed_ll2_establish_connection - start previously - * allocated LL2 queues pair + * qed_ll2_establish_connection(): Start previously allocated LL2 queues pair. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param p_ptt - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_establish_connection(void *cxt, u8 connection_handle); /** - * @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue. + * qed_ll2_post_rx_buffer(): Submit buffers to LL2 Rx queue. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection - * @param addr rx (physical address) buffers to submit - * @param cookie - * @param notify_fw produce corresponding Rx BD immediately + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @addr: RX (physical address) buffers to submit. + * @buf_len: Buffer length. + * @cookie: Cookie. + * @notify_fw: Produce corresponding Rx BD immediately. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_post_rx_buffer(void *cxt, u8 connection_handle, @@ -161,15 +161,15 @@ int qed_ll2_post_rx_buffer(void *cxt, u16 buf_len, void *cookie, u8 notify_fw); /** - * @brief qed_ll2_prepare_tx_packet - request for start Tx BD - * to prepare Tx packet submission to FW. + * qed_ll2_prepare_tx_packet(): Request for start Tx BD + * to prepare Tx packet submission to FW. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle - * @param pkt - info regarding the tx packet - * @param notify_fw - issue doorbell to fw for this packet + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: Connection handle. + * @pkt: Info regarding the tx packet. + * @notify_fw: Issue doorbell to fw for this packet. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_prepare_tx_packet(void *cxt, u8 connection_handle, @@ -177,81 +177,83 @@ int qed_ll2_prepare_tx_packet(void *cxt, bool notify_fw); /** - * @brief qed_ll2_release_connection - releases resources - * allocated for LL2 connection + * qed_ll2_release_connection(): Releases resources allocated for LL2 + * connection. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * + * Return: Void. */ void qed_ll2_release_connection(void *cxt, u8 connection_handle); /** - * @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill - * Tx BD of BDs requested by - * qed_ll2_prepare_tx_packet + * qed_ll2_set_fragment_of_tx_packet(): Provides fragments to fill + * Tx BD of BDs requested by + * qed_ll2_prepare_tx_packet. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle - * obtained from - * qed_ll2_require_connection - * @param addr - * @param nbytes + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @addr: Address. + * @nbytes: Number of bytes. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_set_fragment_of_tx_packet(void *cxt, u8 connection_handle, dma_addr_t addr, u16 nbytes); /** - * @brief qed_ll2_terminate_connection - stops Tx/Rx queues - * + * qed_ll2_terminate_connection(): Stops Tx/Rx queues. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle - * obtained from - * qed_ll2_require_connection + * @cxt: Pointer to the hw-function [opaque to some]. 
+ * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle); /** - * @brief qed_ll2_get_stats - get LL2 queue's statistics - * + * qed_ll2_get_stats(): Get LL2 queue's statistics. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection - * @param p_stats + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @p_stats: Pointer to stats. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_get_stats(void *cxt, u8 connection_handle, struct qed_ll2_stats *p_stats); /** - * @brief qed_ll2_alloc - Allocates LL2 connections set + * qed_ll2_alloc(): Allocates LL2 connections set. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_ll2_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_ll2_setup - Inits LL2 connections set + * qed_ll2_setup(): Inits LL2 connections set. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. * */ void qed_ll2_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_ll2_free - Releases LL2 connections set + * qed_ll2_free(): Releases LL2 connections set. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. * */ void qed_ll2_free(struct qed_hwfn *p_hwfn);
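Taken together, the LL2 kernel-doc above describes a fixed connection lifecycle: acquire, establish, exchange buffers, terminate, release. A condensed sketch of that order; error handling is trimmed, and the p_connection_handle field name is an assumption about the LL2 acquire-data interface:

static int example_ll2_session(void *cxt)
{
	struct qed_ll2_acquire_data data = {};
	u8 handle;
	int rc;

	data.p_connection_handle = &handle;	/* assumed field name */

	rc = qed_ll2_acquire_connection(cxt, &data);
	if (rc)
		return rc;

	rc = qed_ll2_establish_connection(cxt, handle);
	if (rc)
		goto release;

	/* ... qed_ll2_post_rx_buffer() / qed_ll2_prepare_tx_packet()
	 * traffic runs here; qed_ll2_get_stats() may poll counters.
	 */

	qed_ll2_terminate_connection(cxt, handle);
release:
	qed_ll2_release_connection(cxt, handle);
	return rc;
}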
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index d10e1cd6d2ba..26700b0b4b37 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -3054,7 +3054,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev, switch (type) { case QED_MCP_LAN_STATS: - qed_get_vport_stats(cdev, &eth_stats); + qed_get_vport_stats_context(cdev, &eth_stats, true); stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; stats->lan_stats.ucast_tx_pkts = @@ -3062,10 +3062,10 @@ void qed_get_protocol_stats(struct qed_dev *cdev, stats->lan_stats.fcs_err = -1; break; case QED_MCP_FCOE_STATS: - qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats); + qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true); break; case QED_MCP_ISCSI_STATS: - qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); + qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true); break; default: DP_VERBOSE(cdev, QED_MSG_SP, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 8edb450d0abf..352b757183e8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -266,97 +266,97 @@ union qed_mfw_tlv_data { #define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4) /** - * @brief - returns the link params of the hw function + * qed_mcp_get_link_params(): Returns the link params of the hw function. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @returns pointer to link params + * Returns: Pointer to link params. */ -struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *); +struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn); /** - * @brief - return the link state of the hw function + * qed_mcp_get_link_state(): Return the link state of the hw function. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @returns pointer to link state + * Returns: Pointer to link state. */ -struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *); +struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn); /** - * @brief - return the link capabilities of the hw function + * qed_mcp_get_link_capabilities(): Return the link capabilities of the + * hw function. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @returns pointer to link capabilities + * Returns: Pointer to link capabilities. */ struct qed_mcp_link_capabilities *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn); /** - * @brief Request the MFW to set the the link according to 'link_input'. + * qed_mcp_set_link(): Request the MFW to set the link according + * to 'link_input'. * - * @param p_hwfn - * @param p_ptt - * @param b_up - raise link if `true'. Reset link if `false'. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @b_up: Raise link if `true'. Reset link if `false'. * - * @return int + * Return: Int. */ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up); /** - * @brief Get the management firmware version value + * qed_mcp_get_mfw_ver(): Get the management firmware version value. * - * @param p_hwfn - * @param p_ptt - * @param p_mfw_ver - mfw version value - * @param p_running_bundle_id - image id in nvram; Optional. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mfw_ver: MFW version value. + * @p_running_bundle_id: Image id in nvram; Optional. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - operation was successful. */ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mfw_ver, u32 *p_running_bundle_id); /** - * @brief Get the MBI version value + * qed_mcp_get_mbi_ver(): Get the MBI version value. * - * @param p_hwfn - * @param p_ptt - * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mbi_ver: A pointer to a variable to be filled with the MBI version. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - operation was successful. */ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mbi_ver); /** - * @brief Get media type value of the port. + * qed_mcp_get_media_type(): Get media type value of the port. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param mfw_ver - media type value + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @media_type: Media type value. * - * @return int - - * 0 - Operation was successul. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successful. + * -EBUSY - Operation failed */ int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *media_type); /** - * @brief Get transceiver data of the port. + * qed_mcp_get_transceiver_data(): Get transceiver data of the port. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param p_transceiver_state - transceiver state. - * @param p_transceiver_type - media type value + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_transceiver_state: Transceiver state. + * @p_tranceiver_type: Media type value. * - * @return int - - * 0 - Operation was successful. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successful. 
+ * -EBUSY - Operation failed */ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -364,50 +364,48 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, u32 *p_tranceiver_type); /** - * @brief Get transceiver supported speed mask. + * qed_mcp_trans_speed_mask(): Get transceiver supported speed mask. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param p_speed_mask - Bit mask of all supported speeds. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_speed_mask: Bit mask of all supported speeds. * - * @return int - - * 0 - Operation was successful. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successful. + * -EBUSY - Operation failed */ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_speed_mask); /** - * @brief Get board configuration. + * qed_mcp_get_board_config(): Get board configuration. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param p_board_config - Board config. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_board_config: Board config. * - * @return int - - * 0 - Operation was successful. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successful. + * -EBUSY - Operation failed */ int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_board_config); /** - * @brief General function for sending commands to the MCP - * mailbox. It acquire mutex lock for the entire - * operation, from sending the request until the MCP - * response. Waiting for MCP response will be checked up - * to 5 seconds every 5ms. + * qed_mcp_cmd(): General function for sending commands to the MCP + * mailbox. It acquires a mutex lock for the entire + * operation, from sending the request until the MCP + * response. The MCP response is polled every 5 ms, + * for up to 5 seconds. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param cmd - command to be sent to the MCP. - * @param param - Optional param - * @param o_mcp_resp - The MCP response code (exclude sequence). - * @param o_mcp_param- Optional parameter provided by the MCP + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @cmd: Command to be sent to the MCP. + * @param: Optional param. + * @o_mcp_resp: The MCP response code (exclude sequence). + * @o_mcp_param: Optional parameter provided by the MCP * response - * @return int - 0 - operation - * was successul. + * + * Return: Int - 0 - Operation was successful. */ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -417,37 +415,39 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, u32 *o_mcp_param); /** - * @brief - drains the nig, allowing completion to pass in case of pauses. - * (Should be called only from sleepable context) + * qed_mcp_drain(): Drains the NIG, allowing completion to pass in + * case of pauses. + * (Should be called only from sleepable context) * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * + * Return: Int. */ int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
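Most wrappers in this header bottom out in qed_mcp_cmd(), the synchronous mailbox primitive documented above. A minimal sketch; DRV_MSG_CODE_EXAMPLE is a stand-in constant (real command codes live in the HSI headers):

static int example_mcp_command(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_EXAMPLE, 0,
			 &resp, &param);
	if (rc)
		return rc;	/* the mailbox exchange itself failed */

	/* resp carries the MCP response code (sequence excluded),
	 * param any optional data the MCP returned.
	 */
	return 0;
}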
/** - * @brief Get the flash size value + * qed_mcp_get_flash_size(): Get the flash size value. * - * @param p_hwfn - * @param p_ptt - * @param p_flash_size - flash size in bytes to be filled. + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_flash_size: Flash size in bytes to be filled. * - * @return int - 0 - operation was successul. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_flash_size); /** - * @brief Send driver version to MFW + * qed_mcp_send_drv_version(): Send driver version to MFW. * - * @param p_hwfn - * @param p_ptt - * @param version - Version value - * @param name - Protocol driver name + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_ver: Version value. * - * @return int - 0 - operation was successul. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, @@ -455,146 +455,148 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_mcp_drv_version *p_ver); /** - * @brief Read the MFW process kill counter + * qed_get_process_kill_counter(): Read the MFW process kill counter. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return u32 + * Return: u32. */ u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Trigger a recovery process + * qed_start_recovery_process(): Trigger a recovery process. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int + * Return: Int. */ int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief A recovery handler must call this function as its first step. - * It is assumed that the handler is not run from an interrupt context. + * qed_recovery_prolog(): A recovery handler must call this function + * as its first step. + * It is assumed that the handler is not run from + * an interrupt context. * - * @param cdev - * @param p_ptt + * @cdev: Qed dev pointer. * - * @return int + * Return: int. */ int qed_recovery_prolog(struct qed_dev *cdev); /** - * @brief Notify MFW about the change in base device properties + * qed_mcp_ov_update_current_config(): Notify MFW about the change in base + * device properties. * - * @param p_hwfn - * @param p_ptt - * @param client - qed client type + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @client: Qed client type. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_client client); /** - * @brief Notify MFW about the driver state + * qed_mcp_ov_update_driver_state(): Notify MFW about the driver state. * - * @param p_hwfn - * @param p_ptt - * @param drv_state - Driver state + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @drv_state: Driver state. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_driver_state drv_state); /** - * @brief Send MTU size to MFW + * qed_mcp_ov_update_mtu(): Send MTU size to MFW. * - * @param p_hwfn - * @param p_ptt - * @param mtu - MTU size + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mtu: MTU size. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 mtu); /** - * @brief Send MAC address to MFW + * qed_mcp_ov_update_mac(): Send MAC address to MFW. * - * @param p_hwfn - * @param p_ptt - * @param mac - MAC address + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mac: MAC address.
* - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 *mac); /** - * @brief Send WOL mode to MFW + * qed_mcp_ov_update_wol(): Send WOL mode to MFW. * - * @param p_hwfn - * @param p_ptt - * @param wol - WOL mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @wol: WOL mode. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_wol wol); /** - * @brief Set LED status + * qed_mcp_set_led(): Set LED status. * - * @param p_hwfn - * @param p_ptt - * @param mode - LED mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mode: LED mode. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_led_mode mode); /** - * @brief Read from nvm + * qed_mcp_nvm_read(): Read from NVM. * - * @param cdev - * @param addr - nvm offset - * @param p_buf - nvm read buffer - * @param len - buffer len + * @cdev: Qed dev pointer. + * @addr: NVM offset. + * @p_buf: NVM read buffer. + * @len: Buffer length. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len); /** - * @brief Write to nvm + * qed_mcp_nvm_write(): Write to NVM. * - * @param cdev - * @param addr - nvm offset - * @param cmd - nvm command - * @param p_buf - nvm write buffer - * @param len - buffer len + * @cdev: Qed dev pointer. + * @addr: NVM offset. + * @cmd: NVM command. + * @p_buf: NVM write buffer. + * @len: Buffer length. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_nvm_write(struct qed_dev *cdev, u32 cmd, u32 addr, u8 *p_buf, u32 len); /** - * @brief Check latest response + * qed_mcp_nvm_resp(): Check latest response. * - * @param cdev - * @param p_buf - nvm write buffer + * @cdev: Qed dev pointer. + * @p_buf: NVM write buffer. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf); @@ -604,13 +606,13 @@ struct qed_nvm_image_att { }; /** - * @brief Allows reading a whole nvram image + * qed_mcp_get_nvm_image_att(): Allows reading a whole nvram image. * - * @param p_hwfn - * @param image_id - image to get attributes for - * @param p_image_att - image attributes structure into which to fill data + * @p_hwfn: HW device data. + * @image_id: Image to get attributes for. + * @p_image_att: Image attributes structure into which to fill data. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, @@ -618,64 +620,65 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, struct qed_nvm_image_att *p_image_att); /** - * @brief Allows reading a whole nvram image + * qed_mcp_get_nvm_image(): Allows reading a whole nvram image. * - * @param p_hwfn - * @param image_id - image requested for reading - * @param p_buffer - allocated buffer into which to fill data - * @param buffer_len - length of the allocated buffer. + * @p_hwfn: HW device data. + * @image_id: Image requested for reading. + * @p_buffer: Allocated buffer into which to fill data. + * @buffer_len: Length of the allocated buffer.
* - * @return 0 iff p_buffer now contains the nvram image. + * Return: 0 if p_buffer now contains the nvram image. */ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, enum qed_nvm_images image_id, u8 *p_buffer, u32 buffer_len); /** - * @brief Bist register test + * qed_mcp_bist_register_test(): Bist register test. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Bist clock test + * qed_mcp_bist_clock_test(): Bist clock test. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Bist nvm test - get number of images + * qed_mcp_bist_nvm_get_num_images(): Bist nvm test - get number of images. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param num_images - number of images if operation was + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @num_images: Number of images if operation was * successful. 0 if not. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *num_images); /** - * @brief Bist nvm test - get image attributes by index + * qed_mcp_bist_nvm_get_image_att(): Bist nvm test - get image attributes + * by index. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param p_image_att - Attributes of image - * @param image_index - Index of image to get information for + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_image_att: Attributes of image. + * @image_index: Index of image to get information for. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -683,23 +686,26 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, u32 image_index); /** - * @brief - Processes the TLV request from MFW i.e., get the required TLV info - * from the qed client and send it to the MFW. + * qed_mfw_process_tlv_req(): Processes the TLV request from MFW i.e., + * get the required TLV info + * from the qed client and send it to the MFW. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Send raw debug data to the MFW + * qed_mcp_send_raw_debug_data(): Send raw debug data to the MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_buf: Raw debug data buffer. + * @size: Buffer size. * - * @param p_hwfn - * @param p_ptt - * @param p_buf - raw debug data buffer - * @param size - buffer size + * Return: Int.
*/ int qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn, @@ -796,47 +802,49 @@ qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn) } /** - * @brief Initialize the interface with the MCP + * qed_mcp_cmd_init(): Initialize the interface with the MCP. * - * @param p_hwfn - HW func - * @param p_ptt - PTT required for register access + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int + * Return: Int. */ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Initialize the port interface with the MCP + * qed_mcp_cmd_port_init(): Initialize the port interface with the MCP. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. * - * @param p_hwfn - * @param p_ptt * Can only be called after `num_ports_in_engines' is set */ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Releases resources allocated during the init process. + * qed_mcp_free(): Releases resources allocated during the init process. * - * @param p_hwfn - HW func - * @param p_ptt - PTT required for register access + * @p_hwfn: HW function. * - * @return int + * Return: Int. */ int qed_mcp_free(struct qed_hwfn *p_hwfn); /** - * @brief This function is called from the DPC context. After - * pointing PTT to the mfw mb, check for events sent by the MCP - * to the driver and ack them. In case a critical event - * detected, it will be handled here, otherwise the work will be - * queued to a sleepable work-queue. + * qed_mcp_handle_events(): This function is called from the DPC context. + * After pointing PTT to the mfw mb, check for events sent by + * the MCP to the driver and ack them. In case a critical event + * is detected, it will be handled here, otherwise the work will be + * queued to a sleepable work-queue. + * + * @p_hwfn: HW function. + * @p_ptt: PTT required for register access. * - * @param p_hwfn - HW function - * @param p_ptt - PTT required for register access - * @return int - 0 - operation - * was successul. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -858,106 +866,111 @@ struct qed_load_req_params { }; /** - * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds, - * returns whether this PF is the first on the engine/port or function. + * qed_mcp_load_req(): Sends a LOAD_REQ to the MFW, and in case the + * operation succeeds, returns whether this PF is + * the first on the engine/port or function. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_params: Params. * - * @param p_hwfn - * @param p_ptt - * @param p_params - * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_load_req_params *p_params); /** - * @brief Sends a LOAD_DONE message to the MFW + * qed_mcp_load_done(): Sends a LOAD_DONE message to the MFW. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Sends a UNLOAD_REQ message to the MFW + * qed_mcp_unload_req(): Sends a UNLOAD_REQ message to the MFW. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - Operation was successful. 
+ * Return: Int - 0 - Operation was successful. */ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Sends a UNLOAD_DONE message to the MFW + * qed_mcp_unload_done(): Sends a UNLOAD_DONE message to the MFW. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Read the MFW mailbox into Current buffer. + * qed_mcp_read_mb(): Read the MFW mailbox into Current buffer. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Ack to mfw that driver finished FLR process for VFs + * qed_mcp_ack_vf_flr(): Ack to mfw that driver finished FLR process for VFs. * - * @param p_hwfn - * @param p_ptt - * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @vfs_to_ack: Bit mask of all engine VFs for which the PF acks. * - * @param return int - 0 upon success. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *vfs_to_ack); /** - * @brief - calls during init to read shmem of all function-related info. + * qed_mcp_fill_shmem_func_info(): Calls during init to read shmem of + * all function-related info. * - * @param p_hwfn + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Reset the MCP using mailbox command. + * qed_mcp_reset(): Reset the MCP using mailbox command. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Sends an NVM read command request to the MFW to get - * a buffer. + * qed_mcp_nvm_rd_cmd(): Sends an NVM read command request to the MFW to get + * a buffer. * - * @param p_hwfn - * @param p_ptt - * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or - * DRV_MSG_CODE_NVM_READ_NVRAM commands - * @param param - [0:23] - Offset [24:31] - Size - * @param o_mcp_resp - MCP response - * @param o_mcp_param - MCP response param - * @param o_txn_size - Buffer size output - * @param o_buf - Pointer to the buffer returned by the MFW. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @cmd: (Command) DRV_MSG_CODE_NVM_GET_FILE_DATA or + * DRV_MSG_CODE_NVM_READ_NVRAM commands. + * @param: [0:23] - Offset [24:31] - Size. + * @o_mcp_resp: MCP response. + * @o_mcp_param: MCP response param. + * @o_txn_size: Buffer size output. + * @o_buf: Pointer to the buffer returned by the MFW. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -967,60 +980,61 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);
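The @param encoding for qed_mcp_nvm_rd_cmd() above packs the whole request into one dword: bits [0:23] carry the offset and bits [24:31] the size. A sketch of a single-chunk read; the wrapper itself is illustrative, and DRV_MSG_CODE_NVM_READ_NVRAM is one of the two commands the kernel-doc names:

static int example_nvm_read_chunk(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 offset, u8 size, u32 *buf)
{
	u32 resp = 0, mcp_param = 0, txn_size = 0;
	u32 param;

	/* [0:23] - Offset, [24:31] - Size, as documented above. */
	param = (offset & 0x00ffffff) | ((u32)size << 24);

	return qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				  DRV_MSG_CODE_NVM_READ_NVRAM, param,
				  &resp, &mcp_param, &txn_size, buf);
}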
* - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param port - transceiver port - * @param addr - I2C address - * @param offset - offset in sfp - * @param len - buffer length - * @param p_buf - buffer to read into + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @port: transceiver port. + * @addr: I2C address. + * @offset: offset in sfp. + * @len: buffer length. + * @p_buf: buffer to read into. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf); /** - * @brief indicates whether the MFW objects [under mcp_info] are accessible + * qed_mcp_is_init(): indicates whether the MFW objects [under mcp_info] + * are accessible * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return true iff MFW is running and mcp_info is initialized + * Return: true if MFW is running and mcp_info is initialized. */ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn); /** - * @brief request MFW to configure MSI-X for a VF + * qed_mcp_config_vf_msix(): Request MFW to configure MSI-X for a VF. * - * @param p_hwfn - * @param p_ptt - * @param vf_id - absolute inside engine - * @param num_sbs - number of entries to request + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @vf_id: absolute inside engine. + * @num: number of entries to request. * - * @return int + * Return: Int. */ int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 vf_id, u8 num); /** - * @brief - Halt the MCP. + * qed_mcp_halt(): Halt the MCP. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Wake up the MCP. + * qed_mcp_resume: Wake up the MCP. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -1038,13 +1052,13 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 mask_parities); -/* @brief - Gets the mdump retained data from the MFW. +/* qed_mcp_mdump_get_retain(): Gets the mdump retained data from the MFW. * - * @param p_hwfn - * @param p_ptt - * @param p_mdump_retain + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mdump_retain: mdump retain. * - * @param return 0 upon success. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn, @@ -1052,15 +1066,15 @@ qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn, struct mdump_retain_data_stc *p_mdump_retain); /** - * @brief - Sets the MFW's max value for the given resource + * qed_mcp_set_resc_max_val(): Sets the MFW's max value for the given resource. * - * @param p_hwfn - * @param p_ptt - * @param res_id - * @param resc_max_val - * @param p_mcp_resp + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @res_id: RES ID. + * @resc_max_val: Resec max val. + * @p_mcp_resp: MCP Resp * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. 
 */
int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
@@ -1069,16 +1083,17 @@ qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
			 u32 resc_max_val, u32 *p_mcp_resp);

 /**
- * @brief - Gets the MFW allocation info for the given resource
+ * qed_mcp_get_resc_info(): Gets the MFW allocation info for the given
+ *                          resource.
  *
- * @param p_hwfn
- * @param p_ptt
- * @param res_id
- * @param p_mcp_resp
- * @param p_resc_num
- * @param p_resc_start
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @res_id: Res ID.
+ * @p_mcp_resp: MCP resp.
+ * @p_resc_num: Resc num.
+ * @p_resc_start: Resc start.
  *
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
  */
 int
 qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
@@ -1087,13 +1102,13 @@ qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);

 /**
- * @brief Send eswitch mode to MFW
+ * qed_mcp_ov_update_eswitch(): Send eswitch mode to MFW.
  *
- * @param p_hwfn
- * @param p_ptt
- * @param eswitch - eswitch mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @eswitch: eswitch mode.
  *
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
  */
 int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
@@ -1113,12 +1128,12 @@ enum qed_resc_lock {
 };

 /**
- * @brief - Initiates PF FLR
+ * qed_mcp_initiate_pf_flr(): Initiates PF FLR.
  *
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
  *
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
  */
 int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 struct qed_resc_lock_params {
@@ -1151,13 +1166,13 @@ struct qed_resc_lock_params {
 };

 /**
- * @brief Acquires MFW generic resource lock
+ * qed_mcp_resc_lock(): Acquires MFW generic resource lock.
  *
- * @param p_hwfn
- * @param p_ptt
- * @param p_params
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_params: Params.
  *
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
  */
 int
 qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
@@ -1175,13 +1190,13 @@ struct qed_resc_unlock_params {
 };

 /**
- * @brief Releases MFW generic resource lock
+ * qed_mcp_resc_unlock(): Releases MFW generic resource lock.
  *
- * @param p_hwfn
- * @param p_ptt
- * @param p_params
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_params: Params.
  *
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
  */
 int
 qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
@@ -1189,12 +1204,15 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
		    struct qed_resc_unlock_params *p_params);

 /**
- * @brief - default initialization for lock/unlock resource structs
+ * qed_mcp_resc_lock_default_init(): Default initialization for
+ *                                   lock/unlock resource structs.
  *
- * @param p_lock - lock params struct to be initialized; Can be NULL
- * @param p_unlock - unlock params struct to be initialized; Can be NULL
- * @param resource - the requested resource
- * @paral b_is_permanent - disable retries & aging when set
+ * @p_lock: lock params struct to be initialized; Can be NULL.
+ * @p_unlock: unlock params struct to be initialized; Can be NULL.
+ * @resource: the requested resource.
+ * @b_is_permanent: disable retries & aging when set.
+ *
+ * Return: Void.
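All of the qed header hunks in this series apply the same mechanical conversion: the Doxygen-flavored @brief/@param/@return blocks become kernel-doc, which scripts/kernel-doc can parse and check against the prototype. A minimal before/after sketch of the convention; qed_example_op() is a made-up name, not a function from this patch:

	/* Before: Doxygen style, invisible to scripts/kernel-doc */
	/**
	 * @brief Do one example operation
	 *
	 * @param p_hwfn
	 *
	 * @return int
	 */

	/* After: "name():" summary, one "@arg:" per parameter, "Return:" */
	/**
	 * qed_example_op(): Do one example operation.
	 *
	 * @p_hwfn: HW device data.
	 *
	 * Return: 0 on success, negative error code otherwise.
	 */
	int qed_example_op(struct qed_hwfn *p_hwfn);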
*/ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, struct qed_resc_unlock_params *p_unlock, @@ -1202,94 +1220,117 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, resource, bool b_is_permanent); /** - * @brief - Return whether management firmware support smart AN + * qed_mcp_is_smart_an_supported(): Return whether management firmware + * support smart AN * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return bool - true if feature is supported. + * Return: bool true if feature is supported. */ bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn); /** - * @brief Learn of supported MFW features; To be done during early init + * qed_mcp_get_capabilities(): Learn of supported MFW features; + * To be done during early init. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. */ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Inform MFW of set of features supported by driver. Should be done - * inside the content of the LOAD_REQ. + * qed_mcp_set_capabilities(): Inform MFW of set of features supported + * by driver. Should be done inside the content + * of the LOAD_REQ. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Int. */ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Read ufp config from the shared memory. + * qed_mcp_read_ufp_config(): Read ufp config from the shared memory. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. */ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Populate the nvm info shadow in the given hardware function + * qed_mcp_nvm_info_populate(): Populate the nvm info shadow in the given + * hardware function. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Int. */ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn); /** - * @brief Delete nvm info shadow in the given hardware function + * qed_mcp_nvm_info_free(): Delete nvm info shadow in the given + * hardware function. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn); /** - * @brief Get the engine affinity configuration. + * qed_mcp_get_engine_config(): Get the engine affinity configuration. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. */ int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Get the PPFID bitmap. + * qed_mcp_get_ppfid_bitmap(): Get the PPFID bitmap. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. */ int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Get NVM config attribute value. + * qed_mcp_nvm_get_cfg(): Get NVM config attribute value. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @option_id: Option ID. + * @entity_id: Entity ID. + * @flags: Flags. + * @p_buf: Buf. + * @p_len: Len. * - * @param p_hwfn - * @param p_ptt - * @param option_id - * @param entity_id - * @param flags - * @param p_buf - * @param p_len + * Return: Int. */ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, u32 *p_len); /** - * @brief Set NVM config attribute value. 
+ * qed_mcp_nvm_set_cfg(): Set NVM config attribute value. * - * @param p_hwfn - * @param p_ptt - * @param option_id - * @param entity_id - * @param flags - * @param p_buf - * @param len + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @option_id: Option ID. + * @entity_id: Entity ID. + * @flags: Flags. + * @p_buf: Buf. + * @len: Len. + * + * Return: Int. */ int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h index e27dd9a4547e..7a3bd749e1e4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h @@ -6,47 +6,47 @@ #include <linux/types.h> /** - * @brief qed_selftest_memory - Perform memory test + * qed_selftest_memory(): Perform memory test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_memory(struct qed_dev *cdev); /** - * @brief qed_selftest_interrupt - Perform interrupt test + * qed_selftest_interrupt(): Perform interrupt test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_interrupt(struct qed_dev *cdev); /** - * @brief qed_selftest_register - Perform register test + * qed_selftest_register(): Perform register test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_register(struct qed_dev *cdev); /** - * @brief qed_selftest_clock - Perform clock test + * qed_selftest_clock(): Perform clock test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_clock(struct qed_dev *cdev); /** - * @brief qed_selftest_nvram - Perform nvram test + * qed_selftest_nvram(): Perform nvram test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_nvram(struct qed_dev *cdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 60ff3222bf55..c5a38f3c92b0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -31,23 +31,18 @@ struct qed_spq_comp_cb { }; /** - * @brief qed_eth_cqe_completion - handles the completion of a - * ramrod on the cqe ring + * qed_eth_cqe_completion(): handles the completion of a + * ramrod on the cqe ring. * - * @param p_hwfn - * @param cqe + * @p_hwfn: HW device data. + * @cqe: CQE. * - * @return int + * Return: Int. */ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, struct eth_slow_path_rx_cqe *cqe); -/** - * @file - * - * QED Slow-hwfn queue interface - */ - + /* QED Slow-hwfn queue interface */ union ramrod_data { struct pf_start_ramrod_data pf_start; struct pf_update_ramrod_data pf_update; @@ -207,117 +202,128 @@ struct qed_spq { }; /** - * @brief qed_spq_post - Posts a Slow hwfn request to FW, or lacking that - * Pends it to the future list. + * qed_spq_post(): Posts a Slow hwfn request to FW, or lacking that + * Pends it to the future list. * - * @param p_hwfn - * @param p_req + * @p_hwfn: HW device data. + * @p_ent: Ent. + * @fw_return_code: Return code from firmware. * - * @return int + * Return: Int. */ int qed_spq_post(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent, u8 *fw_return_code); /** - * @brief qed_spq_allocate - Alloocates & initializes the SPQ and EQ. + * qed_spq_alloc(): Alloocates & initializes the SPQ and EQ. * - * @param p_hwfn + * @p_hwfn: HW device data. 
* - * @return int + * Return: Int. */ int qed_spq_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_spq_setup - Reset the SPQ to its start state. + * qed_spq_setup(): Reset the SPQ to its start state. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_spq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_spq_deallocate - Deallocates the given SPQ struct. + * qed_spq_free(): Deallocates the given SPQ struct. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_spq_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_spq_get_entry - Obtain an entrry from the spq - * free pool list. - * - * + * qed_spq_get_entry(): Obtain an entrry from the spq + * free pool list. * - * @param p_hwfn - * @param pp_ent + * @p_hwfn: HW device data. + * @pp_ent: PP ENT. * - * @return int + * Return: Int. */ int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent); /** - * @brief qed_spq_return_entry - Return an entry to spq free - * pool list + * qed_spq_return_entry(): Return an entry to spq free pool list. * - * @param p_hwfn - * @param p_ent + * @p_hwfn: HW device data. + * @p_ent: P ENT. + * + * Return: Void. */ void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent); /** - * @brief qed_eq_allocate - Allocates & initializes an EQ struct + * qed_eq_alloc(): Allocates & initializes an EQ struct. * - * @param p_hwfn - * @param num_elem number of elements in the eq + * @p_hwfn: HW device data. + * @num_elem: number of elements in the eq. * - * @return int + * Return: Int. */ int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem); /** - * @brief qed_eq_setup - Reset the EQ to its start state. + * qed_eq_setup(): Reset the EQ to its start state. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_eq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_eq_free - deallocates the given EQ struct. + * qed_eq_free(): deallocates the given EQ struct. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_eq_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_eq_prod_update - update the FW with default EQ producer + * qed_eq_prod_update(): update the FW with default EQ producer. + * + * @p_hwfn: HW device data. + * @prod: Prod. * - * @param p_hwfn - * @param prod + * Return: Void. */ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod); /** - * @brief qed_eq_completion - Completes currently pending EQ elements + * qed_eq_completion(): Completes currently pending EQ elements. * - * @param p_hwfn - * @param cookie + * @p_hwfn: HW device data. + * @cookie: Cookie. * - * @return int + * Return: Int. */ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie); /** - * @brief qed_spq_completion - Completes a single event + * qed_spq_completion(): Completes a single event. * - * @param p_hwfn - * @param echo - echo value from cookie (used for determining completion) - * @param p_data - data from cookie (used in callback function if applicable) + * @p_hwfn: HW device data. + * @echo: echo value from cookie (used for determining completion). + * @fw_return_code: FW return code. + * @p_data: data from cookie (used in callback function if applicable). * - * @return int + * Return: Int. 
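Taken together, the qed_sp.h declarations above and below imply the usual slow-path call sequence: take an entry from the SPQ free pool, fill its ramrod data, then post it; qed_sp_destroy_request(), documented further down, is only for error flows between init and post. A rough caller sketch under those assumptions (the validation step and function name are hypothetical):

	static int example_post_ramrod(struct qed_hwfn *p_hwfn, bool valid)
	{
		struct qed_spq_entry *p_ent = NULL;
		u8 fw_return_code;
		int rc;

		rc = qed_spq_get_entry(p_hwfn, &p_ent);
		if (rc)
			return rc;

		/* ... fill p_ent's union ramrod_data here ... */

		if (!valid) {
			/* initialized but never posted: give it back */
			qed_sp_destroy_request(p_hwfn, p_ent);
			return -EINVAL;
		}

		return qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	}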
*/ int qed_spq_completion(struct qed_hwfn *p_hwfn, __le16 echo, @@ -325,44 +331,43 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, union event_ring_data *p_data); /** - * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ + * qed_spq_get_cid(): Given p_hwfn, return cid for the hwfn's SPQ. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return u32 - SPQ CID + * Return: u32 - SPQ CID. */ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_alloc - Allocates & initializes an ConsQ - * struct + * qed_consq_alloc(): Allocates & initializes an ConsQ struct. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_consq_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_setup - Reset the ConsQ to its start state. + * qed_consq_setup(): Reset the ConsQ to its start state. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return Void. */ void qed_consq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_free - deallocates the given ConsQ struct. + * qed_consq_free(): deallocates the given ConsQ struct. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return Void. */ void qed_consq_free(struct qed_hwfn *p_hwfn); int qed_spq_pend_post(struct qed_hwfn *p_hwfn); -/** - * @file - * - * @brief Slow-hwfn low-level commands (Ramrods) function definitions. - */ +/* Slow-hwfn low-level commands (Ramrods) function definitions. */ #define QED_SP_EQ_COMPLETION 0x01 #define QED_SP_CQE_COMPLETION 0x02 @@ -377,12 +382,15 @@ struct qed_sp_init_data { }; /** - * @brief Returns a SPQ entry to the pool / frees the entry if allocated. - * Should be called on in error flows after initializing the SPQ entry - * and before posting it. + * qed_sp_destroy_request(): Returns a SPQ entry to the pool / frees the + * entry if allocated. Should be called on in error + * flows after initializing the SPQ entry + * and before posting it. + * + * @p_hwfn: HW device data. + * @p_ent: Ent. * - * @param p_hwfn - * @param p_ent + * Return: Void. */ void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent); @@ -394,7 +402,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_sp_init_data *p_data); /** - * @brief qed_sp_pf_start - PF Function Start Ramrod + * qed_sp_pf_start(): PF Function Start Ramrod. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_tunn: P_tunn. + * @allow_npar_tx_switch: Allow NPAR TX Switch. + * + * Return: Int. * * This ramrod is sent to initialize a physical function (PF). It will * configure the function related parameters and write its completion to the @@ -404,12 +419,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, * allocated by the driver on host memory and its parameters are written * to the internal RAM of the UStorm by the Function Start Ramrod. * - * @param p_hwfn - * @param p_ptt - * @param p_tunn - * @param allow_npar_tx_switch - * - * @return int */ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, @@ -418,47 +427,33 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, bool allow_npar_tx_switch); /** - * @brief qed_sp_pf_update - PF Function Update Ramrod + * qed_sp_pf_update(): PF Function Update Ramrod. * - * This ramrod updates function-related parameters. Every parameter can be - * updated independently, according to configuration flags. + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Int. * - * @return int + * This ramrod updates function-related parameters. 
Every parameter can be + * updated independently, according to configuration flags. */ int qed_sp_pf_update(struct qed_hwfn *p_hwfn); /** - * @brief qed_sp_pf_update_stag - Update firmware of new outer tag + * qed_sp_pf_update_stag(): Update firmware of new outer tag. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn); /** - * @brief qed_sp_pf_stop - PF Function Stop Ramrod - * - * This ramrod is sent to close a Physical Function (PF). It is the last ramrod - * sent and the last completion written to the PFs Event Ring. This ramrod also - * deletes the context for the Slowhwfn connection on this PF. - * - * @note Not required for first packet. - * - * @param p_hwfn - * - * @return int - */ - -/** - * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod + * qed_sp_pf_update_ufp(): PF ufp update Ramrod. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn); @@ -470,11 +465,11 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data); /** - * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod + * qed_sp_heartbeat_ramrod(): Send empty Ramrod. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 7ff23ef8ccc1..0a1e44d45c1a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -251,29 +251,31 @@ extern const struct qed_iov_hv_ops qed_iov_ops_pass; #ifdef CONFIG_QED_SRIOV /** - * @brief Check if given VF ID @vfid is valid - * w.r.t. @b_enabled_only value - * if b_enabled_only = true - only enabled VF id is valid - * else any VF id less than max_vfs is valid + * qed_iov_is_valid_vfid(): Check if given VF ID @vfid is valid + * w.r.t. @b_enabled_only value + * if b_enabled_only = true - only enabled + * VF id is valid. + * else any VF id less than max_vfs is valid. * - * @param p_hwfn - * @param rel_vf_id - Relative VF ID - * @param b_enabled_only - consider only enabled VF - * @param b_non_malicious - true iff we want to validate vf isn't malicious. + * @p_hwfn: HW device data. + * @rel_vf_id: Relative VF ID. + * @b_enabled_only: consider only enabled VF. + * @b_non_malicious: true iff we want to validate vf isn't malicious. * - * @return bool - true for valid VF ID + * Return: bool - true for valid VF ID */ bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, int rel_vf_id, bool b_enabled_only, bool b_non_malicious); /** - * @brief - Given a VF index, return index of next [including that] active VF. + * qed_iov_get_next_active_vf(): Given a VF index, return index of + * next [including that] active VF. * - * @param p_hwfn - * @param rel_vf_id + * @p_hwfn: HW device data. + * @rel_vf_id: VF ID. * - * @return MAX_NUM_VFS in case no further active VFs, otherwise index. + * Return: MAX_NUM_VFS in case no further active VFs, otherwise index. */ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id); @@ -281,83 +283,92 @@ void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid, u16 vxlan_port, u16 geneve_port); /** - * @brief Read sriov related information and allocated resources - * reads from configuration space, shmem, etc. 
+ * qed_iov_hw_info(): Read sriov related information and allocated resources + * reads from configuration space, shmem, etc. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_iov_hw_info(struct qed_hwfn *p_hwfn); /** - * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset + * qed_add_tlv(): place a given tlv on the tlv buffer at next offset * - * @param p_hwfn - * @param p_iov - * @param type - * @param length + * @p_hwfn: HW device data. + * @offset: offset. + * @type: Type + * @length: Length. * - * @return pointer to the newly placed tlv + * Return: pointer to the newly placed tlv */ void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length); /** - * @brief list the types and lengths of the tlvs on the buffer + * qed_dp_tlv_list(): list the types and lengths of the tlvs on the buffer * - * @param p_hwfn - * @param tlvs_list + * @p_hwfn: HW device data. + * @tlvs_list: Tlvs_list. + * + * Return: Void. */ void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list); /** - * @brief qed_iov_alloc - allocate sriov related resources + * qed_iov_alloc(): allocate sriov related resources * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_iov_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_iov_setup - setup sriov related resources + * qed_iov_setup(): setup sriov related resources + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_iov_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_iov_free - free sriov related resources + * qed_iov_free(): free sriov related resources * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_iov_free(struct qed_hwfn *p_hwfn); /** - * @brief free sriov related memory that was allocated during hw_prepare + * qed_iov_free_hw_info(): free sriov related memory that was + * allocated during hw_prepare + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_iov_free_hw_info(struct qed_dev *cdev); /** - * @brief Mark structs of vfs that have been FLR-ed. + * qed_iov_mark_vf_flr(): Mark structs of vfs that have been FLR-ed. * - * @param p_hwfn - * @param disabled_vfs - bitmask of all VFs on path that were FLRed + * @p_hwfn: HW device data. + * @disabled_vfs: bitmask of all VFs on path that were FLRed * - * @return true iff one of the PF's vfs got FLRed. false otherwise. + * Return: true iff one of the PF's vfs got FLRed. false otherwise. */ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs); /** - * @brief Search extended TLVs in request/reply buffer. + * qed_iov_search_list_tlvs(): Search extended TLVs in request/reply buffer. * - * @param p_hwfn - * @param p_tlvs_list - Pointer to tlvs list - * @param req_type - Type of TLV + * @p_hwfn: HW device data. + * @p_tlvs_list: Pointer to tlvs list + * @req_type: Type of TLV * - * @return pointer to tlv type if found, otherwise returns NULL. + * Return: pointer to tlv type if found, otherwise returns NULL. */ void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, void *p_tlvs_list, u16 req_type); diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 60d2bb64e65f..976201fc7d4a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -688,13 +688,16 @@ struct qed_vf_iov { }; /** - * @brief VF - Set Rx/Tx coalesce per VF's relative queue. - * Coalesce value '0' will omit the configuration. 
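A sketch of how the TLV helpers above are typically chained when building a VF-to-PF request. The buffer name is invented, and the vfpf_first_tlv/channel_list_end_tlv types and CHANNEL_TLV_* constants are assumed from qed's existing mailbox definitions rather than shown in this diff:

	u8 *offset = mbx_request_buf;			/* hypothetical buffer */
	struct vfpf_first_tlv *req;

	req = qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	/* ... fill *req ... */
	qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));
	qed_dp_tlv_list(p_hwfn, mbx_request_buf);	/* dump types/lengths */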
+ * qed_vf_pf_set_coalesce(): VF - Set Rx/Tx coalesce per VF's relative queue. + * Coalesce value '0' will omit the + * configuration. * - * @param p_hwfn - * @param rx_coal - coalesce value in micro second for rx queue - * @param tx_coal - coalesce value in micro second for tx queue - * @param p_cid - queue cid + * @p_hwfn: HW device data. + * @rx_coal: coalesce value in micro second for rx queue. + * @tx_coal: coalesce value in micro second for tx queue. + * @p_cid: queue cid. + * + * Return: Int. * **/ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, @@ -702,148 +705,172 @@ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, u16 tx_coal, struct qed_queue_cid *p_cid); /** - * @brief VF - Get coalesce per VF's relative queue. + * qed_vf_pf_get_coalesce(): VF - Get coalesce per VF's relative queue. * - * @param p_hwfn - * @param p_coal - coalesce value in micro second for VF queues. - * @param p_cid - queue cid + * @p_hwfn: HW device data. + * @p_coal: coalesce value in micro second for VF queues. + * @p_cid: queue cid. * + * Return: Int. **/ int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, struct qed_queue_cid *p_cid); #ifdef CONFIG_QED_SRIOV /** - * @brief Read the VF bulletin and act on it if needed + * qed_vf_read_bulletin(): Read the VF bulletin and act on it if needed. * - * @param p_hwfn - * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise. + * @p_hwfn: HW device data. + * @p_change: qed fills 1 iff bulletin board has changed, 0 otherwise. * - * @return enum _qed_status + * Return: enum _qed_status. */ int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change); /** - * @brief Get link paramters for VF from qed + * qed_vf_get_link_params(): Get link parameters for VF from qed + * + * @p_hwfn: HW device data. + * @params: the link params structure to be filled for the VF. * - * @param p_hwfn - * @param params - the link params structure to be filled for the VF + * Return: Void. */ void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *params); /** - * @brief Get link state for VF from qed + * qed_vf_get_link_state(): Get link state for VF from qed. + * + * @p_hwfn: HW device data. + * @link: the link state structure to be filled for the VF * - * @param p_hwfn - * @param link - the link state structure to be filled for the VF + * Return: Void. */ void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, struct qed_mcp_link_state *link); /** - * @brief Get link capabilities for VF from qed + * qed_vf_get_link_caps(): Get link capabilities for VF from qed. * - * @param p_hwfn - * @param p_link_caps - the link capabilities structure to be filled for the VF + * @p_hwfn: HW device data. + * @p_link_caps: the link capabilities structure to be filled for the VF + * + * Return: Void. */ void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, struct qed_mcp_link_capabilities *p_link_caps); /** - * @brief Get number of Rx queues allocated for VF by qed + * qed_vf_get_num_rxqs(): Get number of Rx queues allocated for VF by qed + * + * @p_hwfn: HW device data. + * @num_rxqs: allocated RX queues * - * @param p_hwfn - * @param num_rxqs - allocated RX queues + * Return: Void. */ void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs); /** - * @brief Get number of Rx queues allocated for VF by qed + * qed_vf_get_num_txqs(): Get number of Rx queues allocated for VF by qed * - * @param p_hwfn - * @param num_txqs - allocated RX queues + * @p_hwfn: HW device data. + * @num_txqs: allocated RX queues + * + * Return: Void. 
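The bulletin accessors above are the read side of a one-way channel: the PF publishes link data, the VF polls it. A sketch of the polling pattern, assuming struct qed_mcp_link_state carries a link_up flag (error handling trimmed, function name invented):

	static void example_vf_check_bulletin(struct qed_hwfn *p_hwfn)
	{
		struct qed_mcp_link_state link;
		u8 changed = 0;

		if (qed_vf_read_bulletin(p_hwfn, &changed) || !changed)
			return;		/* read failed, or nothing new */

		/* cached copies were refreshed from the bulletin content */
		qed_vf_get_link_state(p_hwfn, &link);
		/* link.link_up etc. now reflect what the PF published */
	}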
*/ void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs); /** - * @brief Get number of available connections [both Rx and Tx] for VF + * qed_vf_get_num_cids(): Get number of available connections + * [both Rx and Tx] for VF + * + * @p_hwfn: HW device data. + * @num_cids: allocated number of connections * - * @param p_hwfn - * @param num_cids - allocated number of connections + * Return: Void. */ void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids); /** - * @brief Get port mac address for VF + * qed_vf_get_port_mac(): Get port mac address for VF. * - * @param p_hwfn - * @param port_mac - destination location for port mac + * @p_hwfn: HW device data. + * @port_mac: destination location for port mac + * + * Return: Void. */ void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac); /** - * @brief Get number of VLAN filters allocated for VF by qed + * qed_vf_get_num_vlan_filters(): Get number of VLAN filters allocated + * for VF by qed. + * + * @p_hwfn: HW device data. + * @num_vlan_filters: allocated VLAN filters * - * @param p_hwfn - * @param num_rxqs - allocated VLAN filters + * Return: Void. */ void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters); /** - * @brief Get number of MAC filters allocated for VF by qed + * qed_vf_get_num_mac_filters(): Get number of MAC filters allocated + * for VF by qed * - * @param p_hwfn - * @param num_rxqs - allocated MAC filters + * @p_hwfn: HW device data. + * @num_mac_filters: allocated MAC filters + * + * Return: Void. */ void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters); /** - * @brief Check if VF can set a MAC address + * qed_vf_check_mac(): Check if VF can set a MAC address * - * @param p_hwfn - * @param mac + * @p_hwfn: HW device data. + * @mac: Mac. * - * @return bool + * Return: bool. */ bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac); /** - * @brief Set firmware version information in dev_info from VFs acquire response tlv + * qed_vf_get_fw_version(): Set firmware version information + * in dev_info from VFs acquire response tlv + * + * @p_hwfn: HW device data. + * @fw_major: FW major. + * @fw_minor: FW minor. + * @fw_rev: FW rev. + * @fw_eng: FW eng. * - * @param p_hwfn - * @param fw_major - * @param fw_minor - * @param fw_rev - * @param fw_eng + * Return: Void. */ void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, u16 *fw_major, u16 *fw_minor, u16 *fw_rev, u16 *fw_eng); /** - * @brief hw preparation for VF - * sends ACQUIRE message + * qed_vf_hw_prepare(): hw preparation for VF sends ACQUIRE message * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); /** - * @brief VF - start the RX Queue by sending a message to the PF - * @param p_hwfn - * @param p_cid - Only relative fields are relevant - * @param bd_max_bytes - maximum number of bytes per bd - * @param bd_chain_phys_addr - physical address of bd chain - * @param cqe_pbl_addr - physical address of pbl - * @param cqe_pbl_size - pbl size - * @param pp_prod - pointer to the producer to be - * used in fastpath + * qed_vf_pf_rxq_start(): start the RX Queue by sending a message to the PF + * + * @p_hwfn: HW device data. 
+ * @p_cid: Only relative fields are relevant + * @bd_max_bytes: maximum number of bytes per bd + * @bd_chain_phys_addr: physical address of bd chain + * @cqe_pbl_addr: physical address of pbl + * @cqe_pbl_size: pbl size + * @pp_prod: pointer to the producer to be used in fastpath * - * @return int + * Return: Int. */ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, @@ -853,18 +880,16 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, u16 cqe_pbl_size, void __iomem **pp_prod); /** - * @brief VF - start the TX queue by sending a message to the - * PF. + * qed_vf_pf_txq_start(): VF - start the TX queue by sending a message to the + * PF. * - * @param p_hwfn - * @param tx_queue_id - zero based within the VF - * @param sb - status block for this queue - * @param sb_index - index within the status block - * @param bd_chain_phys_addr - physical address of tx chain - * @param pp_doorbell - pointer to address to which to - * write the doorbell too.. + * @p_hwfn: HW device data. + * @p_cid: CID. + * @pbl_addr: PBL address. + * @pbl_size: PBL Size. + * @pp_doorbell: pointer to address to which to write the doorbell too. * - * @return int + * Return: Int. */ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, @@ -873,90 +898,91 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, u16 pbl_size, void __iomem **pp_doorbell); /** - * @brief VF - stop the RX queue by sending a message to the PF + * qed_vf_pf_rxq_stop(): VF - stop the RX queue by sending a message to the PF. * - * @param p_hwfn - * @param p_cid - * @param cqe_completion + * @p_hwfn: HW device data. + * @p_cid: CID. + * @cqe_completion: CQE Completion. * - * @return int + * Return: Int. */ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, bool cqe_completion); /** - * @brief VF - stop the TX queue by sending a message to the PF + * qed_vf_pf_txq_stop(): VF - stop the TX queue by sending a message to the PF. * - * @param p_hwfn - * @param tx_qid + * @p_hwfn: HW device data. + * @p_cid: CID. * - * @return int + * Return: Int. */ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid); /** - * @brief VF - send a vport update command + * qed_vf_pf_vport_update(): VF - send a vport update command. * - * @param p_hwfn - * @param params + * @p_hwfn: HW device data. + * @p_params: Params * - * @return int + * Return: Int. */ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_params); /** + * qed_vf_pf_reset(): VF - send a close message to PF. * - * @brief VF - send a close message to PF + * @p_hwfn: HW device data. * - * @param p_hwfn - * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn); /** - * @brief VF - free vf`s memories + * qed_vf_pf_release(): VF - free vf`s memories. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_release(struct qed_hwfn *p_hwfn); /** - * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given + * qed_vf_get_igu_sb_id(): Get the IGU SB ID for a given * sb_id. For VFs igu sbs don't have to be contiguous * - * @param p_hwfn - * @param sb_id + * @p_hwfn: HW device data. + * @sb_id: SB ID. * - * @return INLINE u16 + * Return: INLINE u16 */ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); /** - * @brief Stores [or removes] a configured sb_info. + * qed_vf_set_sb_info(): Stores [or removes] a configured sb_info. + * + * @p_hwfn: HW device data. 
+ * @sb_id: zero-based SB index [for fastpath] + * @p_sb: may be NULL [during removal]. * - * @param p_hwfn - * @param sb_id - zero-based SB index [for fastpath] - * @param sb_info - may be NULL [during removal]. + * Return: Void. */ void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id, struct qed_sb_info *p_sb); /** - * @brief qed_vf_pf_vport_start - perform vport start for VF. + * qed_vf_pf_vport_start(): perform vport start for VF. * - * @param p_hwfn - * @param vport_id - * @param mtu - * @param inner_vlan_removal - * @param tpa_mode - * @param max_buffers_per_cqe, - * @param only_untagged - default behavior regarding vlan acceptance + * @p_hwfn: HW device data. + * @vport_id: Vport ID. + * @mtu: MTU. + * @inner_vlan_removal: Innter VLAN removal. + * @tpa_mode: TPA mode + * @max_buffers_per_cqe: Max buffer pre CQE. + * @only_untagged: default behavior regarding vlan acceptance * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, u8 vport_id, @@ -966,11 +992,11 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, u8 max_buffers_per_cqe, u8 only_untagged); /** - * @brief qed_vf_pf_vport_stop - stop the VF's vport + * qed_vf_pf_vport_stop(): stop the VF's vport * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn); @@ -981,42 +1007,49 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, struct qed_filter_mcast *p_filter_cmd); /** - * @brief qed_vf_pf_int_cleanup - clean the SB of the VF + * qed_vf_pf_int_cleanup(): clean the SB of the VF * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn); /** - * @brief - return the link params in a given bulletin board + * __qed_vf_get_link_params(): return the link params in a given bulletin board * - * @param p_hwfn - * @param p_params - pointer to a struct to fill with link params - * @param p_bulletin + * @p_hwfn: HW device data. + * @p_params: pointer to a struct to fill with link params + * @p_bulletin: Bulletin. + * + * Return: Void. */ void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *p_params, struct qed_bulletin_content *p_bulletin); /** - * @brief - return the link state in a given bulletin board + * __qed_vf_get_link_state(): return the link state in a given bulletin board + * + * @p_hwfn: HW device data. + * @p_link: pointer to a struct to fill with link state + * @p_bulletin: Bulletin. * - * @param p_hwfn - * @param p_link - pointer to a struct to fill with link state - * @param p_bulletin + * Return: Void. */ void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, struct qed_mcp_link_state *p_link, struct qed_bulletin_content *p_bulletin); /** - * @brief - return the link capabilities in a given bulletin board + * __qed_vf_get_link_caps(): return the link capabilities in a given + * bulletin board * - * @param p_hwfn - * @param p_link - pointer to a struct to fill with link capabilities - * @param p_bulletin + * @p_hwfn: HW device data. + * @p_link_caps: pointer to a struct to fill with link capabilities + * @p_bulletin: Bulletin. + * + * Return: Void. 
*/ void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, struct qed_mcp_link_capabilities *p_link_caps, @@ -1029,9 +1062,13 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id); /** - * @brief - Ask PF to update the MAC address in it's bulletin board + * qed_vf_pf_bulletin_update_mac(): Ask PF to update the MAC address in + * it's bulletin board + * + * @p_hwfn: HW device data. + * @p_mac: mac address to be updated in bulletin board * - * @param p_mac - mac address to be updated in bulletin board + * Return: Int. */ int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 6c22bfc16ee6..fee47c8eeff4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -2832,10 +2832,13 @@ static void qede_get_eth_tlv_data(void *dev, void *data) } /** - * qede_io_error_detected - called when PCI error is detected + * qede_io_error_detected(): Called when PCI error is detected + * * @pdev: Pointer to PCI device * @state: The current pci connection state * + *Return: pci_ers_result_t. + * * This function is called after a PCI bus error affecting * this device has been detected. */ diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 6b8013fb17c3..eb59e8abe691 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1851,6 +1851,17 @@ static int netsec_of_probe(struct platform_device *pdev, return err; } + /* + * SynQuacer is physically configured with TX and RX delays + * but the standard firmware claimed otherwise for a long + * time, ignore it. 
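For context on the qede_io_error_detected() comment fix above: that function is a PCI AER callback, reached through the driver's struct pci_error_handlers. A sketch of the wiring only; qede's real tables live elsewhere in qede_main.c:

	static const struct pci_error_handlers example_qede_err_handler = {
		.error_detected = qede_io_error_detected,
	};

	static struct pci_driver example_qede_pci_driver = {
		/* .name, .id_table, .probe, .remove, ... */
		.err_handler = &example_qede_err_handler,
	};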
+ */ + if (of_machine_is_compatible("socionext,developer-box") && + priv->phy_interface != PHY_INTERFACE_MODE_RGMII_ID) { + dev_warn(&pdev->dev, "Outdated firmware reports incorrect PHY mode, overriding\n"); + priv->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; + } + priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (!priv->phy_np) { dev_err(&pdev->dev, "missing required property 'phy-handle'\n"); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 2ab29efa6b6e..b4db50c9e703 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1556,15 +1556,15 @@ static int temac_probe(struct platform_device *pdev) } /* Error handle returned DMA RX and TX interrupts */ - if (lp->rx_irq < 0) { - if (lp->rx_irq != -EPROBE_DEFER) - dev_err(&pdev->dev, "could not get DMA RX irq\n"); - return lp->rx_irq; + if (lp->rx_irq <= 0) { + rc = lp->rx_irq ?: -EINVAL; + return dev_err_probe(&pdev->dev, rc, + "could not get DMA RX irq\n"); } - if (lp->tx_irq < 0) { - if (lp->tx_irq != -EPROBE_DEFER) - dev_err(&pdev->dev, "could not get DMA TX irq\n"); - return lp->tx_irq; + if (lp->tx_irq <= 0) { + rc = lp->tx_irq ?: -EINVAL; + return dev_err_probe(&pdev->dev, rc, + "could not get DMA TX irq\n"); } if (temac_np) { diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 3f43c253adac..c199f0b465cd 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -748,7 +748,8 @@ static int ipvlan_device_event(struct notifier_block *unused, write_pnet(&port->pnet, newnet); - ipvlan_migrate_l3s_hook(oldnet, newnet); + if (port->mode == IPVLAN_MODE_L3S) + ipvlan_migrate_l3s_hook(oldnet, newnet); break; } case NETDEV_UNREGISTER: diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 10b3f4fb2612..98ce24422424 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -160,6 +160,19 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr) return sa; } +static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc) +{ + struct macsec_rx_sa *sa = NULL; + int an; + + for (an = 0; an < MACSEC_NUM_AN; an++) { + sa = macsec_rxsa_get(rx_sc->sa[an]); + if (sa) + break; + } + return sa; +} + static void free_rx_sc_rcu(struct rcu_head *head) { struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head); @@ -493,18 +506,28 @@ static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev) skb->protocol = eth_hdr(skb)->h_proto; } +static unsigned int macsec_msdu_len(struct sk_buff *skb) +{ + struct macsec_dev *macsec = macsec_priv(skb->dev); + struct macsec_secy *secy = &macsec->secy; + bool sci_present = macsec_skb_cb(skb)->has_sci; + + return skb->len - macsec_hdr_len(sci_present) - secy->icv_len; +} + static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc, struct macsec_tx_sa *tx_sa) { + unsigned int msdu_len = macsec_msdu_len(skb); struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats); u64_stats_update_begin(&txsc_stats->syncp); if (tx_sc->encrypt) { - txsc_stats->stats.OutOctetsEncrypted += skb->len; + txsc_stats->stats.OutOctetsEncrypted += msdu_len; txsc_stats->stats.OutPktsEncrypted++; this_cpu_inc(tx_sa->stats->OutPktsEncrypted); } else { - txsc_stats->stats.OutOctetsProtected += skb->len; + txsc_stats->stats.OutOctetsProtected += msdu_len; txsc_stats->stats.OutPktsProtected++; this_cpu_inc(tx_sa->stats->OutPktsProtected); } @@ 
-534,9 +557,10 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err) aead_request_free(macsec_skb_cb(skb)->req); rcu_read_lock_bh(); - macsec_encrypt_finish(skb, dev); macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); - len = skb->len; + /* packet is encrypted/protected so tx_bytes must be calculated */ + len = macsec_msdu_len(skb) + 2 * ETH_ALEN; + macsec_encrypt_finish(skb, dev); ret = dev_queue_xmit(skb); count_tx(dev, ret, len); rcu_read_unlock_bh(); @@ -695,6 +719,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, macsec_skb_cb(skb)->req = req; macsec_skb_cb(skb)->tx_sa = tx_sa; + macsec_skb_cb(skb)->has_sci = sci_present; aead_request_set_callback(req, 0, macsec_encrypt_done, skb); dev_hold(skb->dev); @@ -736,15 +761,17 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); + DEV_STATS_INC(secy->netdev, rx_dropped); return false; } if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) { + unsigned int msdu_len = macsec_msdu_len(skb); u64_stats_update_begin(&rxsc_stats->syncp); if (hdr->tci_an & MACSEC_TCI_E) - rxsc_stats->stats.InOctetsDecrypted += skb->len; + rxsc_stats->stats.InOctetsDecrypted += msdu_len; else - rxsc_stats->stats.InOctetsValidated += skb->len; + rxsc_stats->stats.InOctetsValidated += msdu_len; u64_stats_update_end(&rxsc_stats->syncp); } @@ -757,6 +784,8 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsNotValid++; u64_stats_update_end(&rxsc_stats->syncp); + this_cpu_inc(rx_sa->stats->InPktsNotValid); + DEV_STATS_INC(secy->netdev, rx_errors); return false; } @@ -849,9 +878,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err) macsec_finalize_skb(skb, macsec->secy.icv_len, macsec_extra_len(macsec_skb_cb(skb)->has_sci)); + len = skb->len; macsec_reset_skb(skb, macsec->secy.netdev); - len = skb->len; if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS) count_rx(dev, len); @@ -1042,6 +1071,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb) u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoTag++; u64_stats_update_end(&secy_stats->syncp); + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); continue; } @@ -1151,6 +1181,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsBadTag++; u64_stats_update_end(&secy_stats->syncp); + DEV_STATS_INC(secy->netdev, rx_errors); goto drop_nosa; } @@ -1161,11 +1192,15 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) /* If validateFrames is Strict or the C bit in the * SecTAG is set, discard */ + struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc); if (hdr->tci_an & MACSEC_TCI_C || secy->validate_frames == MACSEC_VALIDATE_STRICT) { u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsNotUsingSA++; u64_stats_update_end(&rxsc_stats->syncp); + DEV_STATS_INC(secy->netdev, rx_errors); + if (active_rx_sa) + this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA); goto drop_nosa; } @@ -1175,6 +1210,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsUnusedSA++; u64_stats_update_end(&rxsc_stats->syncp); + if (active_rx_sa) + 
this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA); goto deliver; } @@ -1195,6 +1232,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); goto drop; } } @@ -1223,6 +1261,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) deliver: macsec_finalize_skb(skb, secy->icv_len, macsec_extra_len(macsec_skb_cb(skb)->has_sci)); + len = skb->len; macsec_reset_skb(skb, secy->netdev); if (rx_sa) @@ -1230,12 +1269,11 @@ deliver: macsec_rxsc_put(rx_sc); skb_orphan(skb); - len = skb->len; ret = gro_cells_receive(&macsec->gro_cells, skb); if (ret == NET_RX_SUCCESS) count_rx(dev, len); else - macsec->secy.netdev->stats.rx_dropped++; + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); rcu_read_unlock(); @@ -1272,6 +1310,7 @@ nosci: u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoSCI++; u64_stats_update_end(&secy_stats->syncp); + DEV_STATS_INC(macsec->secy.netdev, rx_errors); continue; } @@ -1290,7 +1329,7 @@ nosci: secy_stats->stats.InPktsUnknownSCI++; u64_stats_update_end(&secy_stats->syncp); } else { - macsec->secy.netdev->stats.rx_dropped++; + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); } } @@ -3399,21 +3438,21 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, if (!secy->operational) { kfree_skb(skb); - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); return NETDEV_TX_OK; } + len = skb->len; skb = macsec_encrypt(skb, dev); if (IS_ERR(skb)) { if (PTR_ERR(skb) != -EINPROGRESS) - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); return NETDEV_TX_OK; } macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); macsec_encrypt_finish(skb, dev); - len = skb->len; ret = dev_queue_xmit(skb); count_tx(dev, ret, len); return ret; @@ -3641,8 +3680,9 @@ static void macsec_get_stats64(struct net_device *dev, dev_fetch_sw_netstats(s, dev->tstats); - s->rx_dropped = dev->stats.rx_dropped; - s->tx_dropped = dev->stats.tx_dropped; + s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped); + s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped); + s->rx_errors = atomic_long_read(&dev->stats.__rx_errors); } static int macsec_get_iflink(const struct net_device *dev) diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index ef460504b235..5c4e3e94c714 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -1395,8 +1395,6 @@ static struct phy_driver at803x_driver[] = { .flags = PHY_POLL_CABLE_TEST, .config_init = at803x_config_init, .link_change_notify = at803x_link_change_notify, - .set_wol = at803x_set_wol, - .get_wol = at803x_get_wol, .suspend = at803x_suspend, .resume = at803x_resume, /* PHY_BASIC_FEATURES */ diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index b330efb98209..f3b39af83a27 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -412,6 +412,17 @@ static int bcm54xx_resume(struct phy_device *phydev) return bcm54xx_config_init(phydev); } +static int bcm54810_read_mmd(struct phy_device *phydev, int devnum, u16 regnum) +{ + return -EOPNOTSUPP; +} + +static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum, + u16 val) +{ + return -EOPNOTSUPP; +} + static int bcm54811_config_init(struct phy_device *phydev) { int err, reg; @@ -832,6 +843,8 @@ static struct phy_driver broadcom_drivers[] = { .get_strings = bcm_phy_get_strings, .get_stats = 
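The macsec hunks above replace plain dev->stats.field++ increments with DEV_STATS_INC(). The motivation is concurrency: these counters are bumped from contexts that can run in parallel, where an unsynchronized read-modify-write can lose updates; DEV_STATS_INC() increments a per-field atomic_long that macsec_get_stats64() reads back. Shape of the pairing, mirroring the hunks:

	/* writer side, safe from any context: */
	DEV_STATS_INC(dev, rx_dropped);

	/* reader side, in .ndo_get_stats64: */
	s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped);

The related macsec_start_xmit() change samples len before macsec_encrypt() for the same reason the OutOctets counters now use macsec_msdu_len(): once the SecTAG and ICV have been added, skb->len no longer measures the user data that the byte counters are defined over.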
bcm54xx_get_stats, .probe = bcm54xx_phy_probe, + .read_mmd = bcm54810_read_mmd, + .write_mmd = bcm54810_write_mmd, .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, .config_intr = bcm_phy_config_intr, diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 7c80dd59a0cb..acad1ffa4655 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3070,6 +3070,8 @@ static int phy_probe(struct device *dev) goto out; } + phy_disable_interrupts(phydev); + /* Start out supporting everything. Eventually, * a controller will attach, and may modify one * or both of these values @@ -3157,16 +3159,6 @@ static int phy_remove(struct device *dev) return 0; } -static void phy_shutdown(struct device *dev) -{ - struct phy_device *phydev = to_phy_device(dev); - - if (phydev->state == PHY_READY || !phydev->attached_dev) - return; - - phy_disable_interrupts(phydev); -} - /** * phy_driver_register - register a phy_driver with the PHY layer * @new_driver: new phy_driver to register @@ -3190,7 +3182,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) new_driver->mdiodrv.driver.bus = &mdio_bus_type; new_driver->mdiodrv.driver.probe = phy_probe; new_driver->mdiodrv.driver.remove = phy_remove; - new_driver->mdiodrv.driver.shutdown = phy_shutdown; new_driver->mdiodrv.driver.owner = owner; new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS; diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 663ce0e09c2d..bdb05d246b86 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -523,7 +523,7 @@ static int tap_open(struct inode *inode, struct file *file) q->sock.state = SS_CONNECTED; q->sock.file = file; q->sock.ops = &tap_socket_ops; - sock_init_data_uid(&q->sock, &q->sk, inode->i_uid); + sock_init_data_uid(&q->sock, &q->sk, current_fsuid()); q->sk.sk_write_space = tap_sock_write_space; q->sk.sk_destruct = tap_sock_destruct; q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 4dfa9c610974..f99df92d211e 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2195,7 +2195,9 @@ static void team_setup(struct net_device *dev) dev->hw_features = TEAM_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_FILTER; dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; dev->features |= dev->hw_features; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 30eea8270c9b..e685c84ebe3a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1574,7 +1574,7 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, if (zerocopy) return false; - if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + + if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) return false; @@ -3411,7 +3411,7 @@ static int tun_chr_open(struct inode *inode, struct file * file) tfile->socket.file = file; tfile->socket.ops = &tun_socket_ops; - sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid); + sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid()); tfile->sk.sk_write_space = tun_sock_write_space; tfile->sk.sk_sndbuf = INT_MAX; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 3497b5a286ea..695e4efdc011 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -617,6 +617,13 @@ static const struct usb_device_id products[] = { .match_flags = 
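The ll_temac hunk above is the standard dev_err_probe() conversion: the helper stays quiet on -EPROBE_DEFER, logs otherwise, and hands the error code back, collapsing the old open-coded pattern. A generic before/after sketch with hypothetical variables:

	/* before: open-coded deferral check */
	if (irq < 0) {
		if (irq != -EPROBE_DEFER)
			dev_err(dev, "could not get irq\n");
		return irq;
	}

	/* after: one call logs (unless deferring) and returns the error */
	if (irq < 0)
		return dev_err_probe(dev, irq, "could not get irq\n");

The `rc = lp->rx_irq ?: -EINVAL;` detail additionally maps a returned 0 to a real error code, since 0 is not a usable DMA interrupt here.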
USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, + .idProduct = 0x8005, /* A-300 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, .idProduct = 0x8006, /* B-500/SL-5600 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, @@ -624,11 +631,25 @@ static const struct usb_device_id products[] = { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, + .idProduct = 0x8006, /* B-500/SL-5600 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, .idProduct = 0x8007, /* C-700 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8007, /* C-700 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x9031, /* C-750 C-760 */ diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index e4fbb4d86606..566aa01ad281 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1771,6 +1771,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) } else if (!info->in || !info->out) status = usbnet_get_endpoints (dev, udev); else { + u8 ep_addrs[3] = { + info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0 + }; + dev->in = usb_rcvbulkpipe (xdev, info->in); dev->out = usb_sndbulkpipe (xdev, info->out); if (!(info->flags & FLAG_NO_SETINT)) @@ -1780,6 +1784,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) else status = 0; + if (status == 0 && !usb_check_bulk_endpoints(udev, ep_addrs)) + status = -EINVAL; } if (status >= 0 && dev->status) status = init_status (dev, udev); diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c index 7984f2157d22..df3617c4c44e 100644 --- a/drivers/net/usb/zaurus.c +++ b/drivers/net/usb/zaurus.c @@ -289,11 +289,25 @@ static const struct usb_device_id products [] = { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, + .idProduct = 0x8005, /* A-300 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, .idProduct = 0x8006, /* B-500/SL-5600 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8006, /* B-500/SL-5600 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8007, /* C-700 */ @@ -301,6 +315,13 @@ static const struct usb_device_id products [] = { .driver_info = ZAURUS_PXA_INFO, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8007, /* C-700 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x9031, /* C-750 C-760 */ diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 41cb9179e8b7..45ee44f66e77 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1654,10 
+1654,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, nla_peer = data[VETH_INFO_PEER]; ifmp = nla_data(nla_peer); - err = rtnl_nla_parse_ifla(peer_tb, - nla_data(nla_peer) + sizeof(struct ifinfomsg), - nla_len(nla_peer) - sizeof(struct ifinfomsg), - NULL); + err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack); if (err < 0) return err; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index af335f8266c2..3eefe8171925 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3319,8 +3319,6 @@ static int virtnet_probe(struct virtio_device *vdev) } } - _virtnet_set_queues(vi, vi->curr_queue_pairs); - /* serialize netdev register + virtio_device_ready() with ndo_open() */ rtnl_lock(); @@ -3333,6 +3331,8 @@ static int virtnet_probe(struct virtio_device *vdev) virtio_device_ready(vdev); + _virtnet_set_queues(vi, vi->curr_queue_pairs); + rtnl_unlock(); err = virtnet_cpu_notif_add(vi); diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c index 5bf7822c53f1..0ba714ca5185 100644 --- a/drivers/net/wireguard/allowedips.c +++ b/drivers/net/wireguard/allowedips.c @@ -6,7 +6,7 @@ #include "allowedips.h" #include "peer.h" -enum { MAX_ALLOWEDIPS_BITS = 128 }; +enum { MAX_ALLOWEDIPS_DEPTH = 129 }; static struct kmem_cache *node_cache; @@ -42,7 +42,7 @@ static void push_rcu(struct allowedips_node **stack, struct allowedips_node __rcu *p, unsigned int *len) { if (rcu_access_pointer(p)) { - if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_BITS)) + if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_DEPTH)) return; stack[(*len)++] = rcu_dereference_raw(p); } @@ -55,7 +55,7 @@ static void node_free_rcu(struct rcu_head *rcu) static void root_free_rcu(struct rcu_head *rcu) { - struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { + struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { container_of(rcu, struct allowedips_node, rcu) }; unsigned int len = 1; @@ -68,7 +68,7 @@ static void root_free_rcu(struct rcu_head *rcu) static void root_remove_peer_lists(struct allowedips_node *root) { - struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { root }; + struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { root }; unsigned int len = 1; while (len > 0 && (node = stack[--len])) { diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c index 41db10f9be49..2c9eec24eec4 100644 --- a/drivers/net/wireguard/selftest/allowedips.c +++ b/drivers/net/wireguard/selftest/allowedips.c @@ -593,16 +593,20 @@ bool __init wg_allowedips_selftest(void) wg_allowedips_remove_by_peer(&t, a, &mutex); test_negative(4, a, 192, 168, 0, 1); - /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_BITS) in free_node + /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_DEPTH) in free_node * if something goes wrong. 
*/ - for (i = 0; i < MAX_ALLOWEDIPS_BITS; ++i) { - part = cpu_to_be64(~(1LLU << (i % 64))); - memset(&ip, 0xff, 16); - memcpy((u8 *)&ip + (i < 64) * 8, &part, 8); + for (i = 0; i < 64; ++i) { + part = cpu_to_be64(~0LLU << i); + memset(&ip, 0xff, 8); + memcpy((u8 *)&ip + 8, &part, 8); + wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); + memcpy(&ip, &part, 8); + memset((u8 *)&ip + 8, 0, 8); wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); } - + memset(&ip, 0, 16); + wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); wg_allowedips_free(&t, &mutex); wg_allowedips_init(&t); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c index 6dbaaf95ee38..2092aa373ab3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c @@ -123,12 +123,12 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev) case MT_EE_5GHZ: dev->mphy.cap.has_5ghz = true; break; - case MT_EE_2GHZ: - dev->mphy.cap.has_2ghz = true; - break; case MT_EE_DBDC: dev->dbdc_support = true; fallthrough; + case MT_EE_2GHZ: + dev->mphy.cap.has_2ghz = true; + break; default: dev->mphy.cap.has_2ghz = true; dev->mphy.cap.has_5ghz = true; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 2db9c166a1b7..b76e1d4adcc7 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -989,6 +989,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) goto out_cleanup_connect_q; if (!new) { + nvme_start_freeze(&ctrl->ctrl); nvme_start_queues(&ctrl->ctrl); if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { /* @@ -997,6 +998,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) * to be safe. */ ret = -ENODEV; + nvme_unfreeze(&ctrl->ctrl); goto out_wait_freeze_timed_out; } blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, @@ -1038,7 +1040,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, bool remove) { if (ctrl->ctrl.queue_count > 1) { - nvme_start_freeze(&ctrl->ctrl); nvme_stop_queues(&ctrl->ctrl); nvme_sync_io_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index fb47d0603e05..4ca7ef941600 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1888,6 +1888,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) goto out_cleanup_connect_q; if (!new) { + nvme_start_freeze(ctrl); nvme_start_queues(ctrl); if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { /* @@ -1896,6 +1897,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) * to be safe. 
*/ ret = -ENODEV; + nvme_unfreeze(ctrl); goto out_wait_freeze_timed_out; } blk_mq_update_nr_hw_queues(ctrl->tagset, @@ -2014,7 +2016,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, if (ctrl->queue_count <= 1) return; blk_mq_quiesce_queue(ctrl->admin_q); - nvme_start_freeze(ctrl); nvme_stop_queues(ctrl); nvme_sync_io_queues(ctrl); nvme_tcp_stop_io_queues(ctrl); diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index cd3821a6444f..4e436f2d13ae 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -63,15 +63,14 @@ int of_reconfig_notifier_unregister(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister); -#ifdef DEBUG -const char *action_names[] = { +static const char *action_names[] = { + [0] = "INVALID", [OF_RECONFIG_ATTACH_NODE] = "ATTACH_NODE", [OF_RECONFIG_DETACH_NODE] = "DETACH_NODE", [OF_RECONFIG_ADD_PROPERTY] = "ADD_PROPERTY", [OF_RECONFIG_REMOVE_PROPERTY] = "REMOVE_PROPERTY", [OF_RECONFIG_UPDATE_PROPERTY] = "UPDATE_PROPERTY", }; -#endif int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p) { @@ -594,21 +593,9 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce) } ret = __of_add_property(ce->np, ce->prop); - if (ret) { - pr_err("changeset: add_property failed @%pOF/%s\n", - ce->np, - ce->prop->name); - break; - } break; case OF_RECONFIG_REMOVE_PROPERTY: ret = __of_remove_property(ce->np, ce->prop); - if (ret) { - pr_err("changeset: remove_property failed @%pOF/%s\n", - ce->np, - ce->prop->name); - break; - } break; case OF_RECONFIG_UPDATE_PROPERTY: @@ -622,20 +609,17 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce) } ret = __of_update_property(ce->np, ce->prop, &old_prop); - if (ret) { - pr_err("changeset: update_property failed @%pOF/%s\n", - ce->np, - ce->prop->name); - break; - } break; default: ret = -EINVAL; } raw_spin_unlock_irqrestore(&devtree_lock, flags); - if (ret) + if (ret) { + pr_err("changeset: apply failed: %-15s %pOF:%s\n", + action_names[ce->action], ce->np, ce->prop->name); return ret; + } switch (ce->action) { case OF_RECONFIG_ATTACH_NODE: @@ -921,6 +905,9 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action, if (!ce) return -ENOMEM; + if (WARN_ON(action >= ARRAY_SIZE(action_names))) + return -EINVAL; + /* get a reference to the node */ ce->action = action; ce->np = of_node_get(np); diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c index 52bb68fb2216..3a07cc58e7d7 100644 --- a/drivers/of/kexec.c +++ b/drivers/of/kexec.c @@ -187,8 +187,8 @@ int ima_free_kexec_buffer(void) if (ret) return ret; - return memblock_free(addr, size); - + memblock_free_late(addr, size); + return 0; } /** diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 2bee1d992408..a9871e2f0a0b 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -657,12 +657,12 @@ static void __init of_unittest_parse_phandle_with_args_map(void) memset(&args, 0, sizeof(args)); EXPECT_BEGIN(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle"); + "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678"); rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-phandle", "phandle", 0, &args); EXPECT_END(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle"); + "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678"); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c 
b/drivers/pci/controller/dwc/pcie-tegra194.c index bdd84765e646..765abe073228 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -239,6 +239,7 @@ #define EP_STATE_ENABLED 1 static const unsigned int pcie_gen_freq[] = { + GEN1_CORE_CLK_FREQ, /* PCI_EXP_LNKSTA_CLS == 0; undefined */ GEN1_CORE_CLK_FREQ, GEN2_CORE_CLK_FREQ, GEN3_CORE_CLK_FREQ, @@ -452,7 +453,11 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg) speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) & PCI_EXP_LNKSTA_CLS; - clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); + + if (speed >= ARRAY_SIZE(pcie_gen_freq)) + speed = 0; + + clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]); /* If EP doesn't advertise L1SS, just return */ val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); @@ -989,7 +994,11 @@ retry_link: speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) & PCI_EXP_LNKSTA_CLS; - clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); + + if (speed >= ARRAY_SIZE(pcie_gen_freq)) + speed = 0; + + clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]); tegra_pcie_enable_interrupts(pp); diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index f031302ad401..0a37967b0a93 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -503,12 +503,15 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) if (pass && dev->subordinate) { check_hotplug_bridge(slot, dev); pcibios_resource_survey_bus(dev->subordinate); - __pci_bus_size_bridges(dev->subordinate, - &add_list); + if (pci_is_root_bus(bus)) + __pci_bus_size_bridges(dev->subordinate, &add_list); } } } - __pci_bus_assign_resources(bus, &add_list, NULL); + if (pci_is_root_bus(bus)) + __pci_bus_assign_resources(bus, &add_list, NULL); + else + pci_assign_unassigned_bridge_resources(bus->self); } acpiphp_sanitize_bus(bus); diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 1cac52870711..e6c90c0bb764 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -1053,6 +1053,8 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s) q = p->next; kfree(p); } + + kfree(data); } diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c index 32829eb9656c..ddd8ee6b604e 100644 --- a/drivers/pinctrl/renesas/pinctrl-rza2.c +++ b/drivers/pinctrl/renesas/pinctrl-rza2.c @@ -14,6 +14,7 @@ #include <linux/gpio/driver.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/of_device.h> #include <linux/pinctrl/pinmux.h> @@ -46,6 +47,7 @@ struct rza2_pinctrl_priv { struct pinctrl_dev *pctl; struct pinctrl_gpio_range gpio_range; int npins; + struct mutex mutex; /* serialize adding groups and functions */ }; #define RZA2_PDR(port) (0x0000 + (port) * 2) /* Direction 16-bit */ @@ -359,10 +361,14 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev, psel_val[i] = MUX_FUNC(value); } + mutex_lock(&priv->mutex); + /* Register a single pin group listing all the pins we read from DT */ gsel = pinctrl_generic_add_group(pctldev, np->name, pins, npins, NULL); - if (gsel < 0) - return gsel; + if (gsel < 0) { + ret = gsel; + goto unlock; + } /* * Register a single group function where the 'data' is an array PSEL @@ -391,6 +397,8 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev, (*map)->data.mux.function = np->name; *num_maps = 1; + mutex_unlock(&priv->mutex); + 
return 0; remove_function: @@ -399,6 +407,9 @@ remove_function: remove_group: pinctrl_generic_remove_group(pctldev, gsel); +unlock: + mutex_unlock(&priv->mutex); + dev_err(priv->dev, "Unable to parse DT node %s\n", np->name); return ret; @@ -474,6 +485,8 @@ static int rza2_pinctrl_probe(struct platform_device *pdev) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); + mutex_init(&priv->mutex); + platform_set_drvdata(pdev, priv); priv->npins = (int)(uintptr_t)of_device_get_match_data(&pdev->dev) * diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index a5aa0bdc61d6..e8c360879883 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -717,7 +717,6 @@ struct qeth_card_info { u16 chid; u8 ids_valid:1; /* cssid,iid,chid */ u8 dev_addr_is_registered:1; - u8 open_when_online:1; u8 promisc_mode:1; u8 use_v1_blkt:1; u8 is_vm_nic:1; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e9807d2996a9..62e7576bff53 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -5459,8 +5459,6 @@ int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc, qeth_clear_ipacmd_list(card); rtnl_lock(); - card->info.open_when_online = card->dev->flags & IFF_UP; - dev_close(card->dev); netif_device_detach(card->dev); netif_carrier_off(card->dev); rtnl_unlock(); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d694e3ff8086..7cdf3274cf96 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -2373,9 +2373,12 @@ static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok) qeth_enable_hw_features(dev); qeth_l2_enable_brport_features(card); - if (card->info.open_when_online) { - card->info.open_when_online = 0; - dev_open(dev, NULL); + if (netif_running(dev)) { + local_bh_disable(); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); + qeth_l2_set_rx_mode(dev); } rtnl_unlock(); } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 6fd3e288f059..93f55c734802 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2029,9 +2029,11 @@ static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok) netif_device_attach(dev); qeth_enable_hw_features(dev); - if (card->info.open_when_online) { - card->info.open_when_online = 0; - dev_open(dev, NULL); + if (netif_running(dev)) { + local_bh_disable(); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); } rtnl_unlock(); } diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index b61acbb09be3..d323f9985c48 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -534,8 +534,7 @@ static void zfcp_fc_adisc_handler(void *data) /* re-init to undo drop from zfcp_fc_adisc() */ port->d_id = ntoh24(adisc_resp->adisc_port_id); - /* port is good, unblock rport without going through erp */ - zfcp_scsi_schedule_rport_register(port); + /* port is still good, nothing to do */ out: atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status); put_device(&port->dev); @@ -595,9 +594,6 @@ void zfcp_fc_link_test_work(struct work_struct *work) int retval; set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */ - get_device(&port->dev); - port->rport_task = RPORT_DEL; - zfcp_scsi_rport_work(&port->rport_work); /* only issue one test command at one time per port */ if (atomic_read(&port->status) & 
ZFCP_STATUS_PORT_LINK_TEST) diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index a12e3525977d..2f810dac7b8b 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -1599,7 +1599,7 @@ NCR_700_intr(int irq, void *dev_id) printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG))); #endif resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch; - } else if(dsp >= to32bit(&slot->pSG[0].ins) && + } else if (slot && dsp >= to32bit(&slot->pSG[0].ins) && dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) { int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff; int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List); diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index d084a7db3925..e6c36b5b0739 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -2172,7 +2172,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, bool new_sc) { - int ret = SUCCESS; + int ret = 0; struct fnic_pending_aborts_iter_data iter_data = { .fnic = fnic, .lun_dev = lr_sc->device, @@ -2192,9 +2192,11 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, /* walk again to check, if IOs are still pending in fw */ if (fnic_is_abts_pending(fnic, lr_sc)) - ret = FAILED; + ret = 1; clean_pending_aborts_end: + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "%s: exit status: %d\n", __func__, ret); return ret; } diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index cf10c1a60399..61959dd2237f 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -31,6 +31,7 @@ static void qedf_remove(struct pci_dev *pdev); static void qedf_shutdown(struct pci_dev *pdev); static void qedf_schedule_recovery_handler(void *dev); static void qedf_recovery_handler(struct work_struct *work); +static int qedf_suspend(struct pci_dev *pdev, pm_message_t state); /* * Driver module parameters. 
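
The fnic hunk above looks cosmetic but fixes a real convention bug: fnic_clean_pending_aborts() is used by callers as a boolean, yet it previously returned the SCSI error-handling codes SUCCESS and FAILED, which are both non-zero. A short demonstration of the failure mode, using the code values as defined in scsi/scsi.h for this kernel series (the helper names are illustrative):

#include <stdio.h>

/* SCSI midlayer error-handling codes (per include/scsi/scsi.h). */
#define SUCCESS 0x2002
#define FAILED  0x2003

static int clean_pending_aborts_old(int aborts_pending)
{
    return aborts_pending ? FAILED : SUCCESS;   /* both values are truthy */
}

static int clean_pending_aborts_new(int aborts_pending)
{
    return aborts_pending ? 1 : 0;              /* safe in boolean context */
}

int main(void)
{
    /* A caller that treats non-zero as "aborts still pending" always
     * takes the error path with the old convention: */
    if (clean_pending_aborts_old(0))
        printf("old: success (0x%x) still reads as failure\n", SUCCESS);
    if (!clean_pending_aborts_new(0))
        printf("new: success reads as 0, as the caller expects\n");
    return 0;
}
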
@@ -3276,6 +3277,7 @@ static struct pci_driver qedf_pci_driver = { .probe = qedf_probe, .remove = qedf_remove, .shutdown = qedf_shutdown, + .suspend = qedf_suspend, }; static int __qedf_probe(struct pci_dev *pdev, int mode) @@ -4005,6 +4007,22 @@ static void qedf_shutdown(struct pci_dev *pdev) __qedf_remove(pdev, QEDF_MODE_NORMAL); } +static int qedf_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct qedf_ctx *qedf; + + if (!pdev) { + QEDF_ERR(NULL, "pdev is NULL.\n"); + return -ENODEV; + } + + qedf = pci_get_drvdata(pdev); + + QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__); + + return -EPERM; +} + /* * Recovery handler code */ diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index e0096fc5927e..d2fe8ae97abc 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -69,6 +69,7 @@ static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi); static void qedi_recovery_handler(struct work_struct *work); static void qedi_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type); +static int qedi_suspend(struct pci_dev *pdev, pm_message_t state); static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle) { @@ -2515,6 +2516,22 @@ static void qedi_shutdown(struct pci_dev *pdev) __qedi_remove(pdev, QEDI_MODE_SHUTDOWN); } +static int qedi_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct qedi_ctx *qedi; + + if (!pdev) { + QEDI_ERR(NULL, "pdev is NULL.\n"); + return -ENODEV; + } + + qedi = pci_get_drvdata(pdev); + + QEDI_ERR(&qedi->dbg_ctx, "%s: Device does not support suspend operation\n", __func__); + + return -EPERM; +} + static int __qedi_probe(struct pci_dev *pdev, int mode) { struct qedi_ctx *qedi; @@ -2873,6 +2890,7 @@ static struct pci_driver qedi_pci_driver = { .remove = qedi_remove, .shutdown = qedi_shutdown, .err_handler = &qedi_err_handler, + .suspend = qedi_suspend, }; static int __init qedi_init(void) diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c index 898a0bdf8df6..95a86e0dfd77 100644 --- a/drivers/scsi/raid_class.c +++ b/drivers/scsi/raid_class.c @@ -209,53 +209,6 @@ raid_attr_ro_state(level); raid_attr_ro_fn(resync); raid_attr_ro_state_fn(state); -static void raid_component_release(struct device *dev) -{ - struct raid_component *rc = - container_of(dev, struct raid_component, dev); - dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n"); - put_device(rc->dev.parent); - kfree(rc); -} - -int raid_component_add(struct raid_template *r,struct device *raid_dev, - struct device *component_dev) -{ - struct device *cdev = - attribute_container_find_class_device(&r->raid_attrs.ac, - raid_dev); - struct raid_component *rc; - struct raid_data *rd = dev_get_drvdata(cdev); - int err; - - rc = kzalloc(sizeof(*rc), GFP_KERNEL); - if (!rc) - return -ENOMEM; - - INIT_LIST_HEAD(&rc->node); - device_initialize(&rc->dev); - rc->dev.release = raid_component_release; - rc->dev.parent = get_device(component_dev); - rc->num = rd->component_count++; - - dev_set_name(&rc->dev, "component-%d", rc->num); - list_add_tail(&rc->node, &rd->component_list); - rc->dev.class = &raid_class.class; - err = device_add(&rc->dev); - if (err) - goto err_out; - - return 0; - -err_out: - list_del(&rc->node); - rd->component_count--; - put_device(component_dev); - kfree(rc); - return err; -} -EXPORT_SYMBOL(raid_component_add); - struct raid_template * raid_class_attach(struct raid_function_template *ft) { diff --git a/drivers/scsi/scsi_proc.c 
b/drivers/scsi/scsi_proc.c index d6982d355739..94603e64cc6b 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c @@ -311,7 +311,7 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos) { int host, channel, id, lun; - char *buffer, *p; + char *buffer, *end, *p; int err; if (!buf || length > PAGE_SIZE) @@ -326,10 +326,14 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, goto out; err = -EINVAL; - if (length < PAGE_SIZE) - buffer[length] = '\0'; - else if (buffer[PAGE_SIZE-1]) - goto out; + if (length < PAGE_SIZE) { + end = buffer + length; + *end = '\0'; + } else { + end = buffer + PAGE_SIZE - 1; + if (*end) + goto out; + } /* * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi @@ -338,10 +342,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, if (!strncmp("scsi add-single-device", buffer, 22)) { p = buffer + 23; - host = simple_strtoul(p, &p, 0); - channel = simple_strtoul(p + 1, &p, 0); - id = simple_strtoul(p + 1, &p, 0); - lun = simple_strtoul(p + 1, &p, 0); + host = (p < end) ? simple_strtoul(p, &p, 0) : 0; + channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; err = scsi_add_single_device(host, channel, id, lun); @@ -352,10 +356,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, } else if (!strncmp("scsi remove-single-device", buffer, 25)) { p = buffer + 26; - host = simple_strtoul(p, &p, 0); - channel = simple_strtoul(p + 1, &p, 0); - id = simple_strtoul(p + 1, &p, 0); - lun = simple_strtoul(p + 1, &p, 0); + host = (p < end) ? simple_strtoul(p, &p, 0) : 0; + channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; err = scsi_remove_single_device(host, channel, id, lun); } diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index 7cf871323b2c..e362453e8d26 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c @@ -321,7 +321,7 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid) spin_lock_irqsave(snic->shost->host_lock, flags); list_del(&tgt->list); spin_unlock_irqrestore(snic->shost->host_lock, flags); - kfree(tgt); + put_device(&tgt->dev); tgt = NULL; return tgt; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index c9b1500c2ab8..9f8ebbec7bc3 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -406,6 +406,7 @@ static void storvsc_on_channel_callback(void *context); #define STORVSC_FC_MAX_LUNS_PER_TARGET 255 #define STORVSC_FC_MAX_TARGETS 128 #define STORVSC_FC_MAX_CHANNELS 8 +#define STORVSC_FC_MAX_XFER_SIZE ((u32)(512 * 1024)) #define STORVSC_IDE_MAX_LUNS_PER_TARGET 64 #define STORVSC_IDE_MAX_TARGETS 1 @@ -1729,10 +1730,6 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) */ static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd) { -#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) - if (scmnd->device->host->transportt == fc_transport_template) - return fc_eh_timed_out(scmnd); -#endif return BLK_EH_RESET_TIMER; } @@ -2071,6 +2068,9 @@ static int storvsc_probe(struct hv_device *device, * protecting it from any weird value. 
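
The scsi_proc hunks above place an explicit terminator at `end` and then guard every simple_strtoul() call so the parse cursor can never run past it, even for short or malformed input. A userspace sketch of the same pattern using strtoul (parse_hcil is a hypothetical helper, not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "host channel id lun" without walking past `end`, mirroring
 * the bounds checks added to proc_scsi_write(). Missing fields fall
 * back to 0 instead of reading out of bounds. */
static void parse_hcil(char *p, char *end, unsigned long out[4])
{
    out[0] = (p < end) ? strtoul(p, &p, 0) : 0;
    for (int i = 1; i < 4; i++)
        out[i] = (p + 1 < end) ? strtoul(p + 1, &p, 0) : 0;
}

int main(void)
{
    char buf[] = "scsi add-single-device 0 1 2 3";
    char *end = buf + strlen(buf);     /* buf[len] is already '\0' */
    unsigned long v[4] = { 0 };

    if (!strncmp("scsi add-single-device", buf, 22))
        parse_hcil(buf + 23, end, v);
    printf("host=%lu channel=%lu id=%lu lun=%lu\n", v[0], v[1], v[2], v[3]);
    return 0;
}
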
*/ max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE); + if (is_fc) + max_xfer_bytes = min(max_xfer_bytes, STORVSC_FC_MAX_XFER_SIZE); + /* max_hw_sectors_kb */ host->max_sectors = max_xfer_bytes >> 9; /* diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c index 1ca140356a08..3f759121dc00 100644 --- a/drivers/soc/aspeed/aspeed-socinfo.c +++ b/drivers/soc/aspeed/aspeed-socinfo.c @@ -137,6 +137,7 @@ static int __init aspeed_socinfo_init(void) soc_dev = soc_device_register(attrs); if (IS_ERR(soc_dev)) { + kfree(attrs->machine); kfree(attrs->soc_id); kfree(attrs->serial_number); kfree(attrs); diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index b7cdfa65157c..230a3250f315 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -828,8 +828,8 @@ static void sdw_modify_slave_status(struct sdw_slave *slave, "%s: initializing enumeration and init completion for Slave %d\n", __func__, slave->dev_num); - init_completion(&slave->enumeration_complete); - init_completion(&slave->initialization_complete); + reinit_completion(&slave->enumeration_complete); + reinit_completion(&slave->initialization_complete); } else if ((status == SDW_SLAVE_ATTACHED) && (slave->status == SDW_SLAVE_UNATTACHED)) { @@ -837,7 +837,7 @@ static void sdw_modify_slave_status(struct sdw_slave *slave, "%s: signaling enumeration completion for Slave %d\n", __func__, slave->dev_num); - complete(&slave->enumeration_complete); + complete_all(&slave->enumeration_complete); } slave->status = status; mutex_unlock(&bus->bus_lock); @@ -1840,7 +1840,19 @@ int sdw_handle_slave_status(struct sdw_bus *bus, "%s: signaling initialization completion for Slave %d\n", __func__, slave->dev_num); - complete(&slave->initialization_complete); + complete_all(&slave->initialization_complete); + + /* + * If the manager became pm_runtime active, the peripherals will be + * restarted and attach, but their pm_runtime status may remain + * suspended. If the 'update_slave_status' callback initiates + * any sort of deferred processing, this processing would not be + * cancelled on pm_runtime suspend. + * To avoid such zombie states, we queue a request to resume. + * This would be a no-op in case the peripheral was being resumed + * by e.g. the ALSA/ASoC framework. + */ + pm_request_resume(&slave->dev); } } diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c index 566c03105fb8..1b7ab0bbd132 100644 --- a/drivers/thunderbolt/retimer.c +++ b/drivers/thunderbolt/retimer.c @@ -208,6 +208,21 @@ static ssize_t nvm_authenticate_show(struct device *dev, return ret; } +static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status) +{ + int i; + + tb_port_dbg(port, "reading NVM authentication status of retimers\n"); + + /* + * Before doing anything else, read the authentication status. + * If the retimer has it set, store it for the new retimer + * device instance. + */ + for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) + usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]); +} + static void tb_retimer_set_inbound_sbtx(struct tb_port *port) { int i; @@ -481,18 +496,16 @@ int tb_retimer_scan(struct tb_port *port, bool add) return ret; /* - * Enable sideband channel for each retimer. We can do this - * regardless whether there is device connected or not. + * Immediately after sending enumerate retimers read the + * authentication status of each retimer. 
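
The soundwire bus.c change above is about completion lifetime, not just naming: init_completion() re-initializes the embedded wait queue, which is unsafe if a waiter is already sleeping on it, so a completion that is being re-armed must use reinit_completion(); and complete_all() saturates the done counter so every waiter, current or future, observes the event instead of only the first. A toy model of those semantics (counter only; the real struct completion also embeds the wait queue, which is exactly what makes re-running init unsafe):

#include <limits.h>
#include <stdio.h>

struct completion { unsigned int done; };

static void reinit_completion(struct completion *x) { x->done = 0; }

static void complete(struct completion *x)
{
    if (x->done != UINT_MAX)
        x->done++;                     /* wakes exactly one waiter */
}

static void complete_all(struct completion *x)
{
    x->done = UINT_MAX;                /* everyone from now on succeeds */
}

static int try_wait_for_completion(struct completion *x)
{
    if (!x->done)
        return 0;
    if (x->done != UINT_MAX)
        x->done--;
    return 1;
}

int main(void)
{
    struct completion c = { 0 };

    complete(&c);
    printf("1st waiter: %d, 2nd waiter: %d\n",
           try_wait_for_completion(&c), try_wait_for_completion(&c));

    complete_all(&c);
    printf("after complete_all: %d %d %d\n",
           try_wait_for_completion(&c), try_wait_for_completion(&c),
           try_wait_for_completion(&c));

    reinit_completion(&c);             /* re-arm for the next attach cycle */
    printf("after reinit: %d\n", try_wait_for_completion(&c));
    return 0;
}
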
*/ - tb_retimer_set_inbound_sbtx(port); + tb_retimer_nvm_authenticate_status(port, status); /* - * Before doing anything else, read the authentication status. - * If the retimer has it set, store it for the new retimer - * device instance. + * Enable sideband channel for each retimer. We can do this + * regardless whether there is device connected or not. */ - for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) - usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]); + tb_retimer_set_inbound_sbtx(port); for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) { /* diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 54173c23263c..67889c014414 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -2387,12 +2387,13 @@ static void gsm_error(struct gsm_mux *gsm, static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) { int i; - struct gsm_dlci *dlci = gsm->dlci[0]; + struct gsm_dlci *dlci; struct gsm_msg *txq, *ntxq; gsm->dead = true; mutex_lock(&gsm->mutex); + dlci = gsm->dlci[0]; if (dlci) { if (disc && dlci->state != DLCI_CLOSED) { gsm_dlci_begin_close(dlci); diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index ad5b742a68cd..74e477016f25 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -3274,6 +3274,7 @@ void serial8250_init_port(struct uart_8250_port *up) struct uart_port *port = &up->port; spin_lock_init(&port->lock); + port->pm = NULL; port->ops = &serial8250_pops; port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE); diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 46a380b83df8..c9f8d71988f8 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -259,6 +259,7 @@ enum lpuart_type { IMX7ULP_LPUART, IMX8ULP_LPUART, IMX8QXP_LPUART, + IMXRT1050_LPUART, }; struct lpuart_port { @@ -311,7 +312,7 @@ static const struct lpuart_soc_data vf_data = { static const struct lpuart_soc_data ls1021a_data = { .devtype = LS1021A_LPUART, .iotype = UPIO_MEM32BE, - .rx_watermark = 0, + .rx_watermark = 1, }; static const struct lpuart_soc_data ls1028a_data = { @@ -324,7 +325,7 @@ static struct lpuart_soc_data imx7ulp_data = { .devtype = IMX7ULP_LPUART, .iotype = UPIO_MEM32, .reg_off = IMX_REG_OFF, - .rx_watermark = 0, + .rx_watermark = 1, }; static struct lpuart_soc_data imx8ulp_data = { @@ -338,9 +339,17 @@ static struct lpuart_soc_data imx8qxp_data = { .devtype = IMX8QXP_LPUART, .iotype = UPIO_MEM32, .reg_off = IMX_REG_OFF, + .rx_watermark = 31, }; +static struct lpuart_soc_data imxrt1050_data = { + .devtype = IMXRT1050_LPUART, + .iotype = UPIO_MEM32, + .reg_off = IMX_REG_OFF, + .rx_watermark = 1, +}; + static const struct of_device_id lpuart_dt_ids[] = { { .compatible = "fsl,vf610-lpuart", .data = &vf_data, }, { .compatible = "fsl,ls1021a-lpuart", .data = &ls1021a_data, }, @@ -348,6 +357,7 @@ static const struct of_device_id lpuart_dt_ids[] = { { .compatible = "fsl,imx7ulp-lpuart", .data = &imx7ulp_data, }, { .compatible = "fsl,imx8ulp-lpuart", .data = &imx8ulp_data, }, { .compatible = "fsl,imx8qxp-lpuart", .data = &imx8qxp_data, }, + { .compatible = "fsl,imxrt1050-lpuart", .data = &imxrt1050_data}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, lpuart_dt_ids); @@ -1191,8 +1201,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) unsigned long sr = lpuart32_read(&sport->port, UARTSTAT); if (sr & (UARTSTAT_PE | UARTSTAT_FE)) { - /* Read DR to clear the error flags */ - lpuart32_read(&sport->port, UARTDATA); + /* Clear the error flags */ 
+ lpuart32_write(&sport->port, sr, UARTSTAT); if (sr & UARTSTAT_PE) sport->port.icount.parity++; @@ -1739,6 +1749,7 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport) /* set the watermark */ if (uart_console(&sport->port)) sport->rx_watermark = 1; + val = (sport->rx_watermark << UARTWATER_RXWATER_OFF) | (0x0 << UARTWATER_TXWATER_OFF); lpuart32_write(&sport->port, val, UARTWATER); @@ -2849,6 +2860,7 @@ OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup) OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup); OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8ulp-lpuart", lpuart32_imx_early_console_setup); OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup); +OF_EARLYCON_DECLARE(lpuart32, "fsl,imxrt1050-lpuart", lpuart32_imx_early_console_setup); EARLYCON_DECLARE(lpuart, lpuart_early_console_setup); EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup); diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 1325f0010ee7..7acaed3bd7ff 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -71,6 +71,10 @@ static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = { CI_HDRC_PMQOS, }; +static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = { + .flags = CI_HDRC_SUPPORTS_RUNTIME_PM, +}; + static const struct of_device_id ci_hdrc_imx_dt_ids[] = { { .compatible = "fsl,imx23-usb", .data = &imx23_usb_data}, { .compatible = "fsl,imx28-usb", .data = &imx28_usb_data}, @@ -81,6 +85,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = { { .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data}, { .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data}, { .compatible = "fsl,imx7ulp-usb", .data = &imx7ulp_usb_data}, + { .compatible = "fsl,imx8ulp-usb", .data = &imx8ulp_usb_data}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids); diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c index 0158148cb054..521c95935d4c 100644 --- a/drivers/usb/common/usb-conn-gpio.c +++ b/drivers/usb/common/usb-conn-gpio.c @@ -42,6 +42,7 @@ struct usb_conn_info { struct power_supply_desc desc; struct power_supply *charger; + bool initial_detection; }; /* @@ -86,11 +87,13 @@ static void usb_conn_detect_cable(struct work_struct *work) dev_dbg(info->dev, "role %s -> %s, gpios: id %d, vbus %d\n", usb_role_string(info->last_role), usb_role_string(role), id, vbus); - if (info->last_role == role) { + if (!info->initial_detection && info->last_role == role) { dev_warn(info->dev, "repeated role: %s\n", usb_role_string(role)); return; } + info->initial_detection = false; + if (info->last_role == USB_ROLE_HOST && info->vbus) regulator_disable(info->vbus); @@ -273,6 +276,7 @@ static int usb_conn_probe(struct platform_device *pdev) platform_set_drvdata(pdev, info); /* Perform initial detection */ + info->initial_detection = true; usb_conn_queue_dwork(info, 0); return 0; diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 2e8c28f979b9..3b080d49dad2 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1887,6 +1887,11 @@ static int dwc3_remove(struct platform_device *pdev) pm_runtime_allow(&pdev->dev); pm_runtime_disable(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); + /* + * HACK: Clear the driver data, which is currently accessed by parent + * glue drivers, before allowing the parent to suspend. 
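
The fsl_lpuart change earlier in this hunk group stops issuing a dummy UARTDATA read to clear parity/framing errors and instead writes the just-read UARTSTAT value back, because the 32-bit LPUART status flags are write-1-to-clear, while reading the data register can pop a byte out of the RX FIFO as a side effect. A toy model of a W1C register (the bit positions below are illustrative, not the real LPUART register map):

#include <stdint.h>
#include <stdio.h>

#define STAT_PE (1u << 16)   /* assumed example positions */
#define STAT_FE (1u << 17)

static uint32_t uartstat;    /* models a write-1-to-clear register */

static uint32_t stat_read(void) { return uartstat; }
static void stat_write(uint32_t v) { uartstat &= ~v; } /* W1C: 1 bits clear */

int main(void)
{
    uartstat = STAT_PE | STAT_FE;   /* hardware latched both errors */
    uint32_t sr = stat_read();
    if (sr & (STAT_PE | STAT_FE))
        stat_write(sr);             /* write the value back to clear it */
    printf("status after clear: 0x%08x\n", stat_read());
    return 0;
}
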
+ */ + platform_set_drvdata(pdev, NULL); pm_runtime_set_suspended(&pdev->dev); dwc3_free_event_buffers(dwc); @@ -1939,9 +1944,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) case DWC3_GCTL_PRTCAP_DEVICE: if (pm_runtime_suspended(dwc->dev)) break; - spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_suspend(dwc); - spin_unlock_irqrestore(&dwc->lock, flags); synchronize_irq(dwc->irq_gadget); dwc3_core_exit(dwc); break; @@ -2019,9 +2022,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) return ret; dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); - spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_resume(dwc); - spin_unlock_irqrestore(&dwc->lock, flags); break; case DWC3_GCTL_PRTCAP_HOST: if (!PMSG_IS_AUTO(msg)) { diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 73c0c381e5d0..0180350a2c95 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -306,7 +306,16 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom) /* Only usable in contexts where the role can not change. */ static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom) { - struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3); + struct dwc3 *dwc; + + /* + * FIXME: Fix this layering violation. + */ + dwc = platform_get_drvdata(qcom->dwc3); + + /* Core driver may not have probed yet. */ + if (!dwc) + return false; return dwc->xhci; } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index a122ec1cc393..fc0a344b05f5 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2412,7 +2412,7 @@ static void __dwc3_gadget_set_speed(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_DCFG, reg); } -static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) +static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on) { u32 reg; u32 timeout = 500; @@ -2431,17 +2431,11 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) reg &= ~DWC3_DCTL_KEEP_CONNECT; reg |= DWC3_DCTL_RUN_STOP; - if (dwc->has_hibernation) - reg |= DWC3_DCTL_KEEP_CONNECT; - __dwc3_gadget_set_speed(dwc); dwc->pullups_connected = true; } else { reg &= ~DWC3_DCTL_RUN_STOP; - if (dwc->has_hibernation && !suspend) - reg &= ~DWC3_DCTL_KEEP_CONNECT; - dwc->pullups_connected = false; } @@ -2487,7 +2481,22 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc) * remaining event generated by the controller while polling for * DSTS.DEVCTLHLT. */ - return dwc3_gadget_run_stop(dwc, false, false); + return dwc3_gadget_run_stop(dwc, false); +} + +static int dwc3_gadget_soft_connect(struct dwc3 *dwc) +{ + /* + * In the Synopsys DWC_usb31 1.90a programming guide section + * 4.1.9, it specifies that for a reconnect after a + * device-initiated disconnect requires a core soft reset + * (DCTL.CSftRst) before enabling the run/stop bit. + */ + dwc3_core_soft_reset(dwc); + + dwc3_event_buffers_setup(dwc); + __dwc3_gadget_start(dwc); + return dwc3_gadget_run_stop(dwc, true); } static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) @@ -2540,21 +2549,12 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) return 0; } - if (!is_on) { - ret = dwc3_gadget_soft_disconnect(dwc); - } else { - /* - * In the Synopsys DWC_usb31 1.90a programming guide section - * 4.1.9, it specifies that for a reconnect after a - * device-initiated disconnect requires a core soft reset - * (DCTL.CSftRst) before enabling the run/stop bit. 
- */ - dwc3_core_soft_reset(dwc); + synchronize_irq(dwc->irq_gadget); - dwc3_event_buffers_setup(dwc); - __dwc3_gadget_start(dwc); - ret = dwc3_gadget_run_stop(dwc, true, false); - } + if (!is_on) + ret = dwc3_gadget_soft_disconnect(dwc); + else + ret = dwc3_gadget_soft_connect(dwc); pm_runtime_put(dwc->dev); @@ -3529,7 +3529,7 @@ static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep, * streams are updated, and the device controller will not be * triggered to generate ERDY to move the next stream data. To * workaround this and maintain compatibility with various - * hosts, force to reinitate the stream until the host is ready + * hosts, force to reinitiate the stream until the host is ready * instead of waiting for the host to prime the endpoint. */ if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) { @@ -4051,30 +4051,6 @@ static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc, dwc->link_state = next; } -static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc, - unsigned int evtinfo) -{ - unsigned int is_ss = evtinfo & BIT(4); - - /* - * WORKAROUND: DWC3 revison 2.20a with hibernation support - * have a known issue which can cause USB CV TD.9.23 to fail - * randomly. - * - * Because of this issue, core could generate bogus hibernation - * events which SW needs to ignore. - * - * Refers to: - * - * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0 - * Device Fallback from SuperSpeed - */ - if (is_ss ^ (dwc->speed == USB_SPEED_SUPER)) - return; - - /* enter hibernation here */ -} - static void dwc3_gadget_interrupt(struct dwc3 *dwc, const struct dwc3_event_devt *event) { @@ -4092,11 +4068,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc, dwc3_gadget_wakeup_interrupt(dwc); break; case DWC3_DEVICE_EVENT_HIBER_REQ: - if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation, - "unexpected hibernation event\n")) - break; - - dwc3_gadget_hibernation_interrupt(dwc, event->event_info); + dev_WARN_ONCE(dwc->dev, true, "unexpected hibernation event\n"); break; case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); @@ -4204,9 +4176,14 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) u32 reg; if (pm_runtime_suspended(dwc->dev)) { + dwc->pending_events = true; + /* + * Trigger runtime resume. The get() function will be balanced + * after processing the pending events in dwc3_process_pending + * events(). + */ pm_runtime_get(dwc->dev); disable_irq_nosync(dwc->irq_gadget); - dwc->pending_events = true; return IRQ_HANDLED; } @@ -4436,44 +4413,48 @@ void dwc3_gadget_exit(struct dwc3 *dwc) int dwc3_gadget_suspend(struct dwc3 *dwc) { + unsigned long flags; + int ret; + if (!dwc->gadget_driver) return 0; - dwc3_gadget_run_stop(dwc, false, false); + ret = dwc3_gadget_soft_disconnect(dwc); + if (ret) + goto err; + + spin_lock_irqsave(&dwc->lock, flags); dwc3_disconnect_gadget(dwc); - __dwc3_gadget_stop(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); return 0; + +err: + /* + * Attempt to reset the controller's state. Likely no + * communication can be established until the host + * performs a port reset. 
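
The dwc3_check_event_buf() and dwc3_gadget_process_pending_events() hunks in this file pair the pm_runtime_get() taken in the interrupt path with a pm_runtime_put() issued only after the deferred events have actually been handled, and set pending_events before kicking the resume so the flag is already visible when the resume path runs. A schematic of that balanced get/put pattern, with plain counters standing in for the runtime-PM core (illustrative only):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int usage;                 /* stands in for the runtime-PM refcount */
static bool pending_events;

static void pm_runtime_get(void) { usage++; /* triggers async resume */ }
static void pm_runtime_put(void) { assert(usage > 0); usage--; }

static void irq_while_suspended(void)
{
    pending_events = true;        /* set BEFORE requesting resume */
    pm_runtime_get();             /* holds the device up until drained */
}

static void process_pending_events(void)
{
    if (pending_events) {
        /* ... run the hard-IRQ and threaded handlers here ... */
        pm_runtime_put();         /* balances the get() from the IRQ path */
        pending_events = false;
    }
}

int main(void)
{
    irq_while_suspended();
    process_pending_events();
    printf("usage count balanced: %d\n", usage);
    return 0;
}
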
+ */ + if (dwc->softconnect) + dwc3_gadget_soft_connect(dwc); + + return ret; } int dwc3_gadget_resume(struct dwc3 *dwc) { - int ret; - if (!dwc->gadget_driver || !dwc->softconnect) return 0; - ret = __dwc3_gadget_start(dwc); - if (ret < 0) - goto err0; - - ret = dwc3_gadget_run_stop(dwc, true, false); - if (ret < 0) - goto err1; - - return 0; - -err1: - __dwc3_gadget_stop(dwc); - -err0: - return ret; + return dwc3_gadget_soft_connect(dwc); } void dwc3_gadget_process_pending_events(struct dwc3 *dwc) { if (dwc->pending_events) { dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf); + dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf); + pm_runtime_put(dwc->dev); dwc->pending_events = false; enable_irq(dwc->irq_gadget); } diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index a8d1e8b192c5..f975dc03a190 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -915,8 +915,11 @@ static void __gs_console_push(struct gs_console *cons) } req->length = size; + + spin_unlock_irq(&cons->lock); if (usb_ep_queue(ep, req, GFP_ATOMIC)) req->length = 0; + spin_lock_irq(&cons->lock); } static void gs_console_work(struct work_struct *work) diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c index 7e4ce0e7e05a..dcc4778d1ae9 100644 --- a/drivers/usb/storage/alauda.c +++ b/drivers/usb/storage/alauda.c @@ -318,7 +318,8 @@ static int alauda_get_media_status(struct us_data *us, unsigned char *data) rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe, command, 0xc0, 0, 1, data, 2); - usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]); + if (rc == USB_STOR_XFER_GOOD) + usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]); return rc; } @@ -454,9 +455,14 @@ static int alauda_init_media(struct us_data *us) static int alauda_check_media(struct us_data *us) { struct alauda_info *info = (struct alauda_info *) us->extra; - unsigned char status[2]; + unsigned char *status = us->iobuf; + int rc; - alauda_get_media_status(us, status); + rc = alauda_get_media_status(us, status); + if (rc != USB_STOR_XFER_GOOD) { + status[0] = 0xF0; /* Pretend there's no media */ + status[1] = 0; + } /* Check for no media or door open */ if ((status[0] & 0x80) || ((status[0] & 0x1F) == 0x10) diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 4cf5c4f1f477..edd1d3b0a5f8 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -5245,6 +5245,10 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port) /* Do nothing, vbus drop expected */ break; + case SNK_HARD_RESET_WAIT_VBUS: + /* Do nothing, its OK to receive vbus off events */ + break; + default: if (port->pwr_role == TYPEC_SINK && port->attached) tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port)); @@ -5291,6 +5295,9 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port) case SNK_DEBOUNCED: /*Do nothing, still waiting for VSAFE5V for connect */ break; + case SNK_HARD_RESET_WAIT_VBUS: + /* Do nothing, its OK to receive vbus off events */ + break; default: if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled) tcpm_set_state(port, SNK_UNATTACHED, 0); diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index 30ae4237f3dd..564864f039d2 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -879,10 +879,10 @@ static void vduse_dev_irq_inject(struct work_struct *work) { struct vduse_dev *dev = container_of(work, struct 
vduse_dev, inject); - spin_lock_irq(&dev->irq_lock); + spin_lock_bh(&dev->irq_lock); if (dev->config_cb.callback) dev->config_cb.callback(dev->config_cb.private); - spin_unlock_irq(&dev->irq_lock); + spin_unlock_bh(&dev->irq_lock); } static void vduse_vq_irq_inject(struct work_struct *work) @@ -890,10 +890,10 @@ static void vduse_vq_irq_inject(struct work_struct *work) struct vduse_virtqueue *vq = container_of(work, struct vduse_virtqueue, inject); - spin_lock_irq(&vq->irq_lock); + spin_lock_bh(&vq->irq_lock); if (vq->ready && vq->cb.callback) vq->cb.callback(vq->cb.private); - spin_unlock_irq(&vq->irq_lock); + spin_unlock_bh(&vq->irq_lock); } static int vduse_dev_queue_irq_work(struct vduse_dev *dev, diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c index a4d05b1b17d7..665ef7a0a249 100644 --- a/drivers/video/fbdev/core/sysimgblt.c +++ b/drivers/video/fbdev/core/sysimgblt.c @@ -188,23 +188,29 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p, { u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel; u32 ppw = 32/bpp, spitch = (image->width + 7)/8; - u32 bit_mask, end_mask, eorx, shift; - const char *s = image->data, *src; + u32 bit_mask, eorx, shift; + const u8 *s = image->data, *src; u32 *dst; - const u32 *tab = NULL; + const u32 *tab; + size_t tablen; + u32 colortab[16]; int i, j, k; switch (bpp) { case 8: tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le; + tablen = 16; break; case 16: tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le; + tablen = 4; break; case 32: - default: tab = cfb_tab32; + tablen = 2; break; + default: + return; } for (i = ppw-1; i--; ) { @@ -218,20 +224,62 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p, eorx = fgx ^ bgx; k = image->width/ppw; + for (i = 0; i < tablen; ++i) + colortab[i] = (tab[i] & eorx) ^ bgx; + for (i = image->height; i--; ) { dst = dst1; shift = 8; src = s; - for (j = k; j--; ) { + /* + * Manually unroll the per-line copying loop for better + * performance. This works until we processed the last + * completely filled source byte (inclusive). + */ + switch (ppw) { + case 4: /* 8 bpp */ + for (j = k; j >= 2; j -= 2, ++src) { + *dst++ = colortab[(*src >> 4) & bit_mask]; + *dst++ = colortab[(*src >> 0) & bit_mask]; + } + break; + case 2: /* 16 bpp */ + for (j = k; j >= 4; j -= 4, ++src) { + *dst++ = colortab[(*src >> 6) & bit_mask]; + *dst++ = colortab[(*src >> 4) & bit_mask]; + *dst++ = colortab[(*src >> 2) & bit_mask]; + *dst++ = colortab[(*src >> 0) & bit_mask]; + } + break; + case 1: /* 32 bpp */ + for (j = k; j >= 8; j -= 8, ++src) { + *dst++ = colortab[(*src >> 7) & bit_mask]; + *dst++ = colortab[(*src >> 6) & bit_mask]; + *dst++ = colortab[(*src >> 5) & bit_mask]; + *dst++ = colortab[(*src >> 4) & bit_mask]; + *dst++ = colortab[(*src >> 3) & bit_mask]; + *dst++ = colortab[(*src >> 2) & bit_mask]; + *dst++ = colortab[(*src >> 1) & bit_mask]; + *dst++ = colortab[(*src >> 0) & bit_mask]; + } + break; + } + + /* + * For image widths that are not a multiple of 8, there + * are trailing pixels left on the current line. Print + * them as well. 
+ */ + for (; j--; ) { shift -= ppw; - end_mask = tab[(*src >> shift) & bit_mask]; - *dst++ = (end_mask & eorx) ^ bgx; + *dst++ = colortab[(*src >> shift) & bit_mask]; if (!shift) { shift = 8; - src++; + ++src; } } + dst1 += p->fix.line_length; s += spitch; } diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c index 061a105afb86..27c3ee5df8de 100644 --- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c +++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c @@ -518,7 +518,9 @@ static int mmphw_probe(struct platform_device *pdev) ret = -ENOENT; goto failed; } - clk_prepare_enable(ctrl->clk); + ret = clk_prepare_enable(ctrl->clk); + if (ret) + goto failed; /* init global regs */ ctrl_set_default(ctrl); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index fe696aafaed8..f4d43d60d710 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -572,9 +572,8 @@ static void virtio_mmio_release_dev(struct device *_d) struct virtio_device *vdev = container_of(_d, struct virtio_device, dev); struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); - struct platform_device *pdev = vm_dev->pdev; - devm_kfree(&pdev->dev, vm_dev); + kfree(vm_dev); } /* Platform device */ @@ -585,7 +584,7 @@ static int virtio_mmio_probe(struct platform_device *pdev) unsigned long magic; int rc; - vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL); + vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL); if (!vm_dev) return -ENOMEM; diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c index 1e327fb1ad20..0141858188c5 100644 --- a/drivers/watchdog/sp5100_tco.c +++ b/drivers/watchdog/sp5100_tco.c @@ -89,7 +89,7 @@ static enum tco_reg_layout tco_reg_layout(struct pci_dev *dev) sp5100_tco_pci->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS && sp5100_tco_pci->revision >= AMD_ZEN_SMBUS_PCI_REV) { return efch_mmio; - } else if (dev->vendor == PCI_VENDOR_ID_AMD && + } else if ((dev->vendor == PCI_VENDOR_ID_AMD || dev->vendor == PCI_VENDOR_ID_HYGON) && ((dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS && dev->revision >= 0x41) || (dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS && @@ -561,6 +561,8 @@ static const struct pci_device_id sp5100_tco_pci_tbl[] = { PCI_ANY_ID, }, { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, PCI_ANY_ID, + PCI_ANY_ID, }, { 0, }, /* End of list */ }; MODULE_DEVICE_TABLE(pci, sp5100_tco_pci_tbl); diff --git a/fs/attr.c b/fs/attr.c index 0ca14cbd4b8b..28e953e86960 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -47,6 +47,7 @@ int setattr_should_drop_sgid(struct user_namespace *mnt_userns, return ATTR_KILL_SGID; return 0; } +EXPORT_SYMBOL(setattr_should_drop_sgid); /** * setattr_should_drop_suidgid - determine whether the set{g,u}id bit needs to diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index d24cef671c1a..4ca6828586af 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1475,11 +1475,14 @@ void btrfs_mark_bg_unused(struct btrfs_block_group *bg) { struct btrfs_fs_info *fs_info = bg->fs_info; + trace_btrfs_add_unused_block_group(bg); spin_lock(&fs_info->unused_bgs_lock); if (list_empty(&bg->bg_list)) { btrfs_get_block_group(bg); - trace_btrfs_add_unused_block_group(bg); list_add_tail(&bg->bg_list, &fs_info->unused_bgs); + } else { + /* Pull out the block group from the reclaim_bgs list. 
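
The fast_imageblit() rework above hoists the per-pixel "(tab[bits] & eorx) ^ bgx" computation into a small colortab[] built once per blit, then unrolls the inner loop per pixels-per-word. A standalone demonstration of the table trick for the 32 bpp case, where one source bit selects a 2-entry table (example colors; not the kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t fgx = 0x00ff0000, bgx = 0x000000ff;  /* example fg/bg colors */
    uint32_t eorx = fgx ^ bgx;

    /* cfb_tab32 equivalent: a monochrome bit expands to all-zeroes or
     * all-ones, so masking with eorx selects fg or bg. */
    const uint32_t tab[2] = { 0x00000000, 0xffffffff };

    /* Precompute once per blit instead of once per pixel. */
    uint32_t colortab[2];
    for (int i = 0; i < 2; i++)
        colortab[i] = (tab[i] & eorx) ^ bgx;      /* 0 -> bgx, 1 -> fgx */

    uint8_t src = 0xA5;                           /* one row: 10100101 */
    uint32_t dst[8];
    for (int b = 0; b < 8; b++)                   /* the unrolled body */
        dst[b] = colortab[(src >> (7 - b)) & 1];

    for (int b = 0; b < 8; b++)
        printf("%08x%c", dst[b], b == 7 ? '\n' : ' ');
    return 0;
}
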
*/ + list_move_tail(&bg->bg_list, &fs_info->unused_bgs); } spin_unlock(&fs_info->unused_bgs_lock); } diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 3c0b3b4ec5ad..4e35c6fb7be7 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1437,7 +1437,8 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev) goto fail; if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID && - !btrfs_is_data_reloc_root(root)) { + !btrfs_is_data_reloc_root(root) && + is_fstree(root->root_key.objectid)) { set_bit(BTRFS_ROOT_SHAREABLE, &root->state); btrfs_check_and_init_root_item(&root->root_item); } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 750c1ff9947d..597cc2607481 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4334,8 +4334,11 @@ have_block_group: ret = 0; } - if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) + if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) { + if (!cache_block_group_error) + cache_block_group_error = -EIO; goto loop; + } bg_ret = NULL; ret = do_allocation(block_group, &ffe_ctl, &bg_ret); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 7bd704779a99..f9f6dfbc86bc 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4844,11 +4844,12 @@ retry: } /* - * the filesystem may choose to bump up nr_to_write. + * The filesystem may choose to bump up nr_to_write. * We have to make sure to honor the new nr_to_write - * at any time + * at any time. */ - nr_to_write_done = wbc->nr_to_write <= 0; + nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE && + wbc->nr_to_write <= 0); } pagevec_release(&pvec); cond_resched(); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index becf3396d533..dd8d47958a81 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1905,7 +1905,39 @@ again: err = PTR_ERR(root); break; } - ASSERT(root->reloc_root == reloc_root); + + if (unlikely(root->reloc_root != reloc_root)) { + if (root->reloc_root) { + btrfs_err(fs_info, +"reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu", + root->root_key.objectid, + root->reloc_root->root_key.objectid, + root->reloc_root->root_key.type, + root->reloc_root->root_key.offset, + btrfs_root_generation( + &root->reloc_root->root_item), + reloc_root->root_key.objectid, + reloc_root->root_key.type, + reloc_root->root_key.offset, + btrfs_root_generation( + &reloc_root->root_item)); + } else { + btrfs_err(fs_info, +"reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu", + root->root_key.objectid, + reloc_root->root_key.objectid, + reloc_root->root_key.type, + reloc_root->root_key.offset, + btrfs_root_generation( + &reloc_root->root_item)); + } + list_add(&reloc_root->root_list, &reloc_roots); + btrfs_put_root(root); + btrfs_abort_transaction(trans, -EUCLEAN); + if (!err) + err = -EUCLEAN; + break; + } /* * set reference count to 1, so btrfs_recover_relocation @@ -1978,7 +2010,7 @@ again: root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false); if (btrfs_root_refs(&reloc_root->root_item) > 0) { - if (IS_ERR(root)) { + if (WARN_ON(IS_ERR(root))) { /* * For recovery we read the fs roots on mount, * and if we didn't find the root then we marked @@ -1987,17 +2019,14 @@ again: * memory. However there's no reason we can't * handle the error properly here just in case. 
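
The extent_io.c hunk above narrows when the writeback loop may stop early: an integrity sync (WB_SYNC_ALL) must visit every dirty page even after the nr_to_write budget goes negative, so only background (WB_SYNC_NONE) writeback honors the budget now. A small model of the termination condition before and after the fix:

#include <stdbool.h>
#include <stdio.h>

enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

struct wbc { enum sync_mode sync_mode; long nr_to_write; };

/* Returns how many of `dirty` pages get written before the loop stops
 * (a model of the extent_io.c writepages loop, not the real code). */
static int writeback(struct wbc *wbc, int dirty)
{
    int written = 0;
    bool done = false;

    while (!done && written < dirty) {
        written++;
        wbc->nr_to_write--;
        /* After the fix: only non-sync writeback honors the budget. */
        done = (wbc->sync_mode == WB_SYNC_NONE && wbc->nr_to_write <= 0);
    }
    return written;
}

int main(void)
{
    struct wbc bg  = { WB_SYNC_NONE, 4 };
    struct wbc all = { WB_SYNC_ALL,  4 };
    printf("background: %d/10 pages\n", writeback(&bg, 10));  /* stops at 4 */
    printf("sync:       %d/10 pages\n", writeback(&all, 10)); /* writes 10 */
    return 0;
}
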
*/ - ASSERT(0); ret = PTR_ERR(root); goto out; } - if (root->reloc_root != reloc_root) { + if (WARN_ON(root->reloc_root != reloc_root)) { /* - * This is actually impossible without something - * going really wrong (like weird race condition - * or cosmic rays). + * This can happen if on-disk metadata has some + * corruption, e.g. bad reloc tree key offset. */ - ASSERT(0); ret = -EINVAL; goto out; } diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index a84d2d489510..bd71c7369794 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -442,6 +442,20 @@ static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key, btrfs_item_key_to_cpu(leaf, &item_key, slot); is_root_item = (item_key.type == BTRFS_ROOT_ITEM_KEY); + /* + * Bad rootid for reloc trees. + * + * Reloc trees are only for subvolume trees, other trees only need + * to be COWed to be relocated. + */ + if (unlikely(is_root_item && key->objectid == BTRFS_TREE_RELOC_OBJECTID && + !is_fstree(key->offset))) { + generic_err(leaf, slot, + "invalid reloc tree for root %lld, root id is not a subvolume tree", + key->offset); + return -EUCLEAN; + } + /* No such tree id */ if (unlikely(key->objectid == 0)) { if (is_root_item) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 5e191860e8a8..0e9236a745b8 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4636,8 +4636,7 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) } } - BUG_ON(fs_info->balance_ctl || - test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); + ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); atomic_dec(&fs_info->balance_cancel_req); mutex_unlock(&fs_info->balance_mutex); return 0; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 0dc8871a4b66..a0b6ae02a70b 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -4607,7 +4607,7 @@ static void delayed_work(struct work_struct *work) dout("mdsc delayed_work\n"); - if (mdsc->stopping) + if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED) return; mutex_lock(&mdsc->mutex); @@ -4786,7 +4786,7 @@ void send_flush_mdlog(struct ceph_mds_session *s) void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) { dout("pre_umount\n"); - mdsc->stopping = 1; + mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN; ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true); ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 2667350eb72c..cd943842f0a3 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -370,6 +370,11 @@ struct cap_wait { int want; }; +enum { + CEPH_MDSC_STOPPING_BEGIN = 1, + CEPH_MDSC_STOPPING_FLUSHED = 2, +}; + /* * mds client state */ diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 202ddde3d62a..1723ec21cd47 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -1227,6 +1227,16 @@ static void ceph_kill_sb(struct super_block *s) ceph_mdsc_pre_umount(fsc->mdsc); flush_fs_workqueues(fsc); + /* + * Though the kill_anon_super() will finally trigger the + * sync_filesystem() anyway, we still need to do it here + * and then bump the stage of shutdown to stop the work + * queue as earlier as possible. 
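The ceph changes above widen mdsc->stopping from a boolean into ordered stages, so the delayed worker may still run between "begin" (flush the mdlog, stop taking new work) and "flushed" (superblock synced, workers must bail). A self-contained sketch of the staged check, with illustrative names:

enum shutdown_stage {
        SHUTDOWN_NONE = 0,
        SHUTDOWN_BEGIN = 1,     /* pre-umount: flushing, worker may still run */
        SHUTDOWN_FLUSHED = 2,   /* synced: worker must not requeue itself */
};

struct client_state {
        int stopping;           /* holds an enum shutdown_stage value */
};

static void delayed_tick(struct client_state *st)
{
        if (st->stopping >= SHUTDOWN_FLUSHED)
                return;         /* too far into teardown */
        /* ... periodic maintenance, may requeue itself ... */
}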
+ */ + sync_filesystem(s); + + fsc->mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED; + kill_anon_super(s); fsc->client->extra_mon_dispatch = NULL; diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index c9481289266c..b5ae209539ff 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -848,11 +848,11 @@ struct dentry * cifs_smb3_do_mount(struct file_system_type *fs_type, int flags, struct smb3_fs_context *old_ctx) { - int rc; - struct super_block *sb = NULL; - struct cifs_sb_info *cifs_sb = NULL; struct cifs_mnt_data mnt_data; + struct cifs_sb_info *cifs_sb; + struct super_block *sb; struct dentry *root; + int rc; /* * Prints in Kernel / CIFS log the attempted mount operation @@ -863,11 +863,9 @@ cifs_smb3_do_mount(struct file_system_type *fs_type, else cifs_info("Attempting to mount %s\n", old_ctx->UNC); - cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL); - if (cifs_sb == NULL) { - root = ERR_PTR(-ENOMEM); - goto out; - } + cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL); + if (!cifs_sb) + return ERR_PTR(-ENOMEM); cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL); if (!cifs_sb->ctx) { @@ -910,10 +908,8 @@ cifs_smb3_do_mount(struct file_system_type *fs_type, sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data); if (IS_ERR(sb)) { - root = ERR_CAST(sb); cifs_umount(cifs_sb); - cifs_sb = NULL; - goto out; + return ERR_CAST(sb); } if (sb->s_root) { @@ -944,13 +940,9 @@ out_super: deactivate_locked_super(sb); return root; out: - if (cifs_sb) { - if (!sb || IS_ERR(sb)) { /* otherwise kill_sb will handle */ - kfree(cifs_sb->prepath); - smb3_cleanup_fs_context(cifs_sb->ctx); - kfree(cifs_sb); - } - } + kfree(cifs_sb->prepath); + smb3_cleanup_fs_context(cifs_sb->ctx); + kfree(cifs_sb); return root; } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index e65fbae9e804..9e8a69f9421e 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4671,9 +4671,9 @@ static int cifs_readpage_worker(struct file *file, struct page *page, io_error: kunmap(page); - unlock_page(page); read_complete: + unlock_page(page); return rc; } @@ -4865,9 +4865,11 @@ void cifs_oplock_break(struct work_struct *work) struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, oplock_break); struct inode *inode = d_inode(cfile->dentry); + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifsInodeInfo *cinode = CIFS_I(inode); - struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); - struct TCP_Server_Info *server = tcon->ses->server; + struct cifs_tcon *tcon; + struct TCP_Server_Info *server; + struct tcon_link *tlink; int rc = 0; bool purge_cache = false, oplock_break_cancelled; __u64 persistent_fid, volatile_fid; @@ -4876,6 +4878,12 @@ void cifs_oplock_break(struct work_struct *work) wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, TASK_UNINTERRUPTIBLE); + tlink = cifs_sb_tlink(cifs_sb); + if (IS_ERR(tlink)) + goto out; + tcon = tlink_tcon(tlink); + server = tcon->ses->server; + server->ops->downgrade_oplock(server, cinode, cfile->oplock_level, cfile->oplock_epoch, &purge_cache); @@ -4925,18 +4933,19 @@ oplock_break_ack: /* * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require * an acknowledgment to be sent when the file has already been closed. - * check for server null, since can race with kill_sb calling tree disconnect. 
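In the cifs_oplock_break() hunk above, the worker stops caching tcon/server pointers taken from the open file at entry and instead acquires a counted tlink reference for the lifetime of the handler. The general shape, sketched with hypothetical lookup/put helpers that stand in for cifs_sb_tlink()/cifs_put_tlink():

#include <linux/err.h>

struct tcon_link;                           /* opaque, refcounted */
struct tcon_link *grab_link(void);          /* hypothetical: counted ref or ERR_PTR */
void put_link(struct tcon_link *tlink);     /* hypothetical: drop the ref */

static void oplock_break_worker(void)
{
        struct tcon_link *tlink = grab_link();

        if (IS_ERR(tlink))
                return;         /* connection already torn down */

        /* tlink, and anything reached through it, stays pinned for the handler */

        put_link(tlink);
}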
*/ spin_lock(&cinode->open_file_lock); - if (tcon->ses && tcon->ses->server && !oplock_break_cancelled && - !list_empty(&cinode->openFileList)) { + /* check list empty since can race with kill_sb calling tree disconnect */ + if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) { spin_unlock(&cinode->open_file_lock); - rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid, - volatile_fid, net_fid, cinode); + rc = server->ops->oplock_response(tcon, persistent_fid, + volatile_fid, net_fid, cinode); cifs_dbg(FYI, "Oplock release rc = %d\n", rc); } else spin_unlock(&cinode->open_file_lock); + cifs_put_tlink(tlink); +out: cifs_done_oplock_break(cinode); } diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 862cb7a353c1..b9829b873bf2 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -1856,7 +1856,7 @@ static void del_timeout(struct dlm_lkb *lkb) void dlm_scan_timeout(struct dlm_ls *ls) { struct dlm_rsb *r; - struct dlm_lkb *lkb; + struct dlm_lkb *lkb = NULL, *iter; int do_cancel, do_warn; s64 wait_us; @@ -1867,27 +1867,28 @@ void dlm_scan_timeout(struct dlm_ls *ls) do_cancel = 0; do_warn = 0; mutex_lock(&ls->ls_timeout_mutex); - list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) { + list_for_each_entry(iter, &ls->ls_timeout, lkb_time_list) { wait_us = ktime_to_us(ktime_sub(ktime_get(), - lkb->lkb_timestamp)); + iter->lkb_timestamp)); - if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) && - wait_us >= (lkb->lkb_timeout_cs * 10000)) + if ((iter->lkb_exflags & DLM_LKF_TIMEOUT) && + wait_us >= (iter->lkb_timeout_cs * 10000)) do_cancel = 1; - if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) && + if ((iter->lkb_flags & DLM_IFL_WATCH_TIMEWARN) && wait_us >= dlm_config.ci_timewarn_cs * 10000) do_warn = 1; if (!do_cancel && !do_warn) continue; - hold_lkb(lkb); + hold_lkb(iter); + lkb = iter; break; } mutex_unlock(&ls->ls_timeout_mutex); - if (!do_cancel && !do_warn) + if (!lkb) break; r = lkb->lkb_resource; @@ -5239,21 +5240,18 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls) static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls) { - struct dlm_lkb *lkb; - int found = 0; + struct dlm_lkb *lkb = NULL, *iter; mutex_lock(&ls->ls_waiters_mutex); - list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { - if (lkb->lkb_flags & DLM_IFL_RESEND) { - hold_lkb(lkb); - found = 1; + list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) { + if (iter->lkb_flags & DLM_IFL_RESEND) { + hold_lkb(iter); + lkb = iter; break; } } mutex_unlock(&ls->ls_waiters_mutex); - if (!found) - lkb = NULL; return lkb; } @@ -5912,37 +5910,36 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, int mode, uint32_t flags, void *name, unsigned int namelen, unsigned long timeout_cs, uint32_t *lkid) { - struct dlm_lkb *lkb; + struct dlm_lkb *lkb = NULL, *iter; struct dlm_user_args *ua; int found_other_mode = 0; - int found = 0; int rv = 0; mutex_lock(&ls->ls_orphans_mutex); - list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) { - if (lkb->lkb_resource->res_length != namelen) + list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) { + if (iter->lkb_resource->res_length != namelen) continue; - if (memcmp(lkb->lkb_resource->res_name, name, namelen)) + if (memcmp(iter->lkb_resource->res_name, name, namelen)) continue; - if (lkb->lkb_grmode != mode) { + if (iter->lkb_grmode != mode) { found_other_mode = 1; continue; } - found = 1; - list_del_init(&lkb->lkb_ownqueue); - lkb->lkb_flags &= ~DLM_IFL_ORPHAN; - *lkid = lkb->lkb_id; + lkb = iter; + list_del_init(&iter->lkb_ownqueue); + 
iter->lkb_flags &= ~DLM_IFL_ORPHAN; + *lkid = iter->lkb_id; break; } mutex_unlock(&ls->ls_orphans_mutex); - if (!found && found_other_mode) { + if (!lkb && found_other_mode) { rv = -EAGAIN; goto out; } - if (!found) { + if (!lkb) { rv = -ENOENT; goto out; } diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c index f3482e936cc2..28735e8c5e20 100644 --- a/fs/dlm/plock.c +++ b/fs/dlm/plock.c @@ -80,8 +80,7 @@ static void send_op(struct plock_op *op) abandoned waiter. So, we have to insert the unlock-close when the lock call is interrupted. */ -static void do_unlock_close(struct dlm_ls *ls, u64 number, - struct file *file, struct file_lock *fl) +static void do_unlock_close(const struct dlm_plock_info *info) { struct plock_op *op; @@ -90,15 +89,12 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number, return; op->info.optype = DLM_PLOCK_OP_UNLOCK; - op->info.pid = fl->fl_pid; - op->info.fsid = ls->ls_global_id; - op->info.number = number; + op->info.pid = info->pid; + op->info.fsid = info->fsid; + op->info.number = info->number; op->info.start = 0; op->info.end = OFFSET_MAX; - if (fl->fl_lmops && fl->fl_lmops->lm_grant) - op->info.owner = (__u64) fl->fl_pid; - else - op->info.owner = (__u64)(long) fl->fl_owner; + op->info.owner = info->owner; op->info.flags |= DLM_PLOCK_FL_CLOSE; send_op(op); @@ -161,13 +157,14 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, rv = wait_event_killable(recv_wq, (op->done != 0)); if (rv == -ERESTARTSYS) { - log_debug(ls, "%s: wait killed %llx", __func__, - (unsigned long long)number); spin_lock(&ops_lock); list_del(&op->list); spin_unlock(&ops_lock); + log_debug(ls, "%s: wait interrupted %x %llx pid %d", + __func__, ls->ls_global_id, + (unsigned long long)number, op->info.pid); dlm_release_plock_op(op); - do_unlock_close(ls, number, file, fl); + do_unlock_close(&op->info); goto out; } @@ -408,7 +405,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count, if (op->info.flags & DLM_PLOCK_FL_CLOSE) list_del(&op->list); else - list_move(&op->list, &recv_list); + list_move_tail(&op->list, &recv_list); memcpy(&info, &op->info, sizeof(info)); } spin_unlock(&ops_lock); @@ -433,9 +430,9 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count, static ssize_t dev_write(struct file *file, const char __user *u, size_t count, loff_t *ppos) { + struct plock_op *op = NULL, *iter; struct dlm_plock_info info; - struct plock_op *op; - int found = 0, do_callback = 0; + int do_callback = 0; if (count != sizeof(info)) return -EINVAL; @@ -446,31 +443,63 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count, if (check_version(&info)) return -EINVAL; + /* + * The results for waiting ops (SETLKW) can be returned in any + * order, so match all fields to find the op. The results for + * non-waiting ops are returned in the order that they were sent + * to userspace, so match the result with the first non-waiting op. 
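The dev_write() rewrite below implements exactly the policy its new comment describes. Sketched standalone with simplified fields: waiting ops are matched on full identity, while non-waiting ops are first-come-first-served because their replies arrive in send order:

#include <linux/list.h>

struct plock_info { unsigned int fsid; unsigned long long number, owner; int wait; };
struct plock_op { struct list_head list; struct plock_info info; };

static struct plock_op *match_reply(struct list_head *recv,
                                    const struct plock_info *info)
{
        struct plock_op *iter;

        list_for_each_entry(iter, recv, list) {
                if (info->wait) {
                        if (iter->info.wait &&
                            iter->info.fsid == info->fsid &&
                            iter->info.number == info->number &&
                            iter->info.owner == info->owner)
                                return iter;    /* identity match for SETLKW */
                } else if (!iter->info.wait) {
                        return iter;            /* first queued non-waiter */
                }
        }
        return NULL;
}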
+ */ spin_lock(&ops_lock); - list_for_each_entry(op, &recv_list, list) { - if (op->info.fsid == info.fsid && - op->info.number == info.number && - op->info.owner == info.owner) { - list_del_init(&op->list); - memcpy(&op->info, &info, sizeof(info)); - if (op->data) - do_callback = 1; - else - op->done = 1; - found = 1; - break; + if (info.wait) { + list_for_each_entry(iter, &recv_list, list) { + if (iter->info.fsid == info.fsid && + iter->info.number == info.number && + iter->info.owner == info.owner && + iter->info.pid == info.pid && + iter->info.start == info.start && + iter->info.end == info.end && + iter->info.ex == info.ex && + iter->info.wait) { + op = iter; + break; + } + } + } else { + list_for_each_entry(iter, &recv_list, list) { + if (!iter->info.wait) { + op = iter; + break; + } } } + + if (op) { + /* Sanity check that op and info match. */ + if (info.wait) + WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK); + else + WARN_ON(op->info.fsid != info.fsid || + op->info.number != info.number || + op->info.owner != info.owner || + op->info.optype != info.optype); + + list_del_init(&op->list); + memcpy(&op->info, &info, sizeof(info)); + if (op->data) + do_callback = 1; + else + op->done = 1; + } spin_unlock(&ops_lock); - if (found) { + if (op) { if (do_callback) dlm_plock_callback(op); else wake_up(&recv_wq); } else - log_print("dev_write no op %x %llx", info.fsid, - (unsigned long long)info.number); + log_print("%s: no op %x %llx", __func__, + info.fsid, (unsigned long long)info.number); return count; } diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index 8928e99dfd47..df18f38a0273 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c @@ -732,10 +732,9 @@ void dlm_recovered_lock(struct dlm_rsb *r) static void recover_lvb(struct dlm_rsb *r) { - struct dlm_lkb *lkb, *high_lkb = NULL; + struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL; uint32_t high_seq = 0; int lock_lvb_exists = 0; - int big_lock_exists = 0; int lvblen = r->res_ls->ls_lvblen; if (!rsb_flag(r, RSB_NEW_MASTER2) && @@ -751,37 +750,37 @@ static void recover_lvb(struct dlm_rsb *r) /* we are the new master, so figure out if VALNOTVALID should be set, and set the rsb lvb from the best lkb available. 
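recover_lvb() below, like the dlm_scan_timeout(), find_resend_waiter() and orphan-adoption hunks earlier, applies one recurring fix: the list_for_each_entry() cursor is renamed to iter and any match is published through a separate pointer, because once the loop terminates without a break the cursor no longer points at a valid entry. The pattern in isolation (illustrative types):

#include <linux/list.h>

struct lkb { struct list_head list; unsigned int flags; };
#define LKB_RESEND 0x1

static struct lkb *find_resend(struct list_head *head)
{
        struct lkb *lkb = NULL, *iter;

        list_for_each_entry(iter, head, list) {
                if (iter->flags & LKB_RESEND) {
                        lkb = iter;     /* publish the match explicitly */
                        break;
                }
        }

        /* Only 'lkb' is meaningful here; 'iter' must not be dereferenced. */
        return lkb;
}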
*/ - list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) { - if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) + list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) { + if (!(iter->lkb_exflags & DLM_LKF_VALBLK)) continue; lock_lvb_exists = 1; - if (lkb->lkb_grmode > DLM_LOCK_CR) { - big_lock_exists = 1; + if (iter->lkb_grmode > DLM_LOCK_CR) { + big_lkb = iter; goto setflag; } - if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) { - high_lkb = lkb; - high_seq = lkb->lkb_lvbseq; + if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) { + high_lkb = iter; + high_seq = iter->lkb_lvbseq; } } - list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) { - if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) + list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) { + if (!(iter->lkb_exflags & DLM_LKF_VALBLK)) continue; lock_lvb_exists = 1; - if (lkb->lkb_grmode > DLM_LOCK_CR) { - big_lock_exists = 1; + if (iter->lkb_grmode > DLM_LOCK_CR) { + big_lkb = iter; goto setflag; } - if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) { - high_lkb = lkb; - high_seq = lkb->lkb_lvbseq; + if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) { + high_lkb = iter; + high_seq = iter->lkb_lvbseq; } } @@ -790,7 +789,7 @@ static void recover_lvb(struct dlm_rsb *r) goto out; /* lvb is invalidated if only NL/CR locks remain */ - if (!big_lock_exists) + if (!big_lkb) rsb_set_flag(r, RSB_VALNOTVALID); if (!r->res_lvbptr) { @@ -799,9 +798,9 @@ static void recover_lvb(struct dlm_rsb *r) goto out; } - if (big_lock_exists) { - r->res_lvbseq = lkb->lkb_lvbseq; - memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen); + if (big_lkb) { + r->res_lvbseq = big_lkb->lkb_lvbseq; + memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen); } else if (high_lkb) { r->res_lvbseq = high_lkb->lkb_lvbseq; memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen); diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c index e2113e0a848c..1dce6b4e9088 100644 --- a/fs/exfat/balloc.c +++ b/fs/exfat/balloc.c @@ -69,7 +69,7 @@ static int exfat_allocate_bitmap(struct super_block *sb, } sbi->map_sectors = ((need_map_size - 1) >> (sb->s_blocksize_bits)) + 1; - sbi->vol_amap = kmalloc_array(sbi->map_sectors, + sbi->vol_amap = kvmalloc_array(sbi->map_sectors, sizeof(struct buffer_head *), GFP_KERNEL); if (!sbi->vol_amap) return -ENOMEM; @@ -84,7 +84,7 @@ static int exfat_allocate_bitmap(struct super_block *sb, while (j < i) brelse(sbi->vol_amap[j++]); - kfree(sbi->vol_amap); + kvfree(sbi->vol_amap); sbi->vol_amap = NULL; return -EIO; } @@ -138,7 +138,7 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi) for (i = 0; i < sbi->map_sectors; i++) __brelse(sbi->vol_amap[i]); - kfree(sbi->vol_amap); + kvfree(sbi->vol_amap); } int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync) diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index 3940a56902dd..f6dd4fc8eaf4 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -34,6 +34,7 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb, { int i; struct exfat_entry_set_cache *es; + unsigned int uni_len = 0, len; es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES); if (!es) @@ -52,7 +53,10 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb, if (exfat_get_entry_type(ep) != TYPE_EXTEND) break; - exfat_extract_uni_name(ep, uniname); + len = exfat_extract_uni_name(ep, uniname); + uni_len += len; + if (len != EXFAT_FILE_NAME_LEN || uni_len >= MAX_NAME_LENGTH) + break; uniname += EXFAT_FILE_NAME_LEN; } @@ -211,7 +215,10 @@ static void exfat_free_namebuf(struct exfat_dentry_namebuf *nb) 
exfat_init_namebuf(nb); } -/* skip iterating emit_dots when dir is empty */ +/* + * Before calling dir_emit*(), sbi->s_lock should be released + * because page fault can occur in dir_emit*(). + */ #define ITER_POS_FILLED_DOTS (2) static int exfat_iterate(struct file *filp, struct dir_context *ctx) { @@ -226,11 +233,10 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx) int err = 0, fake_offset = 0; exfat_init_namebuf(nb); - mutex_lock(&EXFAT_SB(sb)->s_lock); cpos = ctx->pos; if (!dir_emit_dots(filp, ctx)) - goto unlock; + goto out; if (ctx->pos == ITER_POS_FILLED_DOTS) { cpos = 0; @@ -242,16 +248,18 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx) /* name buffer should be allocated before use */ err = exfat_alloc_namebuf(nb); if (err) - goto unlock; + goto out; get_new: + mutex_lock(&EXFAT_SB(sb)->s_lock); + if (ei->flags == ALLOC_NO_FAT_CHAIN && cpos >= i_size_read(inode)) goto end_of_dir; err = exfat_readdir(inode, &cpos, &de); if (err) { /* - * At least we tried to read a sector. Move cpos to next sector - * position (should be aligned). + * At least we tried to read a sector. + * Move cpos to next sector position (should be aligned). */ if (err == -EIO) { cpos += 1 << (sb->s_blocksize_bits); @@ -274,16 +282,10 @@ get_new: inum = iunique(sb, EXFAT_ROOT_INO); } - /* - * Before calling dir_emit(), sb_lock should be released. - * Because page fault can occur in dir_emit() when the size - * of buffer given from user is larger than one page size. - */ mutex_unlock(&EXFAT_SB(sb)->s_lock); if (!dir_emit(ctx, nb->lfn, strlen(nb->lfn), inum, (de.attr & ATTR_SUBDIR) ? DT_DIR : DT_REG)) - goto out_unlocked; - mutex_lock(&EXFAT_SB(sb)->s_lock); + goto out; ctx->pos = cpos; goto get_new; @@ -291,9 +293,8 @@ end_of_dir: if (!cpos && fake_offset) cpos = ITER_POS_FILLED_DOTS; ctx->pos = cpos; -unlock: mutex_unlock(&EXFAT_SB(sb)->s_lock); -out_unlocked: +out: /* * To improve performance, free namebuf after unlock sb_lock. 
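The exfat_iterate() rework above narrows s_lock so it is never held across dir_emit_dots()/dir_emit(): filling the user buffer can page-fault and sleep, and on some paths fault back into the filesystem. A sketch of the drop-around-callback shape (hypothetical wrapper; note the deliberately asymmetric lock state on the "buffer full" return, which the caller must honor):

#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/string.h>

/* Returns 0 with 'lock' re-held, or -EINVAL with 'lock' released. */
static int emit_unlocked(struct mutex *lock, struct dir_context *ctx,
                         const char *name, u64 ino, unsigned int type)
{
        mutex_unlock(lock);             /* dir_emit() may fault on user memory */
        if (!dir_emit(ctx, name, strlen(name), ino, type))
                return -EINVAL;         /* caller stops iterating, lock stays dropped */
        mutex_lock(lock);               /* reacquire before touching fs state */
        return 0;
}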
* If namebuf is not allocated, this function do nothing @@ -1035,7 +1036,8 @@ rewind: if (entry_type == TYPE_EXTEND) { unsigned short entry_uniname[16], unichar; - if (step != DIRENT_STEP_NAME) { + if (step != DIRENT_STEP_NAME || + name_len >= MAX_NAME_LENGTH) { step = DIRENT_STEP_FILE; continue; } diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index a610c096f3a9..5207ce805a39 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -70,10 +70,7 @@ struct mb_cache; * second extended-fs super-block data in memory */ struct ext2_sb_info { - unsigned long s_frag_size; /* Size of a fragment in bytes */ - unsigned long s_frags_per_block;/* Number of fragments per block */ unsigned long s_inodes_per_block;/* Number of inodes per block */ - unsigned long s_frags_per_group;/* Number of fragments in a group */ unsigned long s_blocks_per_group;/* Number of blocks in a group */ unsigned long s_inodes_per_group;/* Number of inodes in a group */ unsigned long s_itb_per_group; /* Number of inode table blocks per group */ @@ -188,15 +185,6 @@ static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb) #define EXT2_FIRST_INO(s) (EXT2_SB(s)->s_first_ino) /* - * Macro-instructions used to manage fragments - */ -#define EXT2_MIN_FRAG_SIZE 1024 -#define EXT2_MAX_FRAG_SIZE 4096 -#define EXT2_MIN_FRAG_LOG_SIZE 10 -#define EXT2_FRAG_SIZE(s) (EXT2_SB(s)->s_frag_size) -#define EXT2_FRAGS_PER_BLOCK(s) (EXT2_SB(s)->s_frags_per_block) - -/* * Structure of a blocks group descriptor */ struct ext2_group_desc diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 486a43e34795..81798b7cbde2 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -670,10 +670,9 @@ static int ext2_setup_super (struct super_block * sb, es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT); le16_add_cpu(&es->s_mnt_count, 1); if (test_opt (sb, DEBUG)) - ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, " + ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, gc=%lu, " "bpg=%lu, ipg=%lu, mo=%04lx]", EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize, - sbi->s_frag_size, sbi->s_groups_count, EXT2_BLOCKS_PER_GROUP(sb), EXT2_INODES_PER_GROUP(sb), @@ -1012,14 +1011,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) } } - sbi->s_frag_size = EXT2_MIN_FRAG_SIZE << - le32_to_cpu(es->s_log_frag_size); - if (sbi->s_frag_size == 0) - goto cantfind_ext2; - sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size; - sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); - sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group); sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb); @@ -1045,11 +1037,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount; } - if (sb->s_blocksize != sbi->s_frag_size) { + if (es->s_log_frag_size != es->s_log_block_size) { ext2_msg(sb, KERN_ERR, - "error: fragsize %lu != blocksize %lu" - "(not supported yet)", - sbi->s_frag_size, sb->s_blocksize); + "error: fragsize log %u != blocksize log %u", + le32_to_cpu(es->s_log_frag_size), sb->s_blocksize_bits); goto failed_mount; } @@ -1059,12 +1050,6 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) sbi->s_blocks_per_group); goto failed_mount; } - if (sbi->s_frags_per_group > sb->s_blocksize * 8) { - ext2_msg(sb, KERN_ERR, - "error: #fragments per group too big: %lu", - sbi->s_frags_per_group); - goto failed_mount; - } if (sbi->s_inodes_per_group < sbi->s_inodes_per_block || sbi->s_inodes_per_group > 
sb->s_blocksize * 8) { ext2_msg(sb, KERN_ERR, diff --git a/fs/file.c b/fs/file.c index 1501bbf6306e..69a51d37b66d 100644 --- a/fs/file.c +++ b/fs/file.c @@ -1062,12 +1062,28 @@ unsigned long __fdget_raw(unsigned int fd) return __fget_light(fd, 0); } +/* + * Try to avoid f_pos locking. We only need it if the + * file is marked for FMODE_ATOMIC_POS, and it can be + * accessed multiple ways. + * + * Always do it for directories, because pidfd_getfd() + * can make a file accessible even if it otherwise would + * not be, and for directories this is a correctness + * issue, not a "POSIX requirement". + */ +static inline bool file_needs_f_pos_lock(struct file *file) +{ + return (file->f_mode & FMODE_ATOMIC_POS) && + (file_count(file) > 1 || S_ISDIR(file_inode(file)->i_mode)); +} + unsigned long __fdget_pos(unsigned int fd) { unsigned long v = __fdget(fd); struct file *file = (struct file *)(v & ~3); - if (file && (file->f_mode & FMODE_ATOMIC_POS)) { + if (file && file_needs_f_pos_lock(file)) { v |= FDPUT_POS_UNLOCK; mutex_lock(&file->f_pos_lock); } diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index ca6ee1cbccd5..51b44da4a0d6 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -980,7 +980,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root) { struct gfs2_sbd *sdp = root->d_sb->s_fs_info; struct gfs2_args *args = &sdp->sd_args; - int val; + unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum; + + spin_lock(&sdp->sd_tune.gt_spin); + logd_secs = sdp->sd_tune.gt_logd_secs; + quota_quantum = sdp->sd_tune.gt_quota_quantum; + statfs_quantum = sdp->sd_tune.gt_statfs_quantum; + statfs_slow = sdp->sd_tune.gt_statfs_slow; + spin_unlock(&sdp->sd_tune.gt_spin); if (is_ancestor(root, sdp->sd_master_dir)) seq_puts(s, ",meta"); @@ -1035,17 +1042,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root) } if (args->ar_discard) seq_puts(s, ",discard"); - val = sdp->sd_tune.gt_logd_secs; - if (val != 30) - seq_printf(s, ",commit=%d", val); - val = sdp->sd_tune.gt_statfs_quantum; - if (val != 30) - seq_printf(s, ",statfs_quantum=%d", val); - else if (sdp->sd_tune.gt_statfs_slow) + if (logd_secs != 30) + seq_printf(s, ",commit=%d", logd_secs); + if (statfs_quantum != 30) + seq_printf(s, ",statfs_quantum=%d", statfs_quantum); + else if (statfs_slow) seq_puts(s, ",statfs_quantum=0"); - val = sdp->sd_tune.gt_quota_quantum; - if (val != 60) - seq_printf(s, ",quota_quantum=%d", val); + if (quota_quantum != 60) + seq_printf(s, ",quota_quantum=%d", quota_quantum); if (args->ar_statfs_percent) seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent); if (args->ar_errors != GFS2_ERRORS_DEFAULT) { diff --git a/fs/internal.h b/fs/internal.h index 1ff8cfc94467..d241eaa0c58b 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -235,5 +235,3 @@ int do_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry, /* * fs/attr.c */ -int setattr_should_drop_sgid(struct user_namespace *mnt_userns, - const struct inode *inode); diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index d2aba55833f9..fc6989e7a8c5 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -27,7 +27,7 @@ * * Called with j_list_lock held. */ -static inline void __buffer_unlink_first(struct journal_head *jh) +static inline void __buffer_unlink(struct journal_head *jh) { transaction_t *transaction = jh->b_cp_transaction; @@ -41,23 +41,6 @@ static inline void __buffer_unlink_first(struct journal_head *jh) } /* - * Unlink a buffer from a transaction checkpoint(io) list. 
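The gfs2_show_options() hunk above is a small but classic fix: all four tunables are snapshotted under gt_spin into locals, then formatted after the lock is dropped, so the printed values are mutually consistent and seq_printf() never runs under the spinlock. The pattern, reduced to two fields with illustrative names:

#include <linux/seq_file.h>
#include <linux/spinlock.h>

struct tunables {
        spinlock_t lock;
        unsigned int logd_secs;
        unsigned int quota_quantum;
};

static void show_tunables(struct seq_file *s, struct tunables *gt)
{
        unsigned int logd, quota;

        spin_lock(&gt->lock);
        logd  = gt->logd_secs;          /* one consistent snapshot... */
        quota = gt->quota_quantum;
        spin_unlock(&gt->lock);

        if (logd != 30)                 /* ...formatted outside the lock */
                seq_printf(s, ",commit=%u", logd);
        if (quota != 60)
                seq_printf(s, ",quota_quantum=%u", quota);
}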
- * - * Called with j_list_lock held. - */ -static inline void __buffer_unlink(struct journal_head *jh) -{ - transaction_t *transaction = jh->b_cp_transaction; - - __buffer_unlink_first(jh); - if (transaction->t_checkpoint_io_list == jh) { - transaction->t_checkpoint_io_list = jh->b_cpnext; - if (transaction->t_checkpoint_io_list == jh) - transaction->t_checkpoint_io_list = NULL; - } -} - -/* * Check a checkpoint buffer could be release or not. * * Requires j_list_lock @@ -367,49 +350,9 @@ int jbd2_cleanup_journal_tail(journal_t *journal) /* Checkpoint list management */ /* - * journal_clean_one_cp_list - * - * Find all the written-back checkpoint buffers in the given list and - * release them. If 'destroy' is set, clean all buffers unconditionally. - * - * Called with j_list_lock held. - * Returns 1 if we freed the transaction, 0 otherwise. - */ -static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy) -{ - struct journal_head *last_jh; - struct journal_head *next_jh = jh; - - if (!jh) - return 0; - - last_jh = jh->b_cpprev; - do { - jh = next_jh; - next_jh = jh->b_cpnext; - - if (!destroy && __cp_buffer_busy(jh)) - return 0; - - if (__jbd2_journal_remove_checkpoint(jh)) - return 1; - /* - * This function only frees up some memory - * if possible so we dont have an obligation - * to finish processing. Bail out if preemption - * requested: - */ - if (need_resched()) - return 0; - } while (jh != last_jh); - - return 0; -} - -/* * journal_shrink_one_cp_list * - * Find 'nr_to_scan' written-back checkpoint buffers in the given list + * Find all the written-back checkpoint buffers in the given list * and try to release them. If the whole transaction is released, set * the 'released' parameter. Return the number of released checkpointed * buffers. @@ -417,15 +360,15 @@ static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy) * Called with j_list_lock held. 
*/ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh, - unsigned long *nr_to_scan, - bool *released) + bool destroy, bool *released) { struct journal_head *last_jh; struct journal_head *next_jh = jh; unsigned long nr_freed = 0; int ret; - if (!jh || *nr_to_scan == 0) + *released = false; + if (!jh) return 0; last_jh = jh->b_cpprev; @@ -433,12 +376,15 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh, jh = next_jh; next_jh = jh->b_cpnext; - (*nr_to_scan)--; - if (__cp_buffer_busy(jh)) - continue; + if (destroy) { + ret = __jbd2_journal_remove_checkpoint(jh); + } else { + ret = jbd2_journal_try_remove_checkpoint(jh); + if (ret < 0) + continue; + } nr_freed++; - ret = __jbd2_journal_remove_checkpoint(jh); if (ret) { *released = true; break; @@ -446,7 +392,7 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh, if (need_resched()) break; - } while (jh != last_jh && *nr_to_scan); + } while (jh != last_jh); return nr_freed; } @@ -464,11 +410,11 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan) { transaction_t *transaction, *last_transaction, *next_transaction; - bool released; + bool __maybe_unused released; tid_t first_tid = 0, last_tid = 0, next_tid = 0; tid_t tid = 0; unsigned long nr_freed = 0; - unsigned long nr_scanned = *nr_to_scan; + unsigned long freed; again: spin_lock(&journal->j_list_lock); @@ -497,19 +443,11 @@ again: transaction = next_transaction; next_transaction = transaction->t_cpnext; tid = transaction->t_tid; - released = false; - nr_freed += journal_shrink_one_cp_list(transaction->t_checkpoint_list, - nr_to_scan, &released); - if (*nr_to_scan == 0) - break; - if (need_resched() || spin_needbreak(&journal->j_list_lock)) - break; - if (released) - continue; - - nr_freed += journal_shrink_one_cp_list(transaction->t_checkpoint_io_list, - nr_to_scan, &released); + freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list, + false, &released); + nr_freed += freed; + (*nr_to_scan) -= min(*nr_to_scan, freed); if (*nr_to_scan == 0) break; if (need_resched() || spin_needbreak(&journal->j_list_lock)) @@ -530,9 +468,8 @@ again: if (*nr_to_scan && next_tid) goto again; out: - nr_scanned -= *nr_to_scan; trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid, - nr_freed, nr_scanned, next_tid); + nr_freed, next_tid); return nr_freed; } @@ -548,7 +485,7 @@ out: void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy) { transaction_t *transaction, *last_transaction, *next_transaction; - int ret; + bool released; transaction = journal->j_checkpoint_transactions; if (!transaction) @@ -559,8 +496,8 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy) do { transaction = next_transaction; next_transaction = transaction->t_cpnext; - ret = journal_clean_one_cp_list(transaction->t_checkpoint_list, - destroy); + journal_shrink_one_cp_list(transaction->t_checkpoint_list, + destroy, &released); /* * This function only frees up some memory if possible so we * dont have an obligation to finish processing. 
Bail out if @@ -568,23 +505,12 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy) */ if (need_resched()) return; - if (ret) - continue; - /* - * It is essential that we are as careful as in the case of - * t_checkpoint_list with removing the buffer from the list as - * we can possibly see not yet submitted buffers on io_list - */ - ret = journal_clean_one_cp_list(transaction-> - t_checkpoint_io_list, destroy); - if (need_resched()) - return; /* * Stop scanning if we couldn't free the transaction. This * avoids pointless scanning of transactions which still * weren't checkpointed. */ - if (!ret) + if (!released) return; } while (transaction != last_transaction); } @@ -663,7 +589,7 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh) jbd2_journal_put_journal_head(jh); /* Is this transaction empty? */ - if (transaction->t_checkpoint_list || transaction->t_checkpoint_io_list) + if (transaction->t_checkpoint_list) return 0; /* @@ -695,6 +621,34 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh) } /* + * Check the checkpoint buffer and try to remove it from the checkpoint + * list if it's clean. Returns -EBUSY if it is not clean, returns 1 if + * it frees the transaction, 0 otherwise. + * + * This function is called with j_list_lock held. + */ +int jbd2_journal_try_remove_checkpoint(struct journal_head *jh) +{ + struct buffer_head *bh = jh2bh(jh); + + if (!trylock_buffer(bh)) + return -EBUSY; + if (buffer_dirty(bh)) { + unlock_buffer(bh); + return -EBUSY; + } + unlock_buffer(bh); + + /* + * Buffer is clean and the IO has finished (we held the buffer + * lock) so the checkpoint is done. We can safely remove the + * buffer from this transaction. + */ + JBUFFER_TRACE(jh, "remove from checkpoint list"); + return __jbd2_journal_remove_checkpoint(jh); +} + +/* * journal_insert_checkpoint: put a committed buffer onto a checkpoint * list so that we know when it is safe to clean the transaction out of * the log. @@ -755,7 +709,6 @@ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transact J_ASSERT(transaction->t_forget == NULL); J_ASSERT(transaction->t_shadow_list == NULL); J_ASSERT(transaction->t_checkpoint_list == NULL); - J_ASSERT(transaction->t_checkpoint_io_list == NULL); J_ASSERT(atomic_read(&transaction->t_updates) == 0); J_ASSERT(journal->j_committing_transaction != transaction); J_ASSERT(journal->j_running_transaction != transaction); diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index ac328e332124..20294c1bbeab 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -1184,8 +1184,7 @@ restart_loop: spin_lock(&journal->j_list_lock); commit_transaction->t_state = T_FINISHED; /* Check if the transaction can be dropped now that we are finished */ - if (commit_transaction->t_checkpoint_list == NULL && - commit_transaction->t_checkpoint_io_list == NULL) { + if (commit_transaction->t_checkpoint_list == NULL) { __jbd2_journal_drop_transaction(journal, commit_transaction); jbd2_journal_free_transaction(commit_transaction); } diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index ce4a5ccadeff..62e68c5b8ec3 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1775,8 +1775,7 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh) * Otherwise, if the buffer has been written to disk, * it is safe to remove the checkpoint and drop it. 
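jbd2_journal_try_remove_checkpoint(), added above, packages "remove from the checkpoint list only if provably clean": trylock_buffer() keeps it safe under j_list_lock since it never sleeps, and -EBUSY simply means leave the buffer alone. A sketch of a typical caller, mirroring how __journal_try_to_free_buffer() uses it in the next hunk:

#include <linux/jbd2.h>

static void reap_checkpoint_if_clean(journal_t *journal, struct journal_head *jh)
{
        spin_lock(&journal->j_list_lock);
        if (jh->b_cp_transaction)
                jbd2_journal_try_remove_checkpoint(jh); /* < 0: locked or dirty, skip */
        spin_unlock(&journal->j_list_lock);
}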
*/ - if (!buffer_dirty(bh)) { - __jbd2_journal_remove_checkpoint(jh); + if (jbd2_journal_try_remove_checkpoint(jh) >= 0) { spin_unlock(&journal->j_list_lock); goto drop; } @@ -2103,20 +2102,14 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh) jh = bh2jh(bh); - if (buffer_locked(bh) || buffer_dirty(bh)) - goto out; - if (jh->b_next_transaction != NULL || jh->b_transaction != NULL) - goto out; + return; spin_lock(&journal->j_list_lock); - if (jh->b_cp_transaction != NULL) { - /* written-back checkpointed metadata buffer */ - JBUFFER_TRACE(jh, "remove from checkpoint list"); - __jbd2_journal_remove_checkpoint(jh); - } + /* Remove written-back checkpointed metadata buffer */ + if (jh->b_cp_transaction != NULL) + jbd2_journal_try_remove_checkpoint(jh); spin_unlock(&journal->j_list_lock); -out: return; } diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c index c24674fc1904..8ef9503c4ab9 100644 --- a/fs/ksmbd/smb2misc.c +++ b/fs/ksmbd/smb2misc.c @@ -381,13 +381,13 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work) } if (smb2_req_struct_sizes[command] != pdu->StructureSize2) { - if (command == SMB2_OPLOCK_BREAK_HE && - le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_20 && - le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_21) { + if (!(command == SMB2_OPLOCK_BREAK_HE && + (le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_20 || + le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_21))) { /* special case for SMB2.1 lease break message */ ksmbd_debug(SMB, - "Illegal request size %d for oplock break\n", - le16_to_cpu(pdu->StructureSize2)); + "Illegal request size %u for command %d\n", + le16_to_cpu(pdu->StructureSize2), command); return 1; } } diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c index 9f9d07caa57e..0fde3d12b346 100644 --- a/fs/ksmbd/smb2pdu.c +++ b/fs/ksmbd/smb2pdu.c @@ -2309,9 +2309,16 @@ next: break; buf_len -= next; eabuf = (struct smb2_ea_info *)((char *)eabuf + next); - if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength)) + if (buf_len < sizeof(struct smb2_ea_info)) { + rc = -EINVAL; break; + } + if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength + + le16_to_cpu(eabuf->EaValueLength)) { + rc = -EINVAL; + break; + } } while (next != 0); kfree(attr_name); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index c220810c61d1..fbc7304bed56 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -509,20 +509,26 @@ out: return result; } -static void -nfs_direct_join_group(struct list_head *list, struct inode *inode) +static void nfs_direct_join_group(struct list_head *list, struct inode *inode) { - struct nfs_page *req, *next; + struct nfs_page *req, *subreq; list_for_each_entry(req, list, wb_list) { - if (req->wb_head != req || req->wb_this_page == req) + if (req->wb_head != req) continue; - for (next = req->wb_this_page; - next != req->wb_head; - next = next->wb_this_page) { - nfs_list_remove_request(next); - nfs_release_request(next); - } + subreq = req->wb_this_page; + if (subreq == req) + continue; + do { + /* + * Remove subrequests from this list before freeing + * them in the call to nfs_join_page_group(). 
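In nfs_direct_join_group() above, subrequests hang off the head in a circular ring through wb_this_page, so the rewritten walk uses do/while to visit each one exactly once before rejoining the page group. The traversal shape on its own (field name kept from struct nfs_page; the callback must not break the ring links):

struct ring_req {
        struct ring_req *wb_this_page;  /* circular: last subrequest points back at head */
};

static void for_each_subrequest(struct ring_req *head,
                                void (*fn)(struct ring_req *))
{
        struct ring_req *sub = head->wb_this_page;

        if (sub == head)
                return;                 /* head only, no subrequests */
        do {
                fn(sub);                /* may drop refs, must keep wb_this_page intact */
        } while ((sub = sub->wb_this_page) != head);
}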
+ */ + if (!list_empty(&subreq->wb_list)) { + nfs_list_remove_request(subreq); + nfs_release_request(subreq); + } + } while ((subreq = subreq->wb_this_page) != req); nfs_join_page_group(req, inode); } } diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index e4524635a129..d8f01d222c49 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -731,9 +731,7 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, if ((attr->ia_valid & ATTR_KILL_SUID) != 0 && inode->i_mode & S_ISUID) inode->i_mode &= ~S_ISUID; - if ((attr->ia_valid & ATTR_KILL_SGID) != 0 && - (inode->i_mode & (S_ISGID | S_IXGRP)) == - (S_ISGID | S_IXGRP)) + if (setattr_should_drop_sgid(&init_user_ns, inode)) inode->i_mode &= ~S_ISGID; if ((attr->ia_valid & ATTR_MODE) != 0) { int mode = attr->ia_mode & S_IALLUGO; diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index da94bf2afd07..bc07012741cb 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -1339,7 +1339,6 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name, for (i = 0; i < np; i++) { pages[i] = alloc_page(GFP_KERNEL); if (!pages[i]) { - np = i + 1; err = -ENOMEM; goto out; } @@ -1363,8 +1362,8 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name, } while (exception.retry); out: - while (--np >= 0) - __free_page(pages[np]); + while (--i >= 0) + __free_page(pages[i]); kfree(pages); return err; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index b1ec9b5d06e5..a21e25cbd451 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5964,9 +5964,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu out_ok: ret = res.acl_len; out_free: - for (i = 0; i < npages; i++) - if (pages[i]) - __free_page(pages[i]); + while (--i >= 0) + __free_page(pages[i]); if (res.acl_scratch) __free_page(res.acl_scratch); kfree(pages); @@ -7153,8 +7152,15 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) goto out_restart; break; - case -NFS4ERR_BAD_STATEID: case -NFS4ERR_OLD_STATEID: + if (data->arg.new_lock_owner != 0 && + nfs4_refresh_open_old_stateid(&data->arg.open_stateid, + lsp->ls_state)) + goto out_restart; + if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp)) + goto out_restart; + fallthrough; + case -NFS4ERR_BAD_STATEID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: if (data->arg.new_lock_owner != 0) { diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index c8729493df5c..f54ef526f25d 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1263,9 +1263,9 @@ static void revoke_delegation(struct nfs4_delegation *dp) WARN_ON(!list_empty(&dp->dl_recall_lru)); if (clp->cl_minorversion) { + spin_lock(&clp->cl_lock); dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID; refcount_inc(&dp->dl_stid.sc_count); - spin_lock(&clp->cl_lock); list_add(&dp->dl_recall_lru, &clp->cl_revoked); spin_unlock(&clp->cl_lock); } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index d4adc599737d..15a86876e3d9 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -322,7 +322,9 @@ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap) iap->ia_mode &= ~S_ISGID; } else { /* set ATTR_KILL_* bits and let VFS handle it */ - iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID); + iap->ia_valid |= ATTR_KILL_SUID; + iap->ia_valid |= + setattr_should_drop_sgid(&init_user_ns, inode); } } } diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index c1ab0bfc3ed5..b908216f306d 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -1105,9 +1105,17 
@@ int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty) int __nilfs_mark_inode_dirty(struct inode *inode, int flags) { + struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct buffer_head *ibh; int err; + /* + * Do not dirty inodes after the log writer has been detached + * and its nilfs_root struct has been freed. + */ + if (unlikely(nilfs_purging(nilfs))) + return 0; + err = nilfs_load_inode_block(inode, &ibh); if (unlikely(err)) { nilfs_warn(inode->i_sb, diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 5c310eb7dd0c..9e865732d352 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -2845,6 +2845,7 @@ void nilfs_detach_log_writer(struct super_block *sb) nilfs_segctor_destroy(nilfs->ns_writer); nilfs->ns_writer = NULL; } + set_nilfs_purging(nilfs); /* Force to free the list of dirty files */ spin_lock(&nilfs->ns_inode_lock); @@ -2857,4 +2858,5 @@ void nilfs_detach_log_writer(struct super_block *sb) up_write(&nilfs->ns_segctor_sem); nilfs_dispose_list(nilfs, &garbage_list, 1); + clear_nilfs_purging(nilfs); } diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 987c8ab02aee..b36ba588ee69 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h @@ -29,6 +29,7 @@ enum { THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */ THE_NILFS_GC_RUNNING, /* gc process is running */ THE_NILFS_SB_DIRTY, /* super block is dirty */ + THE_NILFS_PURGING, /* disposing dirty files for cleanup */ }; /** @@ -208,6 +209,7 @@ THE_NILFS_FNS(INIT, init) THE_NILFS_FNS(DISCONTINUED, discontinued) THE_NILFS_FNS(GC_RUNNING, gc_running) THE_NILFS_FNS(SB_DIRTY, sb_dirty) +THE_NILFS_FNS(PURGING, purging) /* * Mount option operations diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c index c0c6bcbc8c05..81c22df27c72 100644 --- a/fs/ntfs3/attrlist.c +++ b/fs/ntfs3/attrlist.c @@ -52,7 +52,7 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr) if (!attr->non_res) { lsize = le32_to_cpu(attr->res.data_size); - le = kmalloc(al_aligned(lsize), GFP_NOFS); + le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN); if (!le) { err = -ENOMEM; goto out; @@ -80,7 +80,7 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr) if (err < 0) goto out; - le = kmalloc(al_aligned(lsize), GFP_NOFS); + le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN); if (!le) { err = -ENOMEM; goto out; diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c index d24e12d348d4..9a1744955d1c 100644 --- a/fs/ntfs3/frecord.c +++ b/fs/ntfs3/frecord.c @@ -849,6 +849,7 @@ int ni_create_attr_list(struct ntfs_inode *ni) if (err) goto out1; + err = -EINVAL; /* Call mi_remove_attr() in reverse order to keep pointers 'arr_move' valid. */ while (to_free > 0) { struct ATTRIB *b = arr_move[--nb]; @@ -857,7 +858,8 @@ int ni_create_attr_list(struct ntfs_inode *ni) attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off), b->name_len, asize, name_off); - WARN_ON(!attr); + if (!attr) + goto out1; mi_get_ref(mi, &le_b[nb]->ref); le_b[nb]->id = attr->id; @@ -867,17 +869,20 @@ int ni_create_attr_list(struct ntfs_inode *ni) attr->id = le_b[nb]->id; /* Remove from primary record. 
*/ - WARN_ON(!mi_remove_attr(NULL, &ni->mi, b)); + if (!mi_remove_attr(NULL, &ni->mi, b)) + goto out1; if (to_free <= asize) break; to_free -= asize; - WARN_ON(!nb); + if (!nb) + goto out1; } attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0, lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT); - WARN_ON(!attr); + if (!attr) + goto out1; attr->non_res = 0; attr->flags = 0; @@ -897,9 +902,10 @@ out1: kfree(ni->attr_list.le); ni->attr_list.le = NULL; ni->attr_list.size = 0; + return err; out: - return err; + return 0; } /* diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c index 3c823613de97..0ae70010b01d 100644 --- a/fs/ntfs3/fsntfs.c +++ b/fs/ntfs3/fsntfs.c @@ -154,7 +154,7 @@ int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes, /* Check errors. */ if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- || fn * SECTOR_SIZE > bytes) { - return -EINVAL; /* Native chkntfs returns ok! */ + return -E_NTFS_CORRUPT; } /* Get fixup pointer. */ diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c index 124eba7238fd..7705adc926b8 100644 --- a/fs/ntfs3/index.c +++ b/fs/ntfs3/index.c @@ -1112,6 +1112,12 @@ ok: *node = in; out: + if (err == -E_NTFS_CORRUPT) { + ntfs_inode_err(&ni->vfs_inode, "directory corrupted"); + ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR); + err = -EINVAL; + } + if (ib != in->index) kfree(ib); diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h index fc0eb93c76de..510ed2ea1c48 100644 --- a/fs/ntfs3/ntfs_fs.h +++ b/fs/ntfs3/ntfs_fs.h @@ -54,6 +54,8 @@ enum utf16_endian; #define E_NTFS_NONRESIDENT 556 /* NTFS specific error code about punch hole. */ #define E_NTFS_NOTALIGNED 557 +/* NTFS specific error code when on-disk struct is corrupted. */ +#define E_NTFS_CORRUPT 558 /* sbi->flags */ diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c index 41f6e578966b..938fc286963f 100644 --- a/fs/ntfs3/record.c +++ b/fs/ntfs3/record.c @@ -124,7 +124,7 @@ int mi_read(struct mft_inode *mi, bool is_mft) struct rw_semaphore *rw_lock = NULL; if (is_mounted(sbi)) { - if (!is_mft) { + if (!is_mft && mft_ni) { rw_lock = &mft_ni->file.run_lock; down_read(rw_lock); } @@ -148,7 +148,7 @@ int mi_read(struct mft_inode *mi, bool is_mft) ni_lock(mft_ni); down_write(rw_lock); } - err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, &mft_ni->file.run, + err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, run, vbo >> sbi->cluster_bits); if (rw_lock) { up_write(rw_lock); @@ -180,6 +180,12 @@ ok: return 0; out: + if (err == -E_NTFS_CORRUPT) { + ntfs_err(sbi->sb, "mft corrupted"); + ntfs_set_state(sbi, NTFS_DIRTY_ERROR); + err = -EINVAL; + } + return err; } diff --git a/fs/open.c b/fs/open.c index e93c33069055..159a2765b7eb 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1126,7 +1126,7 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op) lookup_flags |= LOOKUP_IN_ROOT; if (how->resolve & RESOLVE_CACHED) { /* Don't bother even trying for create/truncate/tmpfile open */ - if (flags & (O_TRUNC | O_CREAT | O_TMPFILE)) + if (flags & (O_TRUNC | O_CREAT | __O_TMPFILE)) return -EAGAIN; lookup_flags |= LOOKUP_CACHED; } diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index b2d64f3c974b..08031638bbee 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h @@ -32,6 +32,7 @@ struct ovl_sb { }; struct ovl_layer { + /* ovl_free_fs() relies on @mnt being the first member! 
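The overlayfs hunk that follows turns the "mnt must stay first" comment into a compile-time guarantee. The same technique in plain C11, on an illustrative struct:

#include <stddef.h>

struct layer {
        void *mnt;      /* teardown code casts struct layer * to this member */
        int fsid;
};

/* Breaks the build, not the unmount path, if the layout ever changes. */
_Static_assert(offsetof(struct layer, mnt) == 0,
               "mnt must remain the first member of struct layer");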
*/ struct vfsmount *mnt; /* Trap in ovl inode cache */ struct inode *trap; @@ -42,6 +43,14 @@ struct ovl_layer { int fsid; }; +/* + * ovl_free_fs() relies on @mnt being the first member when unmounting + * the private mounts created for each layer. Let's check both the + * offset and type. + */ +static_assert(offsetof(struct ovl_layer, mnt) == 0); +static_assert(__same_type(typeof_member(struct ovl_layer, mnt), struct vfsmount *)); + struct ovl_path { const struct ovl_layer *layer; struct dentry *dentry; diff --git a/fs/super.c b/fs/super.c index 297630540f43..048576b19af6 100644 --- a/fs/super.c +++ b/fs/super.c @@ -863,6 +863,7 @@ int reconfigure_super(struct fs_context *fc) struct super_block *sb = fc->root->d_sb; int retval; bool remount_ro = false; + bool remount_rw = false; bool force = fc->sb_flags & SB_FORCE; if (fc->sb_flags_mask & ~MS_RMT_MASK) @@ -880,7 +881,7 @@ int reconfigure_super(struct fs_context *fc) bdev_read_only(sb->s_bdev)) return -EACCES; #endif - + remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb); remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb); } @@ -910,6 +911,14 @@ int reconfigure_super(struct fs_context *fc) if (retval) return retval; } + } else if (remount_rw) { + /* + * We set s_readonly_remount here to protect filesystem's + * reconfigure code from writes from userspace until + * reconfigure finishes. + */ + sb->s_readonly_remount = 1; + smp_wmb(); } if (fc->ops->reconfigure) { diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index 5a59d56a2038..1e9c520411f8 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -145,6 +145,10 @@ static int alloc_branch(struct inode *inode, */ parent = block_to_cpu(SYSV_SB(inode->i_sb), branch[n-1].key); bh = sb_getblk(inode->i_sb, parent); + if (!bh) { + sysv_free_block(inode->i_sb, branch[n].key); + break; + } lock_buffer(bh); memset(bh->b_data, 0, blocksize); branch[n].bh = bh; diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 20c93f08c993..95a1d214108a 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h @@ -38,7 +38,7 @@ static inline long find_zero(unsigned long mask) return (mask >> 8) ? 
byte : byte + 1; } -static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) +static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) { unsigned long rhs = val | c->low_bits; *data = rhs; diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index dfb46915015b..9c7949ebc159 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -1495,7 +1495,7 @@ u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZ #define DP_BRANCH_OUI_HEADER_SIZE 0xc #define DP_RECEIVER_CAP_SIZE 0xf -#define DP_DSC_RECEIVER_CAP_SIZE 0xf +#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */ #define EDP_PSR_RECEIVER_CAP_SIZE 2 #define EDP_DISPLAY_CTL_CAP_SIZE 3 #define DP_LTTPR_COMMON_CAP_SIZE 8 diff --git a/include/linux/clk.h b/include/linux/clk.h index c755732f002b..1b43d18228f4 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -183,6 +183,39 @@ int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale); */ bool clk_is_match(const struct clk *p, const struct clk *q); +/** + * clk_rate_exclusive_get - get exclusivity over the rate control of a + * producer + * @clk: clock source + * + * This function allows drivers to get exclusive control over the rate of a + * provider. It prevents any other consumer to execute, even indirectly, + * opereation which could alter the rate of the provider or cause glitches + * + * If exlusivity is claimed more than once on clock, even by the same driver, + * the rate effectively gets locked as exclusivity can't be preempted. + * + * Must not be called from within atomic context. + * + * Returns success (0) or negative errno. + */ +int clk_rate_exclusive_get(struct clk *clk); + +/** + * clk_rate_exclusive_put - release exclusivity over the rate control of a + * producer + * @clk: clock source + * + * This function allows drivers to release the exclusivity it previously got + * from clk_rate_exclusive_get() + * + * The caller must balance the number of clk_rate_exclusive_get() and + * clk_rate_exclusive_put() calls. + * + * Must not be called from within atomic context. + */ +void clk_rate_exclusive_put(struct clk *clk); + #else static inline int clk_notifier_register(struct clk *clk, @@ -236,6 +269,13 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q) return p == q; } +static inline int clk_rate_exclusive_get(struct clk *clk) +{ + return 0; +} + +static inline void clk_rate_exclusive_put(struct clk *clk) {} + #endif #ifdef CONFIG_HAVE_CLK_PREPARE @@ -572,38 +612,6 @@ struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id); */ struct clk *devm_get_clk_from_child(struct device *dev, struct device_node *np, const char *con_id); -/** - * clk_rate_exclusive_get - get exclusivity over the rate control of a - * producer - * @clk: clock source - * - * This function allows drivers to get exclusive control over the rate of a - * provider. It prevents any other consumer to execute, even indirectly, - * opereation which could alter the rate of the provider or cause glitches - * - * If exlusivity is claimed more than once on clock, even by the same driver, - * the rate effectively gets locked as exclusivity can't be preempted. - * - * Must not be called from within atomic context. - * - * Returns success (0) or negative errno. 
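The clk.h change above only relocates these declarations so the real and !CONFIG_COMMON_CLK stub versions pair up correctly; the API contract is unchanged. Typical balanced use looks like this (hypothetical caller):

#include <linux/clk.h>

static int run_at_fixed_rate(struct clk *clk, unsigned long hz)
{
        int ret;

        ret = clk_rate_exclusive_get(clk);      /* pin the rate against other consumers */
        if (ret)
                return ret;

        ret = clk_set_rate(clk, hz);            /* the exclusivity holder may still set it */
        if (!ret) {
                /* ... work that depends on the exact rate ... */
        }

        clk_rate_exclusive_put(clk);            /* must balance every successful get */
        return ret;
}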
- */ -int clk_rate_exclusive_get(struct clk *clk); - -/** - * clk_rate_exclusive_put - release exclusivity over the rate control of a - * producer - * @clk: clock source - * - * This function allows drivers to release the exclusivity it previously got - * from clk_rate_exclusive_get() - * - * The caller must balance the number of clk_rate_exclusive_get() and - * clk_rate_exclusive_put() calls. - * - * Must not be called from within atomic context. - */ -void clk_rate_exclusive_put(struct clk *clk); /** * clk_enable - inform the system when the clock source should be running. @@ -963,14 +971,6 @@ static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {} static inline void devm_clk_put(struct device *dev, struct clk *clk) {} - -static inline int clk_rate_exclusive_get(struct clk *clk) -{ - return 0; -} - -static inline void clk_rate_exclusive_put(struct clk *clk) {} - static inline int clk_enable(struct clk *clk) { return 0; diff --git a/include/linux/cpu.h b/include/linux/cpu.h index d4c860de9a6a..caf3b95017bf 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -72,6 +72,8 @@ extern ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_gds(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index d2b9c41c8edf..82fb7e24d1cb 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -56,8 +56,10 @@ extern void cpuset_init_smp(void); extern void cpuset_force_rebuild(void); extern void cpuset_update_active_cpus(void); extern void cpuset_wait_for_hotplug(void); -extern void cpuset_read_lock(void); -extern void cpuset_read_unlock(void); +extern void inc_dl_tasks_cs(struct task_struct *task); +extern void dec_dl_tasks_cs(struct task_struct *task); +extern void cpuset_lock(void); +extern void cpuset_unlock(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); extern bool cpuset_cpus_allowed_fallback(struct task_struct *p); extern nodemask_t cpuset_mems_allowed(struct task_struct *p); @@ -179,8 +181,10 @@ static inline void cpuset_update_active_cpus(void) static inline void cpuset_wait_for_hotplug(void) { } -static inline void cpuset_read_lock(void) { } -static inline void cpuset_read_unlock(void) { } +static inline void inc_dl_tasks_cs(struct task_struct *task) { } +static inline void dec_dl_tasks_cs(struct task_struct *task) { } +static inline void cpuset_lock(void) { } +static inline void cpuset_unlock(void) { } static inline void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask) diff --git a/include/linux/fs.h b/include/linux/fs.h index c0dd2794e1ba..6bba7a58c95c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3135,6 +3135,8 @@ extern struct inode *new_inode(struct super_block *sb); extern void free_inode_nonrcu(struct inode *inode); extern int setattr_should_drop_suidgid(struct user_namespace *, struct inode *); extern int file_remove_privs(struct file *); +int setattr_should_drop_sgid(struct user_namespace *mnt_userns, + const struct inode *inode); extern void __insert_inode_hash(struct inode *, unsigned long hashval); static inline void insert_inode_hash(struct inode *inode) diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index 
2c8860e406bd..0417360a6db9 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -53,6 +53,7 @@ } \ if (__sleep_us) \ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + cpu_relax(); \ } \ (cond) ? 0 : -ETIMEDOUT; \ }) @@ -95,6 +96,7 @@ } \ if (__delay_us) \ udelay(__delay_us); \ + cpu_relax(); \ } \ (cond) ? 0 : -ETIMEDOUT; \ }) diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index d63b8106796e..ade8a6d7acff 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -627,12 +627,6 @@ struct transaction_s struct journal_head *t_checkpoint_list; /* - * Doubly-linked circular list of all buffers submitted for IO while - * checkpointing. [j_list_lock] - */ - struct journal_head *t_checkpoint_io_list; - - /* * Doubly-linked circular list of metadata buffers being * shadowed by log IO. The IO buffers on the iobuf list and * the shadow buffers on this list match each other one for @@ -1447,6 +1441,7 @@ extern void jbd2_journal_commit_transaction(journal_t *); void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy); unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan); int __jbd2_journal_remove_checkpoint(struct journal_head *); +int jbd2_journal_try_remove_checkpoint(struct journal_head *jh); void jbd2_journal_destroy_checkpoint(journal_t *journal); void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); diff --git a/include/linux/objtool.h b/include/linux/objtool.h index a2042c418686..51f5b24af834 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -71,6 +71,23 @@ struct unwind_hint { static void __used __section(".discard.func_stack_frame_non_standard") \ *__func_stack_frame_non_standard_##func = func +/* + * STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore + * for the case where a function is intentionally missing frame pointer setup, + * but otherwise needs objtool/ORC coverage when frame pointers are disabled. 
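A sketch of how the new macro documented above would be used; the function is hypothetical. A routine whose prologue intentionally omits frame-pointer setup is exempted from objtool's frame-pointer validation, while still being covered when CONFIG_FRAME_POINTER is off:

#include <linux/objtool.h>

/* Hypothetical: implemented in asm without the usual frame-pointer
 * prologue, so it must not be checked under CONFIG_FRAME_POINTER=y. */
void my_asm_thunk(void);

STACK_FRAME_NON_STANDARD_FP(my_asm_thunk);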
+ */ +#ifdef CONFIG_FRAME_POINTER +#define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func) +#else +#define STACK_FRAME_NON_STANDARD_FP(func) +#endif + +#define ANNOTATE_NOENDBR \ + "986: \n\t" \ + ".pushsection .discard.noendbr\n\t" \ + _ASM_PTR " 986b\n\t" \ + ".popsection\n\t" + #else /* __ASSEMBLY__ */ /* @@ -123,6 +140,13 @@ struct unwind_hint { .popsection .endm +.macro ANNOTATE_NOENDBR +.Lhere_\@: + .pushsection .discard.noendbr + .quad .Lhere_\@ + .popsection +.endm + #endif /* __ASSEMBLY__ */ #else /* !CONFIG_STACK_VALIDATION */ @@ -132,12 +156,16 @@ struct unwind_hint { #define UNWIND_HINT(sp_reg, sp_offset, type, end) \ "\n\t" #define STACK_FRAME_NON_STANDARD(func) +#define STACK_FRAME_NON_STANDARD_FP(func) +#define ANNOTATE_NOENDBR #else #define ANNOTATE_INTRA_FUNCTION_CALL .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 .endm .macro STACK_FRAME_NON_STANDARD func:req .endm +.macro ANNOTATE_NOENDBR +.endm #endif #endif /* CONFIG_STACK_VALIDATION */ diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h index cd5b62db9084..e63a63aa47a3 100644 --- a/include/linux/pm_wakeirq.h +++ b/include/linux/pm_wakeirq.h @@ -17,8 +17,8 @@ #ifdef CONFIG_PM extern int dev_pm_set_wake_irq(struct device *dev, int irq); -extern int dev_pm_set_dedicated_wake_irq(struct device *dev, - int irq); +extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq); +extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq); extern void dev_pm_clear_wake_irq(struct device *dev); extern void dev_pm_enable_wake_irq(struct device *dev); extern void dev_pm_disable_wake_irq(struct device *dev); @@ -35,6 +35,11 @@ static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) return 0; } +static inline int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq) +{ + return 0; +} + static inline void dev_pm_clear_wake_irq(struct device *dev) { } diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index f34dbd0db795..a84063492c71 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -268,14 +268,15 @@ static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain) } /** - * @brief qed_chain_advance_page - + * qed_chain_advance_page(): Advance the next element across pages for a + * linked chain. * - * Advance the next element across pages for a linked chain + * @p_chain: P_chain. + * @p_next_elem: P_next_elem. + * @idx_to_inc: Idx_to_inc. + * @page_to_inc: page_to_inc. * - * @param p_chain - * @param p_next_elem - * @param idx_to_inc - * @param page_to_inc + * Return: Void. */ static inline void qed_chain_advance_page(struct qed_chain *p_chain, @@ -336,12 +337,14 @@ qed_chain_advance_page(struct qed_chain *p_chain, } while (0) /** - * @brief qed_chain_return_produced - + * qed_chain_return_produced(): A chain in which the driver "Produces" + * elements should use this API + * to indicate previous produced elements + * are now consumed. * - * A chain in which the driver "Produces" elements should use this API - * to indicate previous produced elements are now consumed. + * @p_chain: Chain. * - * @param p_chain + * Return: Void. 
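Taken together, the produce/consume kernel-doc in these hunks describes a simple ring protocol where room checking is the caller's job. A hedged sketch of a producer built on it (the element type and helper name my_fill_chain are invented; qed_chain_get_elem_left() is the existing room check in this header):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/qed/qed_chain.h>

struct my_elem {
	u32 data;
};

/* Illustrative producer: verify free elements up front, then produce. */
static int my_fill_chain(struct qed_chain *chain,
			 const struct my_elem *src, u32 n)
{
	u32 i;

	if (qed_chain_get_elem_left(chain) < n)
		return -ENOSPC;

	for (i = 0; i < n; i++) {
		struct my_elem *elem = qed_chain_produce(chain);

		memcpy(elem, &src[i], sizeof(*elem));
	}

	return 0;
}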
*/ static inline void qed_chain_return_produced(struct qed_chain *p_chain) { @@ -353,15 +356,15 @@ static inline void qed_chain_return_produced(struct qed_chain *p_chain) } /** - * @brief qed_chain_produce - + * qed_chain_produce(): A chain in which the driver "Produces" + * elements should use this to get a pointer to + * the next element which can be "Produced". It's the driver's + * responsibility to validate that the chain has room for + * a new element. * - * A chain in which the driver "Produces" elements should use this to get - * a pointer to the next element which can be "Produced". It's the driver's - * responsibility to validate that the chain has room for a new element. + * @p_chain: Chain. * - * @param p_chain - * - * @return void*, a pointer to next element + * Return: void*, a pointer to next element. */ static inline void *qed_chain_produce(struct qed_chain *p_chain) { @@ -395,14 +398,11 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain) } /** - * @brief qed_chain_get_capacity - - * - * Get the maximum number of BDs in chain + * qed_chain_get_capacity(): Get the maximum number of BDs in chain. * - * @param p_chain - * @param num + * @p_chain: Chain. * - * @return number of unusable BDs + * Return: number of unusable BDs. */ static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain) { @@ -410,12 +410,14 @@ static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain) } /** - * @brief qed_chain_recycle_consumed - + * qed_chain_recycle_consumed(): Returns an element which was + * previously consumed; + * Increments producers so they could + * be written to FW. * - * Returns an element which was previously consumed; - * Increments producers so they could be written to FW. + * @p_chain: Chain. * - * @param p_chain + * Return: Void. */ static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain) { @@ -427,14 +429,13 @@ static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain) } /** - * @brief qed_chain_consume - + * qed_chain_consume(): A Chain in which the driver utilizes data written + * by a different source (i.e., FW) should use this to + * access passed buffers. * - * A Chain in which the driver utilizes data written by a different source - * (i.e., FW) should use this to access passed buffers. + * @p_chain: Chain. * - * @param p_chain - * - * @return void*, a pointer to the next buffer written + * Return: void*, a pointer to the next buffer written. */ static inline void *qed_chain_consume(struct qed_chain *p_chain) { @@ -468,9 +469,11 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) } /** - * @brief qed_chain_reset - Resets the chain to its start state + * qed_chain_reset(): Resets the chain to its start state. + * + * @p_chain: pointer to a previously allocated chain. * - * @param p_chain pointer to a previously allocated chain + * Return: Void. */ static inline void qed_chain_reset(struct qed_chain *p_chain) { @@ -519,13 +522,12 @@ static inline void qed_chain_reset(struct qed_chain *p_chain) } /** - * @brief qed_chain_get_last_elem - + * qed_chain_get_last_elem(): Returns a pointer to the last element of the + * chain. * - * Returns a pointer to the last element of the chain + * @p_chain: Chain. * - * @param p_chain - * - * @return void* + * Return: void*. */ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain) { @@ -563,10 +565,13 @@ out: } /** - * @brief qed_chain_set_prod - sets the prod to the given value + * qed_chain_set_prod(): sets the prod to the given value. + * + * @p_chain: Chain.
+ * @prod_idx: Prod Idx. + * @p_prod_elem: Prod elem. * - * @param prod_idx - * @param p_prod_elem + * Return: Void. */ static inline void qed_chain_set_prod(struct qed_chain *p_chain, u32 prod_idx, void *p_prod_elem) @@ -610,9 +615,11 @@ static inline void qed_chain_set_prod(struct qed_chain *p_chain, } /** - * @brief qed_chain_pbl_zero_mem - set chain memory to 0 + * qed_chain_pbl_zero_mem(): set chain memory to 0. + * + * @p_chain: Chain. * - * @param p_chain + * Return: Void. */ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain) { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 850b98991670..f39451aaaeec 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -819,47 +819,47 @@ struct qed_common_cb_ops { struct qed_selftest_ops { /** - * @brief selftest_interrupt - Perform interrupt test + * selftest_interrupt(): Perform interrupt test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*selftest_interrupt)(struct qed_dev *cdev); /** - * @brief selftest_memory - Perform memory test + * selftest_memory(): Perform memory test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*selftest_memory)(struct qed_dev *cdev); /** - * @brief selftest_register - Perform register test + * selftest_register(): Perform register test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*selftest_register)(struct qed_dev *cdev); /** - * @brief selftest_clock - Perform clock test + * selftest_clock(): Perform clock test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*selftest_clock)(struct qed_dev *cdev); /** - * @brief selftest_nvram - Perform nvram test + * selftest_nvram(): Perform nvram test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*selftest_nvram) (struct qed_dev *cdev); }; @@ -927,47 +927,53 @@ struct qed_common_ops { enum qed_hw_err_type err_type); /** - * @brief can_link_change - can the instance change the link or not + * can_link_change(): can the instance change the link or not. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return true if link-change is allowed, false otherwise. + * Return: true if link-change is allowed, false otherwise. */ bool (*can_link_change)(struct qed_dev *cdev); /** - * @brief set_link - set links according to params + * set_link(): set links according to params. * - * @param cdev - * @param params - values used to override the default link configuration + * @cdev: Qed dev pointer. + * @params: values used to override the default link configuration. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*set_link)(struct qed_dev *cdev, struct qed_link_params *params); /** - * @brief get_link - returns the current link state. + * get_link(): returns the current link state. * - * @param cdev - * @param if_link - structure to be filled with current link configuration. + * @cdev: Qed dev pointer. + * @if_link: structure to be filled with current link configuration. + * + * Return: Void.
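All of the qed hunks in this range apply the same mechanical rewrite, replacing the old doxygen @brief/@param/@return markers with kernel-doc. For reference, the target shape looks roughly like this (operation and argument names are placeholders):

/**
 * my_op(): One-line summary of what the operation does.
 *
 * @cdev: Qed dev pointer.
 * @arg: Description of the argument.
 *
 * Return: 0 on success, error otherwise.
 */
int (*my_op)(struct qed_dev *cdev, int arg);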
*/ void (*get_link)(struct qed_dev *cdev, struct qed_link_output *if_link); /** - * @brief - drains chip in case Tx completions fail to arrive due to pause. + * drain(): drains chip in case Tx completions fail to arrive due to pause. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Int. */ int (*drain)(struct qed_dev *cdev); /** - * @brief update_msglvl - update module debug level + * update_msglvl(): update module debug level. * - * @param cdev - * @param dp_module - * @param dp_level + * @cdev: Qed dev pointer. + * @dp_module: Debug module. + * @dp_level: Debug level. + * + * Return: Void. */ void (*update_msglvl)(struct qed_dev *cdev, u32 dp_module, @@ -981,70 +987,73 @@ struct qed_common_ops { struct qed_chain *p_chain); /** - * @brief nvm_flash - Flash nvm data. + * nvm_flash(): Flash nvm data. * - * @param cdev - * @param name - file containing the data + * @cdev: Qed dev pointer. + * @name: file containing the data. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*nvm_flash)(struct qed_dev *cdev, const char *name); /** - * @brief nvm_get_image - reads an entire image from nvram + * nvm_get_image(): reads an entire image from nvram. * - * @param cdev - * @param type - type of the request nvram image - * @param buf - preallocated buffer to fill with the image - * @param len - length of the allocated buffer + * @cdev: Qed dev pointer. + * @type: type of the request nvram image. + * @buf: preallocated buffer to fill with the image. + * @len: length of the allocated buffer. * - * @return 0 on success, error otherwise + * Return: 0 on success, error otherwise. */ int (*nvm_get_image)(struct qed_dev *cdev, enum qed_nvm_images type, u8 *buf, u16 len); /** - * @brief set_coalesce - Configure Rx coalesce value in usec + * set_coalesce(): Configure Rx coalesce value in usec. * - * @param cdev - * @param rx_coal - Rx coalesce value in usec - * @param tx_coal - Tx coalesce value in usec - * @param qid - Queue index - * @param sb_id - Status Block Id + * @cdev: Qed dev pointer. + * @rx_coal: Rx coalesce value in usec. + * @tx_coal: Tx coalesce value in usec. + * @handle: Handle. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, void *handle); /** - * @brief set_led - Configure LED mode + * set_led() - Configure LED mode. * - * @param cdev - * @param mode - LED mode + * @cdev: Qed dev pointer. + * @mode: LED mode. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*set_led)(struct qed_dev *cdev, enum qed_led_mode mode); /** - * @brief attn_clr_enable - Prevent attentions from being reasserted + * attn_clr_enable(): Prevent attentions from being reasserted. + * + * @cdev: Qed dev pointer. + * @clr_enable: Clear enable. * - * @param cdev - * @param clr_enable + * Return: Void. */ void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable); /** - * @brief db_recovery_add - add doorbell information to the doorbell - * recovery mechanism. + * db_recovery_add(): add doorbell information to the doorbell + * recovery mechanism. * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address of where db_data is stored - * @param db_is_32b - doorbell is 32b pr 64b - * @param db_is_user - doorbell recovery addresses are user or kernel space + * @cdev: Qed dev pointer. + * @db_addr: Doorbell address. + * @db_data: Address of where db_data is stored.
+ * @db_width: Doorbell is 32b or 64b. + * @db_space: Doorbell recovery addresses are user or kernel space. + * + * Return: Int. */ int (*db_recovery_add)(struct qed_dev *cdev, void __iomem *db_addr, @@ -1053,114 +1062,130 @@ struct qed_common_ops { enum qed_db_rec_space db_space); /** - * @brief db_recovery_del - remove doorbell information from the doorbell + * db_recovery_del(): remove doorbell information from the doorbell * recovery mechanism. db_data serves as key (db_addr is not unique). * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address where db_data is stored. Serves as key for the - * entry to delete. + * @cdev: Qed dev pointer. + * @db_addr: Doorbell address. + * @db_data: Address where db_data is stored. Serves as key for the + * entry to delete. + * + * Return: Int. */ int (*db_recovery_del)(struct qed_dev *cdev, void __iomem *db_addr, void *db_data); /** - * @brief recovery_process - Trigger a recovery process + * recovery_process(): Trigger a recovery process. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*recovery_process)(struct qed_dev *cdev); /** - * @brief recovery_prolog - Execute the prolog operations of a recovery process + * recovery_prolog(): Execute the prolog operations of a recovery process. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*recovery_prolog)(struct qed_dev *cdev); /** - * @brief update_drv_state - API to inform the change in the driver state. + * update_drv_state(): API to inform the change in the driver state. * - * @param cdev - * @param active + * @cdev: Qed dev pointer. + * @active: Active * + * Return: Int. */ int (*update_drv_state)(struct qed_dev *cdev, bool active); /** - * @brief update_mac - API to inform the change in the mac address + * update_mac(): API to inform the change in the mac address. * - * @param cdev - * @param mac + * @cdev: Qed dev pointer. + * @mac: MAC. * + * Return: Int. */ int (*update_mac)(struct qed_dev *cdev, u8 *mac); /** - * @brief update_mtu - API to inform the change in the mtu + * update_mtu(): API to inform the change in the mtu. * - * @param cdev - * @param mtu + * @cdev: Qed dev pointer. + * @mtu: MTU. * + * Return: Int. */ int (*update_mtu)(struct qed_dev *cdev, u16 mtu); /** - * @brief update_wol - update of changes in the WoL configuration + * update_wol(): Update of changes in the WoL configuration. + * + * @cdev: Qed dev pointer. + * @enabled: true iff WoL should be enabled. * - * @param cdev - * @param enabled - true iff WoL should be enabled. + * Return: Int. */ int (*update_wol) (struct qed_dev *cdev, bool enabled); /** - * @brief read_module_eeprom + * read_module_eeprom(): Read EEPROM. * - * @param cdev - * @param buf - buffer - * @param dev_addr - PHY device memory region - * @param offset - offset into eeprom contents to be read - * @param len - buffer length, i.e., max bytes to be read + * @cdev: Qed dev pointer. + * @buf: buffer. + * @dev_addr: PHY device memory region. + * @offset: offset into eeprom contents to be read. + * @len: buffer length, i.e., max bytes to be read. + * + * Return: Int. */ int (*read_module_eeprom)(struct qed_dev *cdev, char *buf, u8 dev_addr, u32 offset, u32 len); /** - * @brief get_affin_hwfn_idx + * get_affin_hwfn_idx(): Get affine HW function. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: u8. 
*/ u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev); /** - * @brief read_nvm_cfg - Read NVM config attribute value. - * @param cdev - * @param buf - buffer - * @param cmd - NVM CFG command id - * @param entity_id - Entity id + * read_nvm_cfg(): Read NVM config attribute value. + * + * @cdev: Qed dev pointer. + * @buf: Buffer. + * @cmd: NVM CFG command id. + * @entity_id: Entity id. * + * Return: Int. */ int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd, u32 entity_id); /** - * @brief read_nvm_cfg - Read NVM config attribute value. - * @param cdev - * @param cmd - NVM CFG command id + * read_nvm_cfg_len(): Read NVM config attribute value. * - * @return config id length, 0 on error. + * @cdev: Qed dev pointer. + * @cmd: NVM CFG command id. + * + * Return: config id length, 0 on error. */ int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd); /** - * @brief set_grc_config - Configure value for grc config id. - * @param cdev - * @param cfg_id - grc config id - * @param val - grc config value + * set_grc_config(): Configure value for grc config id. + * + * @cdev: Qed dev pointer. + * @cfg_id: grc config id + * @val: grc config value * + * Return: Int. */ int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val); @@ -1397,18 +1422,16 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) } /** + * qed_sb_ack(): This function creates an update command for interrupts + * that is written to the IGU. * - * @brief This function creates an update command for interrupts that is - * written to the IGU. - * - * @param sb_info - This is the structure allocated and - * initialized per status block. Assumption is - * that it was initialized using qed_sb_init - * @param int_cmd - Enable/Disable/Nop - * @param upd_flg - whether igu consumer should be - * updated. + * @sb_info: This is the structure allocated and + * initialized per status block. Assumption is + * that it was initialized using qed_sb_init + * @int_cmd: Enable/Disable/Nop + * @upd_flg: Whether igu consumer should be updated. * - * @return inline void + * Return: inline void. */ static inline void qed_sb_ack(struct qed_sb_info *sb_info, enum igu_int_cmd int_cmd, diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index 04180d9af560..494cdc3cd840 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -182,7 +182,7 @@ struct qed_iscsi_cb_ops { * @param stats - pointer to struct that would be filled with stats * @return 0 on success, error otherwise. - * @change_mac Change MAC of interface + * @change_mac: Change MAC of interface * @param cdev * @param handle - the connection handle. * @param mac - new MAC to configure. diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index ff808d248883..5b67cd03276e 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -208,57 +208,57 @@ enum qed_ll2_xmit_flags { struct qed_ll2_ops { /** - * @brief start - initializes ll2 + * start(): Initializes ll2. * - * @param cdev - * @param params - protocol driver configuration for the ll2. + * @cdev: Qed dev pointer. + * @params: Protocol driver configuration for the ll2. * - * @return 0 on success, otherwise error value. + * Return: 0 on success, otherwise error value. */ int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params); /** - * @brief stop - stops the ll2 + * stop(): Stops the ll2. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value. */ int (*stop)(struct qed_dev *cdev); /** - * @brief start_xmit - transmits an skb over the ll2 interface + * start_xmit(): Transmits an skb over the ll2 interface. * - * @param cdev - * @param skb - * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags. + * @cdev: Qed dev pointer. + * @skb: SKB. + * @xmit_flags: Transmit options defined by the enum qed_ll2_xmit_flags. * - * @return 0 on success, otherwise error value. + * Return: 0 on success, otherwise error value. */ int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb, unsigned long xmit_flags); /** - * @brief register_cb_ops - protocol driver register the callback for Rx/Tx + * register_cb_ops(): Protocol driver registers the callback for Rx/Tx * packets. Should be called before `start'. * - * @param cdev - * @param cookie - to be passed to the callback functions. - * @param ops - the callback functions to register for Rx / Tx. + * @cdev: Qed dev pointer. + * @cookie: to be passed to the callback functions. + * @ops: the callback functions to register for Rx / Tx. * - * @return 0 on success, otherwise error value. + * Return: 0 on success, otherwise error value. */ void (*register_cb_ops)(struct qed_dev *cdev, const struct qed_ll2_cb_ops *ops, void *cookie); /** - * @brief get LL2 related statistics + * get_stats(): Get LL2 related statistics. * - * @param cdev - * @param stats - pointer to struct that would be filled with stats + * @cdev: Qed dev pointer. + * @stats: Pointer to struct that would be filled with stats. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats); }; diff --git a/include/linux/qed/qed_nvmetcp_if.h b/include/linux/qed/qed_nvmetcp_if.h index 14671bc19ed1..1d51df347560 100644 --- a/include/linux/qed/qed_nvmetcp_if.h +++ b/include/linux/qed/qed_nvmetcp_if.h @@ -171,6 +171,23 @@ struct nvmetcp_task_params { * @param dest_port * @clear_all_filters: Clear all filters. * @param cdev + * @init_read_io: Init read IO. + * @task_params + * @cmd_pdu_header + * @nvme_cmd + * @sgl_task_params + * @init_write_io: Init write IO. + * @task_params + * @cmd_pdu_header + * @nvme_cmd + * @sgl_task_params + * @init_icreq_exchange: Exchange ICReq. + * @task_params + * @init_conn_req_pdu_hdr + * @tx_sgl_task_params + * @rx_sgl_task_params + * @init_task_cleanup: Init task cleanup.
+ * @task_params */ struct qed_nvmetcp_ops { const struct qed_common_ops *common; diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h index 5cdfcb873a8f..772d45b2a60a 100644 --- a/include/linux/raid_class.h +++ b/include/linux/raid_class.h @@ -77,7 +77,3 @@ DEFINE_RAID_ATTRIBUTE(enum raid_state, state) struct raid_template *raid_class_attach(struct raid_function_template *); void raid_class_release(struct raid_template *); - -int __must_check raid_component_add(struct raid_template *, struct device *, - struct device *); - diff --git a/include/linux/sched.h b/include/linux/sched.h index 7c17742d359c..7bfc2b45cd99 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1797,7 +1797,9 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags) } extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); -extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus); +extern int task_can_attach(struct task_struct *p); +extern int dl_bw_alloc(int cpu, u64 dl_bw); +extern void dl_bw_free(int cpu, u64 dl_bw); #ifdef CONFIG_SMP extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 6e18ca234f81..4273505d309a 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -63,6 +63,7 @@ struct sk_psock_progs { enum sk_psock_state_bits { SK_PSOCK_TX_ENABLED, + SK_PSOCK_RX_STRP_ENABLED, }; struct sk_psock_link { diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index a960de68ac69..6047058d6703 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -148,6 +148,10 @@ retry: if (gso_type & SKB_GSO_UDP) nh_off -= thlen; + /* Kernel has a special handling for GSO_BY_FRAGS. */ + if (gso_size == GSO_BY_FRAGS) + return -EINVAL; + /* Too small packets are not really GSO ones. 
*/ if (skb->len - nh_off > gso_size) { shinfo->gso_size = gso_size; diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h index 5a91b548ecc0..8d52c4506762 100644 --- a/include/media/v4l2-mem2mem.h +++ b/include/media/v4l2-mem2mem.h @@ -588,7 +588,14 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, static inline unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) { - return m2m_ctx->out_q_ctx.num_rdy; + unsigned int num_buf_rdy; + unsigned long flags; + + spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy; + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + + return num_buf_rdy; } /** @@ -600,7 +607,14 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) static inline unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) { - return m2m_ctx->cap_q_ctx.num_rdy; + unsigned int num_buf_rdy; + unsigned long flags; + + spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy; + spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + + return num_buf_rdy; } /** diff --git a/include/net/bonding.h b/include/net/bonding.h index e4453cf4f017..08d222752cc8 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -700,37 +700,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond, } /* Caller must hold rcu_read_lock() for read */ -static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond, - const u8 *mac) +static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac) { struct list_head *iter; struct slave *tmp; bond_for_each_slave_rcu(bond, tmp, iter) if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) - return tmp; - - return NULL; -} - -/* Caller must hold rcu_read_lock() for read */ -static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac) -{ - struct list_head *iter; - struct slave *tmp; - struct netdev_hw_addr *ha; - - bond_for_each_slave_rcu(bond, tmp, iter) - if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) return true; - - if (netdev_uc_empty(bond->dev)) - return false; - - netdev_for_each_uc_addr(ha, bond->dev) - if (ether_addr_equal_64bits(mac, ha->addr)) - return true; - return false; } diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 27336fc70467..963a810ed70d 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -510,6 +510,9 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband, if (WARN_ON(iftype >= NL80211_IFTYPE_MAX)) return NULL; + if (iftype == NL80211_IFTYPE_AP_VLAN) + iftype = NL80211_IFTYPE_AP; + for (i = 0; i < sband->n_iftype_data; i++) { const struct ieee80211_sband_iftype_data *data = &sband->iftype_data[i]; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index d486bddda15d..1458b3eae8ad 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1144,6 +1144,29 @@ int __nft_release_basechain(struct nft_ctx *ctx); unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv); +static inline bool nft_use_inc(u32 *use) +{ + if (*use == UINT_MAX) + return false; + + (*use)++; + + return true; +} + +static inline void nft_use_dec(u32 *use) +{ + WARN_ON_ONCE((*use)-- == 0); +} + +/* For error and abort path: restore use counter to previous state. 
*/ +static inline void nft_use_inc_restore(u32 *use) +{ + WARN_ON_ONCE(!nft_use_inc(use)); +} + +#define nft_use_dec_restore nft_use_dec + /** * struct nft_table - nf_tables table * @@ -1227,8 +1250,8 @@ struct nft_object { struct list_head list; struct rhlist_head rhlhead; struct nft_object_hash_key key; - u32 genmask:2, - use:30; + u32 genmask:2; + u32 use; u64 handle; u16 udlen; u8 *udata; @@ -1330,8 +1353,8 @@ struct nft_flowtable { char *name; int hooknum; int ops_len; - u32 genmask:2, - use:30; + u32 genmask:2; + u32 use; u64 handle; /* runtime data below here */ struct list_head hook_list ____cacheline_aligned; diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 9f48733bfd21..a2a74e0e5c49 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -175,8 +175,8 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname, int rtnl_delete_link(struct net_device *dev); int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm); -int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, - struct netlink_ext_ack *exterr); +int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, + struct netlink_ext_ack *exterr); struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid); #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind) diff --git a/include/net/sock.h b/include/net/sock.h index 93a6717213ae..640bd7a36777 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1259,6 +1259,7 @@ struct proto { /* * Pressure flag: try to collapse. * Technical note: it is used by multiple contexts non atomically. + * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes. * All the __sk_mem_schedule() is of this nature: accounting * is strict, actions are advisory and have some latency. 
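The sock.h hunk below converts every lockless access of the shared pressure flag to the ONCE accessors, as its new comment prescribes. A minimal sketch of the pattern, with an invented flag name:

#include <linux/compiler.h>

static unsigned long my_pressure;	/* hypothetical flag, written and read locklessly */

static void my_enter_pressure(void)
{
	WRITE_ONCE(my_pressure, 1);	/* single, non-torn store */
}

static bool my_under_pressure(void)
{
	return READ_ONCE(my_pressure) != 0;	/* fresh, non-torn load */
}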
*/ @@ -1381,6 +1382,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk) return sk->sk_prot->memory_pressure != NULL; } +static inline bool sk_under_global_memory_pressure(const struct sock *sk) +{ + return sk->sk_prot->memory_pressure && + !!READ_ONCE(*sk->sk_prot->memory_pressure); +} + static inline bool sk_under_memory_pressure(const struct sock *sk) { if (!sk->sk_prot->memory_pressure) @@ -1390,7 +1397,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk) mem_cgroup_under_socket_pressure(sk->sk_memcg)) return true; - return !!*sk->sk_prot->memory_pressure; + return !!READ_ONCE(*sk->sk_prot->memory_pressure); } static inline long @@ -1448,7 +1455,7 @@ proto_memory_pressure(struct proto *prot) { if (!prot->memory_pressure) return false; - return !!*prot->memory_pressure; + return !!READ_ONCE(*prot->memory_pressure); } diff --git a/include/net/tls.h b/include/net/tls.h index bf3d63a52788..eda0015c5c59 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -179,6 +179,8 @@ struct tls_offload_context_tx { struct scatterlist sg_tx_data[MAX_SKB_FRAGS]; void (*sk_destruct)(struct sock *sk); + struct work_struct destruct_work; + struct tls_context *ctx; u8 driver_state[] __aligned(8); /* The TLS layer reserves room for driver specific state * Currently the belief is that there is not enough diff --git a/include/net/vxlan.h b/include/net/vxlan.h index cf1d870f7b9a..e149a0b6f9a3 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -497,12 +497,12 @@ static inline void vxlan_flag_attr_error(int attrtype, } static inline bool vxlan_fdb_nh_path_select(struct nexthop *nh, - int hash, + u32 hash, struct vxlan_rdst *rdst) { struct fib_nh_common *nhc; - nhc = nexthop_path_fdb_result(nh, hash); + nhc = nexthop_path_fdb_result(nh, hash >> 1); if (unlikely(!nhc)) return false; diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index 29414288ea3e..34ce197bd76e 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -462,11 +462,9 @@ TRACE_EVENT(jbd2_shrink_scan_exit, TRACE_EVENT(jbd2_shrink_checkpoint_list, TP_PROTO(journal_t *journal, tid_t first_tid, tid_t tid, tid_t last_tid, - unsigned long nr_freed, unsigned long nr_scanned, - tid_t next_tid), + unsigned long nr_freed, tid_t next_tid), - TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, - nr_scanned, next_tid), + TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, next_tid), TP_STRUCT__entry( __field(dev_t, dev) @@ -474,7 +472,6 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list, __field(tid_t, tid) __field(tid_t, last_tid) __field(unsigned long, nr_freed) - __field(unsigned long, nr_scanned) __field(tid_t, next_tid) ), @@ -484,15 +481,14 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list, __entry->tid = tid; __entry->last_tid = last_tid; __entry->nr_freed = nr_freed; - __entry->nr_scanned = nr_scanned; __entry->next_tid = next_tid; ), TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu " - "scanned %lu next transaction %u", + "next transaction %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->first_tid, __entry->tid, __entry->last_tid, - __entry->nr_freed, __entry->nr_scanned, __entry->next_tid) + __entry->nr_freed, __entry->next_tid) ); #endif /* _TRACE_JBD2_H */ diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 774b1ae8adf7..7c98a820c8dd 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -4375,9 +4375,11 @@ static int io_openat2(struct io_kiocb *req, unsigned int issue_flags) if (issue_flags & IO_URING_F_NONBLOCK) { /* * Don't bother 
trying for O_TRUNC, O_CREAT, or O_TMPFILE open, - * it'll always -EAGAIN + * it'll always -EAGAIN. Note that we test for __O_TMPFILE + * because O_TMPFILE includes O_DIRECTORY, which isn't a flag + * we need to force async for. */ - if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE)) + if (req->open.how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE)) return -EAGAIN; op.lookup_flags |= LOOKUP_CACHED; op.open_flag |= O_NONBLOCK; @@ -7802,12 +7804,21 @@ static int io_run_task_work_sig(void) return -EINTR; } +static bool current_pending_io(void) +{ + struct io_uring_task *tctx = current->io_uring; + + if (!tctx) + return false; + return percpu_counter_read_positive(&tctx->inflight); +} + /* when returns >0, the caller should retry */ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, struct io_wait_queue *iowq, ktime_t *timeout) { - int token, ret; + int io_wait, ret; /* make sure we run task_work before checking for signals */ ret = io_run_task_work_sig(); @@ -7818,15 +7829,17 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, return 1; /* - * Use io_schedule_prepare/finish, so cpufreq can take into account - * that the task is waiting for IO - turns out to be important for low - * QD IO. + * Mark us as being in io_wait if we have pending requests, so cpufreq + * can take into account that the task is waiting for IO - turns out + * to be important for low QD IO. */ - token = io_schedule_prepare(); + io_wait = current->in_iowait; + if (current_pending_io()) + current->in_iowait = 1; ret = 1; if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS)) ret = -ETIME; - io_schedule_finish(token); + current->in_iowait = io_wait; return ret; } diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index db6221773e43..8d1c4b3ee760 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -26,6 +26,7 @@ #include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/capability.h> +#include <linux/completion.h> #include <trace/events/xdp.h> #include <linux/netdevice.h> /* netif_receive_skb_list */ @@ -70,6 +71,7 @@ struct bpf_cpu_map_entry { struct rcu_head rcu; struct work_struct kthread_stop_wq; + struct completion kthread_running; }; struct bpf_cpu_map { @@ -133,11 +135,17 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring) * invoked cpu_map_kthread_stop(). Catch any broken behaviour * gracefully and warn once. */ - struct xdp_frame *xdpf; + void *ptr; - while ((xdpf = ptr_ring_consume(ring))) - if (WARN_ON_ONCE(xdpf)) - xdp_return_frame(xdpf); + while ((ptr = ptr_ring_consume(ring))) { + WARN_ON_ONCE(1); + if (unlikely(__ptr_test_bit(0, &ptr))) { + __ptr_clear_bit(0, &ptr); + kfree_skb(ptr); + continue; + } + xdp_return_frame(ptr); + } } static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) @@ -157,7 +165,6 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) static void cpu_map_kthread_stop(struct work_struct *work) { struct bpf_cpu_map_entry *rcpu; - int err; rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq); @@ -167,14 +174,7 @@ static void cpu_map_kthread_stop(struct work_struct *work) rcu_barrier(); /* kthread_stop will wake_up_process and wait for it to complete */ - err = kthread_stop(rcpu->kthread); - if (err) { - /* kthread_stop may be called before cpu_map_kthread_run - * is executed, so we need to release the memory related - * to rcpu. 
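The cpumap fix in the hunks around this point replaces the fragile early-stop handling with a startup handshake: creation waits until the kthread has actually run, so a later kthread_stop() can never target a thread that never started. A generic sketch of that handshake, with invented structure and function names:

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct my_worker {
	struct completion running;
	struct task_struct *task;
};

static int my_worker_fn(void *data)
{
	struct my_worker *w = data;

	complete(&w->running);		/* creator may now rely on us having run */

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int my_worker_start(struct my_worker *w)
{
	init_completion(&w->running);
	w->task = kthread_create(my_worker_fn, w, "my_worker");
	if (IS_ERR(w->task))
		return PTR_ERR(w->task);

	wake_up_process(w->task);
	wait_for_completion(&w->running);	/* kthread_stop() is now always safe */
	return 0;
}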
- */ - put_cpu_map_entry(rcpu); - } + kthread_stop(rcpu->kthread); } static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu, @@ -302,11 +302,11 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames, return nframes; } - static int cpu_map_kthread_run(void *data) { struct bpf_cpu_map_entry *rcpu = data; + complete(&rcpu->kthread_running); set_current_state(TASK_INTERRUPTIBLE); /* When kthread gives stop order, then rcpu have been disconnected @@ -469,6 +469,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, goto free_ptr_ring; /* Setup kthread */ + init_completion(&rcpu->kthread_running); rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa, "cpumap/%d/map:%d", cpu, map->id); @@ -482,6 +483,12 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, kthread_bind(rcpu->kthread, cpu); wake_up_process(rcpu->kthread); + /* Make sure kthread has been running, so kthread_stop() will not + * stop the kthread prematurely and all pending frames or skbs + * will be handled by the kthread before kthread_stop() returns. + */ + wait_for_completion(&rcpu->kthread_running); + return rcpu; free_prog: diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e1848a2a7230..ecf4332ff312 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -517,6 +517,12 @@ static bool is_ptr_cast_function(enum bpf_func_id func_id) func_id == BPF_FUNC_skc_to_tcp_request_sock; } +static bool is_callback_calling_function(enum bpf_func_id func_id) +{ + return func_id == BPF_FUNC_for_each_map_elem || + func_id == BPF_FUNC_timer_set_callback; +} + static bool is_cmpxchg_insn(const struct bpf_insn *insn) { return BPF_CLASS(insn->code) == BPF_STX && @@ -1446,7 +1452,7 @@ static void __mark_reg_unknown(const struct bpf_verifier_env *env, reg->type = SCALAR_VALUE; reg->var_off = tnum_unknown; reg->frameno = 0; - reg->precise = env->subprog_cnt > 1 || !env->bpf_capable; + reg->precise = !env->bpf_capable; __mark_reg_unbounded(reg); } @@ -2267,6 +2273,11 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, */ if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0) return -ENOTSUPP; + /* BPF helpers that invoke callback subprogs are + * equivalent to BPF_PSEUDO_CALL above + */ + if (insn->src_reg == 0 && is_callback_calling_function(insn->imm)) + return -ENOTSUPP; /* regular helper call sets R0 */ *reg_mask &= ~1; if (*reg_mask & 0x3f) { @@ -2371,8 +2382,11 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, /* big hammer: mark all scalars precise in this path. * pop_stack may still get !precise scalars. + * We also skip current state and go straight to first parent state, + * because precision markings in current non-checkpointed state are + * not needed. See why in the comment in __mark_chain_precision below. 
*/ - for (; st; st = st->parent) + for (st = st->parent; st; st = st->parent) { for (i = 0; i <= st->curframe; i++) { func = st->frame[i]; for (j = 0; j < BPF_REG_FP; j++) { @@ -2390,8 +2404,121 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, reg->precise = true; } } + } +} + +static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st) +{ + struct bpf_func_state *func; + struct bpf_reg_state *reg; + int i, j; + + for (i = 0; i <= st->curframe; i++) { + func = st->frame[i]; + for (j = 0; j < BPF_REG_FP; j++) { + reg = &func->regs[j]; + if (reg->type != SCALAR_VALUE) + continue; + reg->precise = false; + } + for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { + if (!is_spilled_reg(&func->stack[j])) + continue; + reg = &func->stack[j].spilled_ptr; + if (reg->type != SCALAR_VALUE) + continue; + reg->precise = false; + } + } } +/* + * __mark_chain_precision() backtracks BPF program instruction sequence and + * chain of verifier states making sure that register *regno* (if regno >= 0) + * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked + * SCALARS, as well as any other registers and slots that contribute to + * a tracked state of given registers/stack slots, depending on specific BPF + * assembly instructions (see backtrack_insn() for exact instruction handling + * logic). This backtracking relies on recorded jmp_history and is able to + * traverse entire chain of parent states. This process ends only when all the + * necessary registers/slots and their transitive dependencies are marked as + * precise. + * + * One important and subtle aspect is that precise marks *do not matter* in + * the currently verified state (current state). It is important to understand + * why this is the case. + * + * First, note that current state is the state that is not yet "checkpointed", + * i.e., it is not yet put into env->explored_states, and it has no children + * states as well. It's ephemeral, and can end up either a) being discarded if + * compatible explored state is found at some point or BPF_EXIT instruction is + * reached or b) checkpointed and put into env->explored_states, branching out + * into one or more children states. + * + * In the former case, precise markings in current state are completely + * ignored by state comparison code (see regsafe() for details). Only + * checkpointed ("old") state precise markings are important, and if old + * state's register/slot is precise, regsafe() assumes current state's + * register/slot as precise and checks value ranges exactly and precisely. If + * states turn out to be compatible, current state's necessary precise + * markings and any required parent states' precise markings are enforced + * after the fact with propagate_precision() logic. But it's + * important to realize that in this case, even after marking current state + * registers/slots as precise, we immediately discard current state. So what + * actually matters is any of the precise markings propagated into current + * state's parent states, which are always checkpointed (due to b) case above). + * As such, for scenario a) it doesn't matter if current state has precise + * markings set or not. + * + * Now, for the scenario b), checkpointing and forking into child(ren) + * state(s).
Note that before current state gets to checkpointing step, any + * processed instruction always assumes precise SCALAR register/slot + * knowledge: if precise value or range is useful to prune jump branch, BPF + * verifier takes this opportunity enthusiastically. Similarly, when + * register's value is used to calculate offset or memory address, exact + * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to + * what we mentioned above about state comparison ignoring precise markings + * during state comparison, BPF verifier ignores and also assumes precise + * markings *at will* during instruction verification process. But as verifier + * assumes precision, it also propagates any precision dependencies across + * parent states, which are not yet finalized, so can be further restricted + * based on new knowledge gained from restrictions enforced by their children + * states. This is so that once those parent states are finalized, i.e., when + * they have no more active children state, state comparison logic in + * is_state_visited() would enforce strict and precise SCALAR ranges, if + * required for correctness. + * + * To build a bit more intuition, note also that once a state is checkpointed, + * the path we took to get to that state is not important. This is a crucial + * property for state pruning. When state is checkpointed and finalized at + * some instruction index, it can be correctly and safely used to "short + * circuit" any *compatible* state that reaches exactly the same instruction + * index. I.e., if we jumped to that instruction from a completely different + * code path than the original finalized state was derived from, it doesn't + * matter, current state can be discarded because from that instruction + * forward having a compatible state will ensure we will safely reach the + * exit. States describe preconditions for further exploration, but completely + * forget the history of how we got here. + * + * This also means that even if we needed precise SCALAR range to get to + * finalized state, but from that point forward *that same* SCALAR register is + * never used in a precise context (i.e., its precise value is not needed for + * correctness), it's correct and safe to mark such register as "imprecise" + * (i.e., precise marking set to false). This is what we rely on when we do + * not set precise marking in current state. If no child state requires + * precision for any given SCALAR register, it's safe to dictate that it can + * be imprecise. If any child state does require this register to be precise, + * we'll mark it precise later retroactively during precise markings + * propagation from child state to parent states. + * + * Skipping precise marking setting in current state is a mild version of + * relying on the above observation. But we can utilize this property even + * more aggressively by proactively forgetting any precise marking in the + * current state (which we inherited from the parent state), right before we + * checkpoint it and branch off into new child state. This is done by + * mark_all_scalars_imprecise() to hopefully get more permissive and generic + * finalized states which help in short circuiting more future states.
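For intuition, here is a toy BPF-C program (not part of the patch; the map, names, and section are invented) in which the same scalar needs precision on only one path, which is exactly the freedom the comment above exploits:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} vals SEC(".maps");

SEC("socket")
int toy(struct __sk_buff *skb)
{
	__u32 zero = 0, idx = skb->len & 0xff;	/* scalar with range [0, 255] */
	char *v = bpf_map_lookup_elem(&vals, &zero);

	if (!v)
		return 0;
	if (idx < 8)
		return v[idx];	/* verifier must track idx's range precisely here */
	return 0;		/* on this path idx is dead, so it may stay imprecise */
}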
+ */ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno, int spi) { @@ -2409,6 +2536,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r if (!env->bpf_capable) return 0; + /* Do sanity checks against current state of register and/or stack + * slot, but don't set precise flag in current state, as precision + * tracking in the current state is unnecessary. + */ func = st->frame[frame]; if (regno >= 0) { reg = &func->regs[regno]; @@ -2416,11 +2547,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r WARN_ONCE(1, "backtracing misuse"); return -EFAULT; } - if (!reg->precise) - new_marks = true; - else - reg_mask = 0; - reg->precise = true; + new_marks = true; } while (spi >= 0) { @@ -2433,11 +2560,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r stack_mask = 0; break; } - if (!reg->precise) - new_marks = true; - else - stack_mask = 0; - reg->precise = true; + new_marks = true; break; } @@ -2445,12 +2568,42 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r return 0; if (!reg_mask && !stack_mask) return 0; + for (;;) { DECLARE_BITMAP(mask, 64); u32 history = st->jmp_history_cnt; if (env->log.level & BPF_LOG_LEVEL) verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx); + + if (last_idx < 0) { + /* we are at the entry into subprog, which + * is expected for global funcs, but only if + * requested precise registers are R1-R5 + * (which are global func's input arguments) + */ + if (st->curframe == 0 && + st->frame[0]->subprogno > 0 && + st->frame[0]->callsite == BPF_MAIN_FUNC && + stack_mask == 0 && (reg_mask & ~0x3e) == 0) { + bitmap_from_u64(mask, reg_mask); + for_each_set_bit(i, mask, 32) { + reg = &st->frame[0]->regs[i]; + if (reg->type != SCALAR_VALUE) { + reg_mask &= ~(1u << i); + continue; + } + reg->precise = true; + } + return 0; + } + + verbose(env, "BUG backtracing func entry subprog %d reg_mask %x stack_mask %llx\n", + st->frame[0]->subprogno, reg_mask, stack_mask); + WARN_ONCE(1, "verifier backtracking bug"); + return -EFAULT; + } + for (i = last_idx;;) { if (skip_first) { err = 0; @@ -5806,6 +5959,10 @@ typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env, struct bpf_func_state *callee, int insn_idx); +static int set_callee_state(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee, int insn_idx); + static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx, int subprog, set_callee_state_fn set_callee_state_cb) @@ -5856,6 +6013,16 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn } } + /* set_callee_state is used for direct subprog calls, but we are + * interested in validating only BPF helpers that can call subprogs as + * callbacks + */ + if (set_callee_state_cb != set_callee_state && !is_callback_calling_function(insn->imm)) { + verbose(env, "verifier bug: helper %s#%d is not marked as callback-calling\n", + func_id_name(insn->imm), insn->imm); + return -EFAULT; + } + if (insn->code == (BPF_JMP | BPF_CALL) && insn->src_reg == 0 && insn->imm == BPF_FUNC_timer_set_callback) { @@ -10301,7 +10468,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, if (env->explore_alu_limits) return false; if (rcur->type == SCALAR_VALUE) { - if (!rold->precise && !rcur->precise) + if (!rold->precise) return true; /* new val must satisfy old val knowledge */ return 
range_within(rold, rcur) && @@ -10850,6 +11017,10 @@ next: env->prev_jmps_processed = env->jmps_processed; env->prev_insn_processed = env->insn_processed; + /* forget precise markings we inherited, see __mark_chain_precision */ + if (env->bpf_capable) + mark_all_scalars_imprecise(env, cur); + /* add new state to the head of linked list */ new = &new_sl->state; err = copy_verifier_state(new, cur); @@ -13209,6 +13380,8 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog) BPF_MAIN_FUNC /* callsite */, 0 /* frameno */, subprog); + state->first_insn_idx = env->subprog_info[subprog].start; + state->last_insn_idx = -1; regs = state->frame[state->curframe]->regs; if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 6ccdbce17399..be467aea457e 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -56,6 +56,7 @@ #include <linux/file.h> #include <linux/fs_parser.h> #include <linux/sched/cputime.h> +#include <linux/sched/deadline.h> #include <linux/psi.h> #include <net/sock.h> @@ -6467,6 +6468,9 @@ void cgroup_exit(struct task_struct *tsk) list_add_tail(&tsk->cg_list, &cset->dying_tasks); cset->nr_tasks--; + if (dl_task(tsk)) + dec_dl_tasks_cs(tsk); + WARN_ON_ONCE(cgroup_task_frozen(tsk)); if (unlikely(!(tsk->flags & PF_KTHREAD) && test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags))) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index fb895eaf3a7c..6905079c15c2 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -162,6 +162,14 @@ struct cpuset { int use_parent_ecpus; int child_ecpus_count; + /* + * number of SCHED_DEADLINE tasks attached to this cpuset, so that we + * know when to rebuild associated root domain bandwidth information. + */ + int nr_deadline_tasks; + int nr_migrate_dl_tasks; + u64 sum_migrate_dl_bw; + /* Handle for cpuset.cpus.partition */ struct cgroup_file partition_file; }; @@ -209,6 +217,20 @@ static inline struct cpuset *parent_cs(struct cpuset *cs) return css_cs(cs->css.parent); } +void inc_dl_tasks_cs(struct task_struct *p) +{ + struct cpuset *cs = task_cs(p); + + cs->nr_deadline_tasks++; +} + +void dec_dl_tasks_cs(struct task_struct *p) +{ + struct cpuset *cs = task_cs(p); + + cs->nr_deadline_tasks--; +} + /* bits in struct cpuset flags field */ typedef enum { CS_ONLINE, @@ -312,22 +334,23 @@ static struct cpuset top_cpuset = { if (is_cpuset_online(((des_cs) = css_cs((pos_css))))) /* - * There are two global locks guarding cpuset structures - cpuset_rwsem and + * There are two global locks guarding cpuset structures - cpuset_mutex and * callback_lock. We also require taking task_lock() when dereferencing a * task's cpuset pointer. See "The task_lock() exception", at the end of this - * comment. The cpuset code uses only cpuset_rwsem write lock. Other - * kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to - * prevent change to cpuset structures. + * comment. The cpuset code uses only cpuset_mutex. Other kernel subsystems + * can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset + * structures. Note that cpuset_mutex needs to be a mutex as it is used in + * paths that rely on priority inheritance (e.g. scheduler - on RT) for + * correctness. * * A task must hold both locks to modify cpusets. If a task holds - * cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it - * is the only task able to also acquire callback_lock and be able to - * modify cpusets. 
It can perform various checks on the cpuset structure - * first, knowing nothing will change. It can also allocate memory while - * just holding cpuset_rwsem. While it is performing these checks, various - * callback routines can briefly acquire callback_lock to query cpusets. - * Once it is ready to make the changes, it takes callback_lock, blocking - * everyone else. + * cpuset_mutex, it blocks others, ensuring that it is the only task able to + * also acquire callback_lock and be able to modify cpusets. It can perform + * various checks on the cpuset structure first, knowing nothing will change. + * It can also allocate memory while just holding cpuset_mutex. While it is + * performing these checks, various callback routines can briefly acquire + * callback_lock to query cpusets. Once it is ready to make the changes, it + * takes callback_lock, blocking everyone else. * * Calls to the kernel memory allocator can not be made while holding * callback_lock, as that would risk double tripping on callback_lock @@ -349,16 +372,16 @@ static struct cpuset top_cpuset = { * guidelines for accessing subsystem state in kernel/cgroup.c */ -DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem); +static DEFINE_MUTEX(cpuset_mutex); -void cpuset_read_lock(void) +void cpuset_lock(void) { - percpu_down_read(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); } -void cpuset_read_unlock(void) +void cpuset_unlock(void) { - percpu_up_read(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); } static DEFINE_SPINLOCK(callback_lock); @@ -396,7 +419,7 @@ static inline bool is_in_v2_mode(void) * One way or another, we guarantee to return some non-empty subset * of cpu_online_mask. * - * Call with callback_lock or cpuset_rwsem held. + * Call with callback_lock or cpuset_mutex held. */ static void guarantee_online_cpus(struct task_struct *tsk, struct cpumask *pmask) @@ -438,7 +461,7 @@ out_unlock: * One way or another, we guarantee to return some non-empty subset * of node_states[N_MEMORY]. * - * Call with callback_lock or cpuset_rwsem held. + * Call with callback_lock or cpuset_mutex held. */ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) { @@ -450,7 +473,7 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) /* * update task's spread flag if cpuset's page/slab spread flag is set * - * Call with callback_lock or cpuset_rwsem held. + * Call with callback_lock or cpuset_mutex held. */ static void cpuset_update_task_spread_flag(struct cpuset *cs, struct task_struct *tsk) @@ -471,7 +494,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs, * * One cpuset is a subset of another if all its allowed CPUs and * Memory Nodes are a subset of the other, and its exclusive flags - * are only set if the other's are set. Call holding cpuset_rwsem. + * are only set if the other's are set. Call holding cpuset_mutex. */ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) @@ -580,7 +603,7 @@ static inline void free_cpuset(struct cpuset *cs) * If we replaced the flag and mask values of the current cpuset * (cur) with those values in the trial cpuset (trial), would * our various subset and exclusive rules still be valid? Presumes - * cpuset_rwsem held. + * cpuset_mutex held. * * 'cur' is the address of an actual, in-use cpuset. Operations * such as list traversal that depend on the actual address of the @@ -703,7 +726,7 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr, rcu_read_unlock(); } -/* Must be called with cpuset_rwsem held. 
*/ +/* Must be called with cpuset_mutex held. */ static inline int nr_cpusets(void) { /* jump label reference count + the top-level cpuset */ @@ -729,7 +752,7 @@ static inline int nr_cpusets(void) * domains when operating in the severe memory shortage situations * that could cause allocation failures below. * - * Must be called with cpuset_rwsem held. + * Must be called with cpuset_mutex held. * * The three key local variables below are: * cp - cpuset pointer, used (together with pos_css) to perform a @@ -940,11 +963,14 @@ done: return ndoms; } -static void update_tasks_root_domain(struct cpuset *cs) +static void dl_update_tasks_root_domain(struct cpuset *cs) { struct css_task_iter it; struct task_struct *task; + if (cs->nr_deadline_tasks == 0) + return; + css_task_iter_start(&cs->css, 0, &it); while ((task = css_task_iter_next(&it))) @@ -953,12 +979,12 @@ static void update_tasks_root_domain(struct cpuset *cs) css_task_iter_end(&it); } -static void rebuild_root_domains(void) +static void dl_rebuild_rd_accounting(void) { struct cpuset *cs = NULL; struct cgroup_subsys_state *pos_css; - percpu_rwsem_assert_held(&cpuset_rwsem); + lockdep_assert_held(&cpuset_mutex); lockdep_assert_cpus_held(); lockdep_assert_held(&sched_domains_mutex); @@ -981,7 +1007,7 @@ static void rebuild_root_domains(void) rcu_read_unlock(); - update_tasks_root_domain(cs); + dl_update_tasks_root_domain(cs); rcu_read_lock(); css_put(&cs->css); @@ -995,7 +1021,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[], { mutex_lock(&sched_domains_mutex); partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); - rebuild_root_domains(); + dl_rebuild_rd_accounting(); mutex_unlock(&sched_domains_mutex); } @@ -1008,7 +1034,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[], * 'cpus' is removed, then call this routine to rebuild the * scheduler's dynamic sched domains. * - * Call with cpuset_rwsem held. Takes cpus_read_lock(). + * Call with cpuset_mutex held. Takes cpus_read_lock(). */ static void rebuild_sched_domains_locked(void) { @@ -1019,7 +1045,7 @@ static void rebuild_sched_domains_locked(void) int ndoms; lockdep_assert_cpus_held(); - percpu_rwsem_assert_held(&cpuset_rwsem); + lockdep_assert_held(&cpuset_mutex); /* * If we have raced with CPU hotplug, return early to avoid @@ -1070,9 +1096,9 @@ static void rebuild_sched_domains_locked(void) void rebuild_sched_domains(void) { cpus_read_lock(); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); rebuild_sched_domains_locked(); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); cpus_read_unlock(); } @@ -1081,7 +1107,7 @@ void rebuild_sched_domains(void) * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed * * Iterate through each task of @cs updating its cpus_allowed to the - * effective cpuset's. As this function is called with cpuset_rwsem held, + * effective cpuset's. As this function is called with cpuset_mutex held, * cpuset membership stays stable. */ static void update_tasks_cpumask(struct cpuset *cs) @@ -1188,7 +1214,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd, int old_prs, new_prs; bool part_error = false; /* Partition error? */ - percpu_rwsem_assert_held(&cpuset_rwsem); + lockdep_assert_held(&cpuset_mutex); /* * The parent must be a partition root. @@ -1358,7 +1384,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd, * * On legacy hierarchy, effective_cpus will be the same with cpu_allowed. 
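The dl_update_tasks_root_domain() hunk above is the consumer of that counter: a root-domain rebuild walks every task in every cpuset, and the new early return lets it skip cpusets that host no deadline tasks at all. The shape of that optimisation, reduced to a sketch with hypothetical task-list types:

struct task_like { struct task_like *next; };

struct group_like {
	int nr_deadline_tasks;
	struct task_like *tasks;
};

static void account_task(struct task_like *t)
{
	(void)t;	/* dl_add_task_root_domain() in the kernel */
}

static void rebuild_group_accounting(struct group_like *g)
{
	/* The walk below touches every task in the group; skip it
	 * entirely when the counter says no deadline task lives here. */
	if (g->nr_deadline_tasks == 0)
		return;

	for (struct task_like *t = g->tasks; t; t = t->next)
		account_task(t);
}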
* - * Called with cpuset_rwsem held + * Called with cpuset_mutex held */ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) { @@ -1521,7 +1547,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, struct cpuset *sibling; struct cgroup_subsys_state *pos_css; - percpu_rwsem_assert_held(&cpuset_rwsem); + lockdep_assert_held(&cpuset_mutex); /* * Check all its siblings and call update_cpumasks_hier() @@ -1724,12 +1750,12 @@ static void *cpuset_being_rebound; * @cs: the cpuset in which each task's mems_allowed mask needs to be changed * * Iterate through each task of @cs updating its mems_allowed to the - * effective cpuset's. As this function is called with cpuset_rwsem held, + * effective cpuset's. As this function is called with cpuset_mutex held, * cpuset membership stays stable. */ static void update_tasks_nodemask(struct cpuset *cs) { - static nodemask_t newmems; /* protected by cpuset_rwsem */ + static nodemask_t newmems; /* protected by cpuset_mutex */ struct css_task_iter it; struct task_struct *task; @@ -1742,7 +1768,7 @@ static void update_tasks_nodemask(struct cpuset *cs) * take while holding tasklist_lock. Forks can happen - the * mpol_dup() cpuset_being_rebound check will catch such forks, * and rebind their vma mempolicies too. Because we still hold - * the global cpuset_rwsem, we know that no other rebind effort + * the global cpuset_mutex, we know that no other rebind effort * will be contending for the global variable cpuset_being_rebound. * It's ok if we rebind the same mm twice; mpol_rebind_mm() * is idempotent. Also migrate pages in each mm to new nodes. @@ -1788,7 +1814,7 @@ static void update_tasks_nodemask(struct cpuset *cs) * * On legacy hierarchy, effective_mems will be the same with mems_allowed. * - * Called with cpuset_rwsem held + * Called with cpuset_mutex held */ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) { @@ -1841,7 +1867,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) * mempolicies and if the cpuset is marked 'memory_migrate', * migrate the tasks pages to the new memory. * - * Call with cpuset_rwsem held. May take callback_lock during call. + * Call with cpuset_mutex held. May take callback_lock during call. * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, * lock each such tasks mm->mmap_lock, scan its vma's and rebind * their mempolicies to the cpusets new mems_allowed. @@ -1931,7 +1957,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val) * @cs: the cpuset in which each task's spread flags needs to be changed * * Iterate through each task of @cs updating its spread flags. As this - * function is called with cpuset_rwsem held, cpuset membership stays + * function is called with cpuset_mutex held, cpuset membership stays * stable. */ static void update_tasks_flags(struct cpuset *cs) @@ -1951,7 +1977,7 @@ static void update_tasks_flags(struct cpuset *cs) * cs: the cpuset to update * turning_on: whether the flag is being set or cleared * - * Call with cpuset_rwsem held. + * Call with cpuset_mutex held. */ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, @@ -2000,7 +2026,7 @@ out: * cs: the cpuset to update * new_prs: new partition root state * - * Call with cpuset_rwsem held. + * Call with cpuset_mutex held. 
*/ static int update_prstate(struct cpuset *cs, int new_prs) { @@ -2182,19 +2208,26 @@ static int fmeter_getrate(struct fmeter *fmp) static struct cpuset *cpuset_attach_old_cs; -/* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */ +static void reset_migrate_dl_data(struct cpuset *cs) +{ + cs->nr_migrate_dl_tasks = 0; + cs->sum_migrate_dl_bw = 0; +} + +/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ static int cpuset_can_attach(struct cgroup_taskset *tset) { struct cgroup_subsys_state *css; - struct cpuset *cs; + struct cpuset *cs, *oldcs; struct task_struct *task; int ret; /* used later by cpuset_attach() */ cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); + oldcs = cpuset_attach_old_cs; cs = css_cs(css); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); /* allow moving tasks into an empty cpuset if on default hierarchy */ ret = -ENOSPC; @@ -2203,14 +2236,39 @@ static int cpuset_can_attach(struct cgroup_taskset *tset) goto out_unlock; cgroup_taskset_for_each(task, css, tset) { - ret = task_can_attach(task, cs->effective_cpus); + ret = task_can_attach(task); if (ret) goto out_unlock; ret = security_task_setscheduler(task); if (ret) goto out_unlock; + + if (dl_task(task)) { + cs->nr_migrate_dl_tasks++; + cs->sum_migrate_dl_bw += task->dl.dl_bw; + } } + if (!cs->nr_migrate_dl_tasks) + goto out_success; + + if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { + int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); + + if (unlikely(cpu >= nr_cpu_ids)) { + reset_migrate_dl_data(cs); + ret = -EINVAL; + goto out_unlock; + } + + ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); + if (ret) { + reset_migrate_dl_data(cs); + goto out_unlock; + } + } + +out_success: /* * Mark attach is in progress. This makes validate_change() fail * changes which zero cpus/mems_allowed. @@ -2218,7 +2276,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset) cs->attach_in_progress++; ret = 0; out_unlock: - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); return ret; } @@ -2230,15 +2288,23 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset) cgroup_taskset_first(tset, &css); cs = css_cs(css); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); cs->attach_in_progress--; if (!cs->attach_in_progress) wake_up(&cpuset_attach_wq); - percpu_up_write(&cpuset_rwsem); + + if (cs->nr_migrate_dl_tasks) { + int cpu = cpumask_any(cs->effective_cpus); + + dl_bw_free(cpu, cs->sum_migrate_dl_bw); + reset_migrate_dl_data(cs); + } + + mutex_unlock(&cpuset_mutex); } /* - * Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach() + * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach() * but we can't allocate it dynamically there. Define it global and * allocate from cpuset_init(). 
*/ @@ -2246,7 +2312,7 @@ static cpumask_var_t cpus_attach; static void cpuset_attach(struct cgroup_taskset *tset) { - /* static buf protected by cpuset_rwsem */ + /* static buf protected by cpuset_mutex */ static nodemask_t cpuset_attach_nodemask_to; struct task_struct *task; struct task_struct *leader; @@ -2258,7 +2324,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) cs = css_cs(css); lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */ - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); guarantee_online_mems(cs, &cpuset_attach_nodemask_to); @@ -2306,11 +2372,17 @@ static void cpuset_attach(struct cgroup_taskset *tset) cs->old_mems_allowed = cpuset_attach_nodemask_to; + if (cs->nr_migrate_dl_tasks) { + cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; + oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; + reset_migrate_dl_data(cs); + } + cs->attach_in_progress--; if (!cs->attach_in_progress) wake_up(&cpuset_attach_wq); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); } /* The various types of files and directories in a cpuset file system */ @@ -2342,7 +2414,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, int retval = 0; cpus_read_lock(); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); if (!is_cpuset_online(cs)) { retval = -ENODEV; goto out_unlock; @@ -2378,7 +2450,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, break; } out_unlock: - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); cpus_read_unlock(); return retval; } @@ -2391,7 +2463,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, int retval = -ENODEV; cpus_read_lock(); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); if (!is_cpuset_online(cs)) goto out_unlock; @@ -2404,7 +2476,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, break; } out_unlock: - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); cpus_read_unlock(); return retval; } @@ -2437,7 +2509,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, * operation like this one can lead to a deadlock through kernfs * active_ref protection. Let's break the protection. Losing the * protection is okay as we check whether @cs is online after - * grabbing cpuset_rwsem anyway. This only happens on the legacy + * grabbing cpuset_mutex anyway. This only happens on the legacy * hierarchies. 
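Taken together, the attach hunks above implement a prepare/commit/cancel protocol for deadline bandwidth: cpuset_can_attach() sums the bandwidth of the deadline tasks being moved and, when source and destination share no effective CPUs, reserves it with dl_bw_alloc(); cpuset_cancel_attach() rolls the reservation back with dl_bw_free(); cpuset_attach() commits by transferring nr_deadline_tasks from the old cpuset to the new one. A compact sketch of that protocol against a toy capacity pool (all names hypothetical):

static unsigned long pool_free = 100;	/* stand-in for per-rd dl_bw */

static int reserve(unsigned long bw)	/* kernel: dl_bw_alloc() */
{
	if (bw > pool_free)
		return -1;		/* would overflow: -EBUSY */
	pool_free -= bw;
	return 0;
}

static void unreserve(unsigned long bw)	/* kernel: dl_bw_free() */
{
	pool_free += bw;
}

struct cs_like { int nr_dl; unsigned long pending_bw; };

static int can_attach(struct cs_like *dst, unsigned long sum_bw)
{
	if (reserve(sum_bw))
		return -1;		/* fail early, nothing to undo */
	dst->pending_bw = sum_bw;	/* remember for commit/rollback */
	return 0;
}

static void cancel_attach(struct cs_like *dst)
{
	unreserve(dst->pending_bw);	/* rollback the reservation */
	dst->pending_bw = 0;
}

static void attach(struct cs_like *dst, struct cs_like *src, int nr_dl)
{
	dst->nr_dl += nr_dl;		/* commit: counters follow the tasks */
	src->nr_dl -= nr_dl;
	dst->pending_bw = 0;		/* reservation now owned by dst */
}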
*/ css_get(&cs->css); @@ -2445,7 +2517,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, flush_work(&cpuset_hotplug_work); cpus_read_lock(); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); if (!is_cpuset_online(cs)) goto out_unlock; @@ -2469,7 +2541,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, free_cpuset(trialcs); out_unlock: - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); cpus_read_unlock(); kernfs_unbreak_active_protection(of->kn); css_put(&cs->css); @@ -2602,13 +2674,13 @@ static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf, css_get(&cs->css); cpus_read_lock(); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); if (!is_cpuset_online(cs)) goto out_unlock; retval = update_prstate(cs, val); out_unlock: - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); cpus_read_unlock(); css_put(&cs->css); return retval ?: nbytes; @@ -2821,7 +2893,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) return 0; cpus_read_lock(); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); set_bit(CS_ONLINE, &cs->flags); if (is_spread_page(parent)) @@ -2872,7 +2944,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) cpumask_copy(cs->effective_cpus, parent->cpus_allowed); spin_unlock_irq(&callback_lock); out_unlock: - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); cpus_read_unlock(); return 0; } @@ -2893,7 +2965,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css) struct cpuset *cs = css_cs(css); cpus_read_lock(); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); if (is_partition_root(cs)) update_prstate(cs, 0); @@ -2912,7 +2984,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css) cpuset_dec(); clear_bit(CS_ONLINE, &cs->flags); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); cpus_read_unlock(); } @@ -2925,7 +2997,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) static void cpuset_bind(struct cgroup_subsys_state *root_css) { - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); spin_lock_irq(&callback_lock); if (is_in_v2_mode()) { @@ -2938,7 +3010,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css) } spin_unlock_irq(&callback_lock); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); } /* @@ -2980,8 +3052,6 @@ struct cgroup_subsys cpuset_cgrp_subsys = { int __init cpuset_init(void) { - BUG_ON(percpu_init_rwsem(&cpuset_rwsem)); - BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)); BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)); BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL)); @@ -3053,7 +3123,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs, is_empty = cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); /* * Move tasks to the nearest ancestor with execution resources, @@ -3063,7 +3133,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs, if (is_empty) remove_tasks_in_empty_cpuset(cs); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); } static void @@ -3113,14 +3183,14 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) retry: wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); /* * We have raced with task attaching. 
We wait until attaching * is finished, so we won't attach a task to an empty cpuset. */ if (cs->attach_in_progress) { - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); goto retry; } @@ -3198,7 +3268,7 @@ update_tasks: hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, cpus_updated, mems_updated); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); } /** @@ -3228,7 +3298,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work) if (on_dfl && !alloc_cpumasks(NULL, &tmp)) ptmp = &tmp; - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); /* fetch the available cpus/mems and find out which changed how */ cpumask_copy(&new_cpus, cpu_active_mask); @@ -3285,7 +3355,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work) update_tasks_nodemask(&top_cpuset); } - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); /* if cpus or mems changed, we need to propagate to descendants */ if (cpus_updated || mems_updated) { @@ -3695,7 +3765,7 @@ void __cpuset_memory_pressure_bump(void) * - Used for /proc/<pid>/cpuset. * - No need to task_lock(tsk) on this tsk->cpuset reference, as it * doesn't really matter if tsk->cpuset changes after we read it, - * and we take cpuset_rwsem, keeping cpuset_attach() from changing it + * and we take cpuset_mutex, keeping cpuset_attach() from changing it * anyway. */ int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c index b4526668072e..27596f3b4aef 100644 --- a/kernel/dma/remap.c +++ b/kernel/dma/remap.c @@ -43,13 +43,13 @@ void *dma_common_contiguous_remap(struct page *page, size_t size, void *vaddr; int i; - pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL); + pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); if (!pages) return NULL; for (i = 0; i < count; i++) pages[i] = nth_page(page, i); vaddr = vmap(pages, count, VM_DMA_COHERENT, prot); - kfree(pages); + kvfree(pages); return vaddr; } diff --git a/kernel/events/core.c b/kernel/events/core.c index 97052b2dff7e..c7f13da672c9 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1224,6 +1224,11 @@ static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) return 0; } +static int perf_mux_hrtimer_restart_ipi(void *arg) +{ + return perf_mux_hrtimer_restart(arg); +} + void perf_pmu_disable(struct pmu *pmu) { int *count = this_cpu_ptr(pmu->pmu_disable_count); @@ -11137,8 +11142,7 @@ perf_event_mux_interval_ms_store(struct device *dev, cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); - cpu_function_call(cpu, - (remote_function_f)perf_mux_hrtimer_restart, cpuctx); + cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpuctx); } cpus_read_unlock(); mutex_unlock(&mux_interval_mutex); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d34a56f16d13..2324b7055260 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7309,6 +7309,7 @@ static int __sched_setscheduler(struct task_struct *p, int reset_on_fork; int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; struct rq *rq; + bool cpuset_locked = false; /* The pi code expects interrupts enabled */ BUG_ON(pi && in_interrupt()); @@ -7405,8 +7406,14 @@ recheck: return retval; } - if (pi) - cpuset_read_lock(); + /* + * SCHED_DEADLINE bandwidth accounting relies on stable cpusets + * information. 
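The __sched_setscheduler() comment above introduces the companion change on the scheduler side: instead of taking the cpuset lock for every PI-capable policy change, the lock is taken only when the old or new policy is SCHED_DEADLINE, and a local flag records that decision so every exit path unlocks exactly when it locked. A self-contained sketch of the pattern, with stub lock helpers and an illustrative policy test:

#include <stdbool.h>

static void cpuset_lock(void)   { /* mutex_lock(&cpuset_mutex) in the kernel */ }
static void cpuset_unlock(void) { /* mutex_unlock(&cpuset_mutex) */ }

struct task_like { int policy; };
#define POLICY_DEADLINE 6	/* matches the kernel's SCHED_DEADLINE value */

static int change_policy(struct task_like *p, int policy)
{
	bool cpuset_locked = false;

	/* Deadline bandwidth accounting needs a stable cpuset view,
	 * whether the task is entering or leaving the class. */
	if (policy == POLICY_DEADLINE || p->policy == POLICY_DEADLINE) {
		cpuset_locked = true;
		cpuset_lock();
	}

	p->policy = policy;	/* the real work happens here */

	if (cpuset_locked)	/* unlock only if we locked above */
		cpuset_unlock();
	return 0;
}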
+ */ + if (dl_policy(policy) || dl_policy(p->policy)) { + cpuset_locked = true; + cpuset_lock(); + } /* * Make sure no PI-waiters arrive (or leave) while we are @@ -7482,8 +7489,8 @@ change: if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { policy = oldpolicy = -1; task_rq_unlock(rq, p, &rf); - if (pi) - cpuset_read_unlock(); + if (cpuset_locked) + cpuset_unlock(); goto recheck; } @@ -7550,7 +7557,8 @@ change: task_rq_unlock(rq, p, &rf); if (pi) { - cpuset_read_unlock(); + if (cpuset_locked) + cpuset_unlock(); rt_mutex_adjust_pi(p); } @@ -7562,8 +7570,8 @@ change: unlock: task_rq_unlock(rq, p, &rf); - if (pi) - cpuset_read_unlock(); + if (cpuset_locked) + cpuset_unlock(); return retval; } @@ -8781,8 +8789,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur, return ret; } -int task_can_attach(struct task_struct *p, - const struct cpumask *cs_effective_cpus) +int task_can_attach(struct task_struct *p) { int ret = 0; @@ -8795,21 +8802,9 @@ int task_can_attach(struct task_struct *p, * success of set_cpus_allowed_ptr() on all attached tasks * before cpus_mask may be changed. */ - if (p->flags & PF_NO_SETAFFINITY) { + if (p->flags & PF_NO_SETAFFINITY) ret = -EINVAL; - goto out; - } - if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, - cs_effective_cpus)) { - int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus); - - if (unlikely(cpu >= nr_cpu_ids)) - return -EINVAL; - ret = dl_cpu_busy(cpu, p); - } - -out: return ret; } @@ -9091,7 +9086,7 @@ static void cpuset_cpu_active(void) static int cpuset_cpu_inactive(unsigned int cpu) { if (!cpuhp_tasks_frozen) { - int ret = dl_cpu_busy(cpu, NULL); + int ret = dl_bw_check_overflow(cpu); if (ret) return ret; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index b3e206498395..de45e4d2c61f 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -17,6 +17,7 @@ */ #include "sched.h" #include "pelt.h" +#include <linux/cpuset.h> struct dl_bandwidth def_dl_bandwidth; @@ -2446,6 +2447,12 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p) if (task_on_rq_queued(p) && p->dl.dl_runtime) task_non_contending(p); + /* + * In case a task is setscheduled out from SCHED_DEADLINE we need to + * keep track of that on its cpuset (for correct bandwidth tracking). + */ + dec_dl_tasks_cs(p); + if (!task_on_rq_queued(p)) { /* * Inactive timer is armed. However, p is leaving DEADLINE and @@ -2486,6 +2493,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p) if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) put_task_struct(p); + /* + * In case a task is setscheduled to SCHED_DEADLINE we need to keep + * track of that on its cpuset (for correct bandwidth tracking). + */ + inc_dl_tasks_cs(p); + /* If p is not queued we will update its parameters at next wakeup. */ if (!task_on_rq_queued(p)) { add_rq_bw(&p->dl, &rq->dl); @@ -2885,26 +2898,38 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, return ret; } -int dl_cpu_busy(int cpu, struct task_struct *p) +enum dl_bw_request { + dl_bw_req_check_overflow = 0, + dl_bw_req_alloc, + dl_bw_req_free +}; + +static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw) { - unsigned long flags, cap; + unsigned long flags; struct dl_bw *dl_b; - bool overflow; + bool overflow = 0; rcu_read_lock_sched(); dl_b = dl_bw_of(cpu); raw_spin_lock_irqsave(&dl_b->lock, flags); - cap = dl_bw_capacity(cpu); - overflow = __dl_overflow(dl_b, cap, 0, p ? 
p->dl.dl_bw : 0); - if (!overflow && p) { - /* - * We reserve space for this task in the destination - * root_domain, as we can't fail after this point. - * We will free resources in the source root_domain - * later on (see set_cpus_allowed_dl()). - */ - __dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu)); + if (req == dl_bw_req_free) { + __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu)); + } else { + unsigned long cap = dl_bw_capacity(cpu); + + overflow = __dl_overflow(dl_b, cap, 0, dl_bw); + + if (req == dl_bw_req_alloc && !overflow) { + /* + * We reserve space in the destination + * root_domain, as we can't fail after this point. + * We will free resources in the source root_domain + * later on (see set_cpus_allowed_dl()). + */ + __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu)); + } } raw_spin_unlock_irqrestore(&dl_b->lock, flags); @@ -2912,6 +2937,21 @@ int dl_cpu_busy(int cpu, struct task_struct *p) return overflow ? -EBUSY : 0; } + +int dl_bw_check_overflow(int cpu) +{ + return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0); +} + +int dl_bw_alloc(int cpu, u64 dl_bw) +{ + return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw); +} + +void dl_bw_free(int cpu, u64 dl_bw) +{ + dl_bw_manage(dl_bw_req_free, cpu, dl_bw); +} #endif #ifdef CONFIG_SCHED_DEBUG diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 6312f1904825..5061093d9baa 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -349,7 +349,7 @@ extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); extern bool __checkparam_dl(const struct sched_attr *attr); extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); -extern int dl_cpu_busy(int cpu, struct task_struct *p); +extern int dl_bw_check_overflow(int cpu); #ifdef CONFIG_CGROUP_SCHED diff --git a/kernel/softirq.c b/kernel/softirq.c index 322b65d45676..41f470929e99 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -595,7 +595,8 @@ void irq_enter_rcu(void) { __irq_enter_raw(); - if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)) + if (tick_nohz_full_cpu(smp_processor_id()) || + (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))) tick_irq_enter(); account_hardirq_enter(current); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index f42d0776bc84..7f5310d1a4d6 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -180,6 +180,8 @@ static ktime_t tick_init_jiffy_update(void) return period; } +#define MAX_STALLED_JIFFIES 5 + static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) { int cpu = smp_processor_id(); @@ -207,6 +209,21 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) if (tick_do_timer_cpu == cpu) tick_do_update_jiffies64(now); + /* + * If jiffies update stalled for too long (timekeeper in stop_machine() + * or VMEXIT'ed for several msecs), force an update. 
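The tick_sched_do_timer() comment above continues into the stall detector added below it: each tick remembers the jiffies value it saw, and if the value has not moved for MAX_STALLED_JIFFIES consecutive ticks the CPU forces the update itself rather than waiting on a stuck timekeeper. The core of that detector, reduced to a self-contained helper:

#define MAX_STALLED_TICKS 5	/* mirrors MAX_STALLED_JIFFIES */

static unsigned long last_seen;	/* mirrors ts->last_tick_jiffies */
static unsigned int stalled;	/* mirrors ts->stalled_jiffies */

/* Called once per tick with the current shared counter; returns
 * nonzero when the caller should force the update itself. */
static int counter_stalled(unsigned long counter)
{
	if (counter != last_seen) {
		last_seen = counter;
		stalled = 0;
		return 0;
	}
	if (++stalled == MAX_STALLED_TICKS) {
		stalled = 0;
		last_seen = counter;
		return 1;	/* counter stuck: take over and update it */
	}
	return 0;
}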
+ */ + if (ts->last_tick_jiffies != jiffies) { + ts->stalled_jiffies = 0; + ts->last_tick_jiffies = READ_ONCE(jiffies); + } else { + if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) { + tick_do_update_jiffies64(now); + ts->stalled_jiffies = 0; + ts->last_tick_jiffies = READ_ONCE(jiffies); + } + } + if (ts->inidle) ts->got_idle_tick = 1; } @@ -933,6 +950,8 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) if (unlikely(expires == KTIME_MAX)) { if (ts->nohz_mode == NOHZ_MODE_HIGHRES) hrtimer_cancel(&ts->sched_timer); + else + tick_program_event(KTIME_MAX, 1); return; } @@ -1339,9 +1358,15 @@ static void tick_nohz_handler(struct clock_event_device *dev) tick_sched_do_timer(ts, now); tick_sched_handle(ts, regs); - /* No need to reprogram if we are running tickless */ - if (unlikely(ts->tick_stopped)) + if (unlikely(ts->tick_stopped)) { + /* + * The clockevent device is not reprogrammed, so change the + * clock event device to ONESHOT_STOPPED to avoid spurious + * interrupts on devices which might not be truly one shot. + */ + tick_program_event(KTIME_MAX, 1); return; + } hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); @@ -1395,6 +1420,13 @@ static inline void tick_nohz_irq_enter(void) now = ktime_get(); if (ts->idle_active) tick_nohz_stop_idle(ts, now); + /* + * If all CPUs are idle. We may need to update a stale jiffies value. + * Note nohz_full is a special case: a timekeeper is guaranteed to stay + * alive but it might be busy looping with interrupts disabled in some + * rare case (typically stop machine). So we must make sure we have a + * last resort. + */ if (ts->tick_stopped) tick_nohz_update_jiffies(now); } diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h index d952ae393423..504649513399 100644 --- a/kernel/time/tick-sched.h +++ b/kernel/time/tick-sched.h @@ -49,6 +49,8 @@ enum tick_nohz_mode { * @timer_expires_base: Base time clock monotonic for @timer_expires * @next_timer: Expiry time of next expiring timer for debugging purpose only * @tick_dep_mask: Tick dependency mask - is set, if someone needs the tick + * @last_tick_jiffies: Value of jiffies seen on last tick + * @stalled_jiffies: Number of stalled jiffies detected across ticks */ struct tick_sched { struct hrtimer sched_timer; @@ -77,6 +79,8 @@ struct tick_sched { u64 next_timer; ktime_t idle_expires; atomic_t tick_dep_mask; + unsigned long last_tick_jiffies; + unsigned int stalled_jiffies; }; extern struct tick_sched *tick_get_tick_sched(int cpu); diff --git a/kernel/torture.c b/kernel/torture.c index bb8f411c974b..7233b847737f 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -915,7 +915,7 @@ void torture_kthread_stopping(char *title) VERBOSE_TOROUT_STRING(buf); while (!kthread_should_stop()) { torture_shutdown_absorb(title); - schedule_timeout_uninterruptible(1); + schedule_timeout_uninterruptible(HZ / 20); } } EXPORT_SYMBOL_GPL(torture_kthread_stopping); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 8b3531172d8e..6352a41380e5 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -662,7 +662,6 @@ static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds); u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) { - int nest_level = this_cpu_inc_return(bpf_event_output_nest_level); struct perf_raw_frag frag = { .copy = ctx_copy, .size = ctx_size, @@ -679,8 +678,12 @@ u64 bpf_event_output(struct 
bpf_map *map, u64 flags, void *meta, u64 meta_size, }; struct perf_sample_data *sd; struct pt_regs *regs; + int nest_level; u64 ret; + preempt_disable(); + nest_level = this_cpu_inc_return(bpf_event_output_nest_level); + if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) { ret = -EBUSY; goto out; @@ -695,6 +698,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, ret = __bpf_perf_event_output(regs, map, flags, sd); out: this_cpu_dec(bpf_event_output_nest_level); + preempt_enable(); return ret; } diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index e1cef097b0df..db7cefd196ce 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -535,6 +535,7 @@ struct trace_buffer { unsigned flags; int cpus; atomic_t record_disabled; + atomic_t resizing; cpumask_var_t cpumask; struct lock_class_key *reader_lock_key; @@ -2137,7 +2138,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, /* prevent another thread from changing buffer sizes */ mutex_lock(&buffer->mutex); - + atomic_inc(&buffer->resizing); if (cpu_id == RING_BUFFER_ALL_CPUS) { /* @@ -2276,6 +2277,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, atomic_dec(&buffer->record_disabled); } + atomic_dec(&buffer->resizing); mutex_unlock(&buffer->mutex); return 0; @@ -2296,6 +2298,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, } } out_err_unlock: + atomic_dec(&buffer->resizing); mutex_unlock(&buffer->mutex); return err; } @@ -5497,6 +5500,15 @@ int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, if (local_read(&cpu_buffer_b->committing)) goto out_dec; + /* + * When resize is in progress, we cannot swap it because + * it will mess the state of the cpu buffer. + */ + if (atomic_read(&buffer_a->resizing)) + goto out_dec; + if (atomic_read(&buffer_b->resizing)) + goto out_dec; + buffer_a->buffers[cpu] = cpu_buffer_b; buffer_b->buffers[cpu] = cpu_buffer_a; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ae7005af78c3..8769cd18f622 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1872,9 +1872,10 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) * place on this CPU. We fail to record, but we reset * the max trace buffer (no one writes directly to it) * and flag that it failed. + * Another reason is resize is in progress. */ trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, - "Failed to swap buffers due to commit in progress\n"); + "Failed to swap buffers due to commit or resize in progress\n"); } WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); @@ -4107,8 +4108,15 @@ static void *s_start(struct seq_file *m, loff_t *pos) * will point to the same string as current_trace->name. 
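Nearby, the ring-buffer hunks add an atomic resizing counter to struct trace_buffer: ring_buffer_resize() holds it across the whole operation, and ring_buffer_swap_cpu() now refuses to swap per-CPU buffers while either side is being resized (the updated trace.c message above reflects the new failure mode). A stripped-down sketch of the guard; note the real code additionally serialises under buffer->mutex and the committing/record_disabled checks, so the check below is not racy there:

#include <stdatomic.h>

static atomic_int resizing;	/* mirrors buffer->resizing */

static void buffer_resize(void)
{
	atomic_fetch_add(&resizing, 1);
	/* ... re-allocate per-CPU buffer pages ... */
	atomic_fetch_sub(&resizing, 1);
}

static int buffer_swap(void)
{
	if (atomic_load(&resizing) > 0)
		return -1;	/* the kernel bails out via out_dec */
	/* ... exchange per-CPU buffer pointers ... */
	return 0;
}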
*/ mutex_lock(&trace_types_lock); - if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) + if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) { + /* Close iter->trace before switching to the new current tracer */ + if (iter->trace->close) + iter->trace->close(iter); *iter->trace = *tr->current_trace; + /* Reopen the new current tracer */ + if (iter->trace->open) + iter->trace->open(iter); + } mutex_unlock(&trace_types_lock); #ifdef CONFIG_TRACER_MAX_TRACE @@ -5170,11 +5178,17 @@ int tracing_set_cpumask(struct trace_array *tr, !cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); +#ifdef CONFIG_TRACER_MAX_TRACE + ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu); +#endif } if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); +#ifdef CONFIG_TRACER_MAX_TRACE + ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu); +#endif } } arch_spin_unlock(&tr->max_lock); diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 590b3d51afae..ba37f768e2f2 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -231,7 +231,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); - + else + iter->private = NULL; } static void irqsoff_trace_close(struct trace_iterator *iter) diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 2402de520eca..b239bfaa51ae 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -168,6 +168,8 @@ static void wakeup_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); + else + iter->private = NULL; } static void wakeup_trace_close(struct trace_iterator *iter) diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c index 0d3a686b5ba2..fb8c0c5c2bd2 100644 --- a/lib/clz_ctz.c +++ b/lib/clz_ctz.c @@ -28,36 +28,16 @@ int __weak __clzsi2(int val) } EXPORT_SYMBOL(__clzsi2); -int __weak __clzdi2(long val); -int __weak __ctzdi2(long val); -#if BITS_PER_LONG == 32 - -int __weak __clzdi2(long val) +int __weak __clzdi2(u64 val); +int __weak __clzdi2(u64 val) { - return 32 - fls((int)val); + return 64 - fls64(val); } EXPORT_SYMBOL(__clzdi2); -int __weak __ctzdi2(long val) +int __weak __ctzdi2(u64 val); +int __weak __ctzdi2(u64 val) { - return __ffs((u32)val); + return __ffs64(val); } EXPORT_SYMBOL(__ctzdi2); - -#elif BITS_PER_LONG == 64 - -int __weak __clzdi2(long val) -{ - return 64 - fls64((u64)val); -} -EXPORT_SYMBOL(__clzdi2); - -int __weak __ctzdi2(long val) -{ - return __ffs64((u64)val); -} -EXPORT_SYMBOL(__ctzdi2); - -#else -#error BITS_PER_LONG not 32 or 64 -#endif diff --git a/lib/radix-tree.c b/lib/radix-tree.c index b3afafe46fff..c7918b7b8a23 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1134,7 +1134,6 @@ static void set_iter_tags(struct radix_tree_iter *iter, void __rcu **radix_tree_iter_resume(void __rcu **slot, struct radix_tree_iter *iter) { - slot++; iter->index = __radix_tree_iter_add(iter, 1); iter->next_index = iter->index; iter->tags = 0; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 9f9dd968fbe3..bcd71d8736be 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -2219,16 +2219,6 @@ static int 
soft_offline_in_use_page(struct page *page) return __soft_offline_page(page); } -static int soft_offline_free_page(struct page *page) -{ - int rc = 0; - - if (!page_handle_poison(page, true, false)) - rc = -EBUSY; - - return rc; -} - static void put_ref_page(struct page *page) { if (page) @@ -2294,10 +2284,13 @@ retry: if (ret > 0) { ret = soft_offline_in_use_page(page); } else if (ret == 0) { - if (soft_offline_free_page(page) && try_again) { - try_again = false; - flags &= ~MF_COUNT_INCREASED; - goto retry; + if (!page_handle_poison(page, true, false)) { + if (try_again) { + try_again = false; + flags &= ~MF_COUNT_INCREASED; + goto retry; + } + ret = -EBUSY; } } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index a3cf56df1cfe..c14481c3a3cd 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2808,6 +2808,10 @@ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) free_vm_area(area); return NULL; } + + flush_cache_vmap((unsigned long)area->addr, + (unsigned long)area->addr + count * PAGE_SIZE); + return area->addr; } EXPORT_SYMBOL_GPL(vmap_pfn); diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index 71999e13f729..5c5ddacd81cb 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -507,7 +507,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb, struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_elp_packet *elp_packet; struct batadv_hard_iface *primary_if; - struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); + struct ethhdr *ethhdr; bool res; int ret = NET_RX_DROP; @@ -515,6 +515,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb, if (!res) goto free_skb; + ethhdr = eth_hdr(skb); if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb; diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 1d750f3cb2e4..4fe6df68dfcb 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -124,8 +124,10 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb, { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - if (hard_iface->if_status != BATADV_IF_ACTIVE) + if (hard_iface->if_status != BATADV_IF_ACTIVE) { + kfree_skb(skb); return; + } batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES, @@ -986,7 +988,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, { struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_ogm2_packet *ogm_packet; - struct ethhdr *ethhdr = eth_hdr(skb); + struct ethhdr *ethhdr; int ogm_offset; u8 *packet_pos; int ret = NET_RX_DROP; @@ -1000,6 +1002,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN)) goto free_skb; + ethhdr = eth_hdr(skb); if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb; diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 35fadb924849..44cf612c0831 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -627,7 +627,19 @@ out: */ void batadv_update_min_mtu(struct net_device *soft_iface) { - soft_iface->mtu = batadv_hardif_min_mtu(soft_iface); + struct batadv_priv *bat_priv = netdev_priv(soft_iface); + int limit_mtu; + int mtu; + + mtu = batadv_hardif_min_mtu(soft_iface); + + if (bat_priv->mtu_set_by_user) + limit_mtu = bat_priv->mtu_set_by_user; + else + limit_mtu = ETH_DATA_LEN; + + mtu = min(mtu, limit_mtu); + dev_set_mtu(soft_iface, mtu); /* Check if the local translate 
table should be cleaned up to match a * new (and smaller) MTU. diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index 00875e1d8c44..bbd6ecf1678c 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -495,7 +495,10 @@ static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info) attr = info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED]; atomic_set(&bat_priv->fragmentation, !!nla_get_u8(attr)); + + rtnl_lock(); batadv_update_min_mtu(bat_priv->soft_iface); + rtnl_unlock(); } if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN]) { diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 6ab28b509d4b..99cd8aef0735 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -154,11 +154,14 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p) static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu) { + struct batadv_priv *bat_priv = netdev_priv(dev); + /* check ranges */ if (new_mtu < 68 || new_mtu > batadv_hardif_min_mtu(dev)) return -EINVAL; dev->mtu = new_mtu; + bat_priv->mtu_set_by_user = new_mtu; return 0; } diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 4b7ad6684bc4..1e1cf0e8a142 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -774,7 +774,6 @@ check_roaming: if (roamed_back) { batadv_tt_global_free(bat_priv, tt_global, "Roaming canceled"); - tt_global = NULL; } else { /* The global entry has to be marked as ROAMING and * has to be kept for consistency purpose diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 2be5d4a712c5..2635763bbd67 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -1547,6 +1547,12 @@ struct batadv_priv { struct net_device *soft_iface; /** + * @mtu_set_by_user: MTU was set once by user + * protected by rtnl_lock + */ + int mtu_set_by_user; + + /** * @bat_counters: mesh internal traffic statistic counters (see * batadv_counters) */ diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 9dd54247029a..0770286ecf0b 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -6375,9 +6375,14 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn, if (!chan) goto done; + chan = l2cap_chan_hold_unless_zero(chan); + if (!chan) + goto done; + l2cap_chan_lock(chan); l2cap_chan_del(chan, ECONNREFUSED); l2cap_chan_unlock(chan); + l2cap_chan_put(chan); done: mutex_unlock(&conn->chan_lock); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index a267c9b6bcef..756523e5402a 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -45,6 +45,7 @@ static const struct proto_ops l2cap_sock_ops; static void l2cap_sock_init(struct sock *sk, struct sock *parent); static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern); +static void l2cap_sock_cleanup_listen(struct sock *parent); bool l2cap_is_socket(struct socket *sock) { @@ -1414,6 +1415,7 @@ static int l2cap_sock_release(struct socket *sock) if (!sk) return 0; + l2cap_sock_cleanup_listen(sk); bt_sock_unlink(&l2cap_sk_list, sk); err = l2cap_sock_shutdown(sock, SHUT_RDWR); diff --git a/net/can/raw.c b/net/can/raw.c index 7105fa4824e4..8877d22da67e 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -83,6 +83,7 @@ struct raw_sock { struct sock sk; int bound; int ifindex; + struct net_device *dev; struct list_head notifier; int loopback; int recv_own_msgs; 
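The can/raw changes here replace repeated dev_get_by_index() lookups with a device pointer cached in the new ro->dev field: bind takes a long-lived reference with dev_hold(), release and NETDEV_UNREGISTER drop it again, and later paths such as raw_setsockopt() below can use the pointer directly, merely checking that the device is still registered. A minimal refcounting analogue of that lifecycle (toy types, not the kernel API):

#include <stdlib.h>

struct netdev_like { int refcnt; };

static void hold(struct netdev_like *d) { d->refcnt++; }	/* dev_hold() */
static void put(struct netdev_like *d)				/* dev_put() */
{
	if (--d->refcnt == 0)
		free(d);
}

struct raw_like {
	int bound;
	struct netdev_like *dev;	/* pinned while bound */
};

static void raw_bind_dev(struct raw_like *ro, struct netdev_like *dev)
{
	if (ro->dev)
		put(ro->dev);	/* drop the reference to the old device */
	ro->dev = dev;
	if (ro->dev)
		hold(ro->dev);	/* pin the new one for the socket's lifetime */
	ro->bound = 1;
}

static void raw_unbind_dev(struct raw_like *ro)	/* release/unregister path */
{
	if (ro->bound && ro->dev)
		put(ro->dev);
	ro->dev = NULL;
	ro->bound = 0;
}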
@@ -275,21 +276,24 @@ static void raw_notify(struct raw_sock *ro, unsigned long msg, if (!net_eq(dev_net(dev), sock_net(sk))) return; - if (ro->ifindex != dev->ifindex) + if (ro->dev != dev) return; switch (msg) { case NETDEV_UNREGISTER: lock_sock(sk); /* remove current filters & unregister */ - if (ro->bound) + if (ro->bound) { raw_disable_allfilters(dev_net(dev), dev, sk); + dev_put(dev); + } if (ro->count > 1) kfree(ro->filter); ro->ifindex = 0; ro->bound = 0; + ro->dev = NULL; ro->count = 0; release_sock(sk); @@ -335,6 +339,7 @@ static int raw_init(struct sock *sk) ro->bound = 0; ro->ifindex = 0; + ro->dev = NULL; /* set default filter to single entry dfilter */ ro->dfilter.can_id = 0; @@ -380,18 +385,14 @@ static int raw_release(struct socket *sock) list_del(&ro->notifier); spin_unlock(&raw_notifier_lock); + rtnl_lock(); lock_sock(sk); /* remove current filters & unregister */ if (ro->bound) { - if (ro->ifindex) { - struct net_device *dev; - - dev = dev_get_by_index(sock_net(sk), ro->ifindex); - if (dev) { - raw_disable_allfilters(dev_net(dev), dev, sk); - dev_put(dev); - } + if (ro->dev) { + raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk); + dev_put(ro->dev); } else { raw_disable_allfilters(sock_net(sk), NULL, sk); } @@ -402,6 +403,7 @@ static int raw_release(struct socket *sock) ro->ifindex = 0; ro->bound = 0; + ro->dev = NULL; ro->count = 0; free_percpu(ro->uniq); @@ -409,6 +411,8 @@ static int raw_release(struct socket *sock) sock->sk = NULL; release_sock(sk); + rtnl_unlock(); + sock_put(sk); return 0; @@ -419,6 +423,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; struct sock *sk = sock->sk; struct raw_sock *ro = raw_sk(sk); + struct net_device *dev = NULL; int ifindex; int err = 0; int notify_enetdown = 0; @@ -428,24 +433,23 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) if (addr->can_family != AF_CAN) return -EINVAL; + rtnl_lock(); lock_sock(sk); if (ro->bound && addr->can_ifindex == ro->ifindex) goto out; if (addr->can_ifindex) { - struct net_device *dev; - dev = dev_get_by_index(sock_net(sk), addr->can_ifindex); if (!dev) { err = -ENODEV; goto out; } if (dev->type != ARPHRD_CAN) { - dev_put(dev); err = -ENODEV; - goto out; + goto out_put_dev; } + if (!(dev->flags & IFF_UP)) notify_enetdown = 1; @@ -453,7 +457,9 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) /* filters set by default/setsockopt */ err = raw_enable_allfilters(sock_net(sk), dev, sk); - dev_put(dev); + if (err) + goto out_put_dev; + } else { ifindex = 0; @@ -464,26 +470,30 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) if (!err) { if (ro->bound) { /* unregister old filters */ - if (ro->ifindex) { - struct net_device *dev; - - dev = dev_get_by_index(sock_net(sk), - ro->ifindex); - if (dev) { - raw_disable_allfilters(dev_net(dev), - dev, sk); - dev_put(dev); - } + if (ro->dev) { + raw_disable_allfilters(dev_net(ro->dev), + ro->dev, sk); + /* drop reference to old ro->dev */ + dev_put(ro->dev); } else { raw_disable_allfilters(sock_net(sk), NULL, sk); } } ro->ifindex = ifindex; ro->bound = 1; + /* bind() ok -> hold a reference for new ro->dev */ + ro->dev = dev; + if (ro->dev) + dev_hold(ro->dev); } - out: +out_put_dev: + /* remove potential reference from dev_get_by_index() */ + if (dev) + dev_put(dev); +out: release_sock(sk); + rtnl_unlock(); if (notify_enetdown) { sk->sk_err = ENETDOWN; @@ -549,9 +559,9 @@ static int 
raw_setsockopt(struct socket *sock, int level, int optname, rtnl_lock(); lock_sock(sk); - if (ro->bound && ro->ifindex) { - dev = dev_get_by_index(sock_net(sk), ro->ifindex); - if (!dev) { + dev = ro->dev; + if (ro->bound && dev) { + if (dev->reg_state != NETREG_REGISTERED) { if (count > 1) kfree(filter); err = -ENODEV; @@ -592,7 +602,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, ro->count = count; out_fil: - dev_put(dev); release_sock(sk); rtnl_unlock(); @@ -610,9 +619,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, rtnl_lock(); lock_sock(sk); - if (ro->bound && ro->ifindex) { - dev = dev_get_by_index(sock_net(sk), ro->ifindex); - if (!dev) { + dev = ro->dev; + if (ro->bound && dev) { + if (dev->reg_state != NETREG_REGISTERED) { err = -ENODEV; goto out_err; } @@ -636,7 +645,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, ro->err_mask = err_mask; out_err: - dev_put(dev); release_sock(sk); rtnl_unlock(); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index f6b7436458ae..0c5e0d2c609e 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -3330,17 +3330,24 @@ static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq) int ret; dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); - ret = wait_for_completion_interruptible(&lreq->reg_commit_wait); + ret = wait_for_completion_killable(&lreq->reg_commit_wait); return ret ?: lreq->reg_commit_error; } -static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq) +static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq, + unsigned long timeout) { - int ret; + long left; dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); - ret = wait_for_completion_interruptible(&lreq->notify_finish_wait); - return ret ?: lreq->notify_finish_error; + left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait, + ceph_timeout_jiffies(timeout)); + if (left <= 0) + left = left ?: -ETIMEDOUT; + else + left = lreq->notify_finish_error; /* completed */ + + return left; } /* @@ -4890,7 +4897,8 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc, linger_submit(lreq); ret = linger_reg_commit_wait(lreq); if (!ret) - ret = linger_notify_finish_wait(lreq); + ret = linger_notify_finish_wait(lreq, + msecs_to_jiffies(2 * timeout * MSEC_PER_SEC)); else dout("lreq %p failed to initiate notify %d\n", lreq, ret); diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index 910ca41cb9e6..4953abee79fe 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -521,8 +521,11 @@ bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs) return ERR_PTR(-EPERM); nla_for_each_nested(nla, nla_stgs, rem) { - if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD) + if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD) { + if (nla_len(nla) != sizeof(u32)) + return ERR_PTR(-EINVAL); nr_maps++; + } } diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL); diff --git a/net/core/filter.c b/net/core/filter.c index 18eb8049c795..458756334c4f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3843,12 +3843,6 @@ BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset) if (unlikely(data_end > data_hard_end)) return -EINVAL; - /* ALL drivers MUST init xdp->frame_sz, chicken check below */ - if (unlikely(xdp->frame_sz > PAGE_SIZE)) { - WARN_ONCE(1, "Too BIG xdp->frame_sz = %d\n", xdp->frame_sz); - return -EINVAL; - } - if (unlikely(data_end < xdp->data + ETH_HLEN)) 
return -EINVAL; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 49766446797c..1b71e5c582bb 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2173,13 +2173,27 @@ out_err: return err; } -int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, - struct netlink_ext_ack *exterr) +int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, + struct netlink_ext_ack *exterr) { - return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy, + const struct ifinfomsg *ifmp; + const struct nlattr *attrs; + size_t len; + + ifmp = nla_data(nla_peer); + attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg); + len = nla_len(nla_peer) - sizeof(struct ifinfomsg); + + if (ifmp->ifi_index < 0) { + NL_SET_ERR_MSG_ATTR(exterr, nla_peer, + "ifindex can't be negative"); + return -EINVAL; + } + + return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy, exterr); } -EXPORT_SYMBOL(rtnl_nla_parse_ifla); +EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg); struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) { @@ -3280,6 +3294,7 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, struct ifinfomsg *ifm; char ifname[IFNAMSIZ]; struct nlattr **data; + bool link_specified; int err; #ifdef CONFIG_MODULES @@ -3300,12 +3315,19 @@ replay: ifname[0] = '\0'; ifm = nlmsg_data(nlh); - if (ifm->ifi_index > 0) + if (ifm->ifi_index > 0) { + link_specified = true; dev = __dev_get_by_index(net, ifm->ifi_index); - else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) + } else if (ifm->ifi_index < 0) { + NL_SET_ERR_MSG(extack, "ifindex can't be negative"); + return -EINVAL; + } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) { + link_specified = true; dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname); - else + } else { + link_specified = false; dev = NULL; + } master_dev = NULL; m_ops = NULL; @@ -3408,7 +3430,12 @@ replay: } if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { - if (ifm->ifi_index == 0 && tb[IFLA_GROUP]) + /* No dev found and NLM_F_CREATE not set. 
Requested dev does not exist, + * or it's for a group + */ + if (link_specified) + return -ENODEV; + if (tb[IFLA_GROUP]) return rtnl_group_changelink(skb, net, nla_get_u32(tb[IFLA_GROUP]), ifm, extack, tb); @@ -4919,13 +4946,17 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); if (br_spec) { nla_for_each_nested(attr, br_spec, rem) { - if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { + if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) { if (nla_len(attr) < sizeof(flags)) return -EINVAL; have_flags = true; flags = nla_get_u16(attr); - break; + } + + if (nla_type(attr) == IFLA_BRIDGE_MODE) { + if (nla_len(attr) < sizeof(u16)) + return -EINVAL; } } } diff --git a/net/core/skmsg.c b/net/core/skmsg.c index dc9b93d8f0d3..9cd14212dcd0 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -1124,13 +1124,19 @@ static void sk_psock_strp_data_ready(struct sock *sk) int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock) { + int ret; + static const struct strp_callbacks cb = { .rcv_msg = sk_psock_strp_read, .read_sock_done = sk_psock_strp_read_done, .parse_msg = sk_psock_strp_parse, }; - return strp_init(&psock->strp, sk, &cb); + ret = strp_init(&psock->strp, sk, &cb); + if (!ret) + sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED); + + return ret; } void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock) @@ -1158,7 +1164,7 @@ void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock) static void sk_psock_done_strp(struct sk_psock *psock) { /* Parser has been stopped */ - if (psock->progs.stream_parser) + if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED)) strp_done(&psock->strp); } #else diff --git a/net/core/sock.c b/net/core/sock.c index cf1e437ae487..ae1e9e2b8255 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1297,7 +1297,8 @@ set_sndbuf: cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED); - sk->sk_max_pacing_rate = ulval; + /* Pairs with READ_ONCE() from sk_getsockopt() */ + WRITE_ONCE(sk->sk_max_pacing_rate, ulval); sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval); break; } @@ -1455,11 +1456,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_SNDBUF: - v.val = sk->sk_sndbuf; + v.val = READ_ONCE(sk->sk_sndbuf); break; case SO_RCVBUF: - v.val = sk->sk_rcvbuf; + v.val = READ_ONCE(sk->sk_rcvbuf); break; case SO_REUSEADDR: @@ -1548,7 +1549,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_RCVLOWAT: - v.val = sk->sk_rcvlowat; + v.val = READ_ONCE(sk->sk_rcvlowat); break; case SO_SNDLOWAT: @@ -1642,7 +1643,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, if (!sock->ops->set_peek_off) return -EOPNOTSUPP; - v.val = sk->sk_peek_off; + v.val = READ_ONCE(sk->sk_peek_off); break; case SO_NOFCS: v.val = sock_flag(sk, SOCK_NOFCS); @@ -1672,7 +1673,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, #ifdef CONFIG_NET_RX_BUSY_POLL case SO_BUSY_POLL: - v.val = sk->sk_ll_usec; + v.val = READ_ONCE(sk->sk_ll_usec); break; case SO_PREFER_BUSY_POLL: v.val = READ_ONCE(sk->sk_prefer_busy_poll); @@ -1680,12 +1681,14 @@ int sock_getsockopt(struct socket *sock, int level, int optname, #endif case SO_MAX_PACING_RATE: + /* The READ_ONCE() pair with the WRITE_ONCE() in sk_setsockopt() */ if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) { lv = sizeof(v.ulval); - v.ulval = sk->sk_max_pacing_rate; + v.ulval = READ_ONCE(sk->sk_max_pacing_rate); } else { /* 
32bit version */ - v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U); + v.val = min_t(unsigned long, ~0U, + READ_ONCE(sk->sk_max_pacing_rate)); } break; @@ -2877,7 +2880,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount) if (mem_cgroup_sockets_enabled && sk->sk_memcg) mem_cgroup_uncharge_skmem(sk->sk_memcg, amount); - if (sk_under_memory_pressure(sk) && + if (sk_under_global_memory_pressure(sk) && (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) sk_leave_memory_pressure(sk); } @@ -2898,7 +2901,7 @@ EXPORT_SYMBOL(__sk_mem_reclaim); int sk_set_peek_off(struct sock *sk, int val) { - sk->sk_peek_off = val; + WRITE_ONCE(sk->sk_peek_off, val); return 0; } EXPORT_SYMBOL_GPL(sk_set_peek_off); diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 86b4e8909ad1..caae43e66353 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -117,7 +117,6 @@ static void sock_map_sk_acquire(struct sock *sk) __acquires(&sk->sk_lock.slock) { lock_sock(sk); - preempt_disable(); rcu_read_lock(); } @@ -125,7 +124,6 @@ static void sock_map_sk_release(struct sock *sk) __releases(&sk->sk_lock.slock) { rcu_read_unlock(); - preempt_enable(); release_sock(sk); } @@ -150,13 +148,13 @@ static void sock_map_del_link(struct sock *sk, list_for_each_entry_safe(link, tmp, &psock->link, list) { if (link->link_raw == link_raw) { struct bpf_map *map = link->map; - struct bpf_stab *stab = container_of(map, struct bpf_stab, - map); - if (psock->saved_data_ready && stab->progs.stream_parser) + struct sk_psock_progs *progs = sock_map_progs(map); + + if (psock->saved_data_ready && progs->stream_parser) strp_stop = true; - if (psock->saved_data_ready && stab->progs.stream_verdict) + if (psock->saved_data_ready && progs->stream_verdict) verdict_stop = true; - if (psock->saved_data_ready && stab->progs.skb_verdict) + if (psock->saved_data_ready && progs->skb_verdict) verdict_stop = true; list_del(&link->list); sk_psock_free_link(link); diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index dc4fb699b56c..d2981e89d363 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -946,7 +946,7 @@ static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX, - tb[DCB_ATTR_BCN], dcbnl_pfc_up_nest, + tb[DCB_ATTR_BCN], dcbnl_bcn_nest, NULL); if (ret) return ret; diff --git a/net/dccp/output.c b/net/dccp/output.c index b8a24734385e..fd2eb148d24d 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -187,7 +187,7 @@ unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu) /* And store cached results */ icsk->icsk_pmtu_cookie = pmtu; - dp->dccps_mss_cache = cur_mps; + WRITE_ONCE(dp->dccps_mss_cache, cur_mps); return cur_mps; } diff --git a/net/dccp/proto.c b/net/dccp/proto.c index a23b19663601..0b0567a692a8 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -324,11 +324,15 @@ EXPORT_SYMBOL_GPL(dccp_disconnect); __poll_t dccp_poll(struct file *file, struct socket *sock, poll_table *wait) { - __poll_t mask; struct sock *sk = sock->sk; + __poll_t mask; + u8 shutdown; + int state; sock_poll_wait(file, sock, wait); - if (sk->sk_state == DCCP_LISTEN) + + state = inet_sk_state_load(sk); + if (state == DCCP_LISTEN) return inet_csk_listen_poll(sk); /* Socket is not locked. 
We are protected from async events @@ -337,20 +341,21 @@ __poll_t dccp_poll(struct file *file, struct socket *sock, */ mask = 0; - if (sk->sk_err) + if (READ_ONCE(sk->sk_err)) mask = EPOLLERR; + shutdown = READ_ONCE(sk->sk_shutdown); - if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED) + if (shutdown == SHUTDOWN_MASK || state == DCCP_CLOSED) mask |= EPOLLHUP; - if (sk->sk_shutdown & RCV_SHUTDOWN) + if (shutdown & RCV_SHUTDOWN) mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; /* Connected? */ - if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) { + if ((1 << state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) { if (atomic_read(&sk->sk_rmem_alloc) > 0) mask |= EPOLLIN | EPOLLRDNORM; - if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { + if (!(shutdown & SEND_SHUTDOWN)) { if (sk_stream_is_writeable(sk)) { mask |= EPOLLOUT | EPOLLWRNORM; } else { /* send SIGIO later */ @@ -368,7 +373,6 @@ __poll_t dccp_poll(struct file *file, struct socket *sock, } return mask; } - EXPORT_SYMBOL_GPL(dccp_poll); int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) @@ -639,7 +643,7 @@ static int do_dccp_getsockopt(struct sock *sk, int level, int optname, return dccp_getsockopt_service(sk, len, (__be32 __user *)optval, optlen); case DCCP_SOCKOPT_GET_CUR_MPS: - val = dp->dccps_mss_cache; + val = READ_ONCE(dp->dccps_mss_cache); break; case DCCP_SOCKOPT_AVAILABLE_CCIDS: return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen); @@ -748,7 +752,7 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) trace_dccp_probe(sk, len); - if (len > dp->dccps_mss_cache) + if (len > READ_ONCE(dp->dccps_mss_cache)) return -EMSGSIZE; lock_sock(sk); @@ -781,6 +785,12 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) goto out_discard; } + /* We need to check dccps_mss_cache after socket is locked. 
*/ + if (len > dp->dccps_mss_cache) { + rc = -EMSGSIZE; + goto out_discard; + } + skb_reserve(skb, sk->sk_prot->max_header); rc = memcpy_from_msg(skb_put(skb, len), msg, len); if (rc != 0) diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index cc1caab4a654..d3275d1ed260 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -224,7 +224,7 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu) .un.frag.__unused = 0, .un.frag.mtu = htons(mtu), }; - icmph->checksum = ip_compute_csum(icmph, len); + icmph->checksum = csum_fold(skb_checksum(skb, 0, len, 0)); skb_reset_transport_header(skb); niph = skb_push(skb, sizeof(*niph)); diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index efe25a0172e6..df23319adc80 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -287,12 +287,12 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) switch (skb->protocol) { case htons(ETH_P_IP): - xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + xfrm_decode_session(skb, &fl, AF_INET); break; case htons(ETH_P_IPV6): - xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + xfrm_decode_session(skb, &fl, AF_INET6); break; default: goto tx_err; diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 6cc7d347ec0a..c140a36bd1e6 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -3222,13 +3222,9 @@ static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb) &rtm_dump_nexthop_cb, &filter); if (err < 0) { if (likely(skb->len)) - goto out; - goto out_err; + err = skb->len; } -out: - err = skb->len; -out_err: cb->seq = net->nexthop.seq; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); return err; @@ -3368,25 +3364,19 @@ static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb, dd->filter.res_bucket_nh_id != nhge->nh->id) continue; + dd->ctx->bucket_index = bucket_index; err = nh_fill_res_bucket(skb, nh, bucket, bucket_index, RTM_NEWNEXTHOPBUCKET, portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->extack); - if (err < 0) { - if (likely(skb->len)) - goto out; - goto out_err; - } + if (err) + return err; } dd->ctx->done_nh_idx = dd->ctx->nh.idx + 1; - bucket_index = 0; + dd->ctx->bucket_index = 0; -out: - err = skb->len; -out_err: - dd->ctx->bucket_index = bucket_index; - return err; + return 0; } static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb, @@ -3435,13 +3425,9 @@ static int rtm_dump_nexthop_bucket(struct sk_buff *skb, if (err < 0) { if (likely(skb->len)) - goto out; - goto out_err; + err = skb->len; } -out: - err = skb->len; -out_err: cb->seq = net->nexthop.seq; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); return err; diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index d58e672be31c..5df97aaac252 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -40,7 +40,7 @@ struct tcp_fastopen_metrics { struct tcp_metrics_block { struct tcp_metrics_block __rcu *tcpm_next; - possible_net_t tcpm_net; + struct net *tcpm_net; struct inetpeer_addr tcpm_saddr; struct inetpeer_addr tcpm_daddr; unsigned long tcpm_stamp; @@ -51,34 +51,38 @@ struct tcp_metrics_block { struct rcu_head rcu_head; }; -static inline struct net *tm_net(struct tcp_metrics_block *tm) +static inline struct net *tm_net(const struct tcp_metrics_block *tm) { - return read_pnet(&tm->tcpm_net); + /* Paired with the WRITE_ONCE() in tcpm_new() */ + return READ_ONCE(tm->tcpm_net); } static bool tcp_metric_locked(struct tcp_metrics_block *tm, enum tcp_metric_index idx) 
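/*
 * The dccp_sendmsg() change just above is the classic check/lock/recheck
 * shape: the unlocked test of dccps_mss_cache is only advisory, because
 * dccp_sync_mss() can shrink the value before lock_sock() returns, so the
 * test must be repeated under the lock. A hedged userspace sketch of that
 * ordering; the names are illustrative, not the kernel's.
 */
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>

static _Atomic int mss_cache = 1400;	/* may change concurrently */
static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

static int demo_sendmsg(int len)
{
	int err = 0;

	/* unlocked fast path: a cheap early reject only */
	if (len > atomic_load_explicit(&mss_cache, memory_order_relaxed))
		return -EMSGSIZE;

	pthread_mutex_lock(&sock_lock);
	/* recheck: the cache may have shrunk between the test and the lock */
	if (len > atomic_load_explicit(&mss_cache, memory_order_relaxed))
		err = -EMSGSIZE;	/* otherwise build and queue the packet */
	pthread_mutex_unlock(&sock_lock);
	return err;
}

int main(void)
{
	return demo_sendmsg(1000) ? 1 : 0;
}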
{ - return tm->tcpm_lock & (1 << idx); + /* Paired with WRITE_ONCE() in tcpm_suck_dst() */ + return READ_ONCE(tm->tcpm_lock) & (1 << idx); } -static u32 tcp_metric_get(struct tcp_metrics_block *tm, +static u32 tcp_metric_get(const struct tcp_metrics_block *tm, enum tcp_metric_index idx) { - return tm->tcpm_vals[idx]; + /* Paired with WRITE_ONCE() in tcp_metric_set() */ + return READ_ONCE(tm->tcpm_vals[idx]); } static void tcp_metric_set(struct tcp_metrics_block *tm, enum tcp_metric_index idx, u32 val) { - tm->tcpm_vals[idx] = val; + /* Paired with READ_ONCE() in tcp_metric_get() */ + WRITE_ONCE(tm->tcpm_vals[idx], val); } static bool addr_same(const struct inetpeer_addr *a, const struct inetpeer_addr *b) { - return inetpeer_addr_cmp(a, b) == 0; + return (a->family == b->family) && !inetpeer_addr_cmp(a, b); } struct tcpm_hash_bucket { @@ -89,6 +93,7 @@ static struct tcpm_hash_bucket *tcp_metrics_hash __read_mostly; static unsigned int tcp_metrics_hash_log __read_mostly; static DEFINE_SPINLOCK(tcp_metrics_lock); +static DEFINE_SEQLOCK(fastopen_seqlock); static void tcpm_suck_dst(struct tcp_metrics_block *tm, const struct dst_entry *dst, @@ -97,7 +102,7 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, u32 msval; u32 val; - tm->tcpm_stamp = jiffies; + WRITE_ONCE(tm->tcpm_stamp, jiffies); val = 0; if (dst_metric_locked(dst, RTAX_RTT)) @@ -110,30 +115,42 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, val |= 1 << TCP_METRIC_CWND; if (dst_metric_locked(dst, RTAX_REORDERING)) val |= 1 << TCP_METRIC_REORDERING; - tm->tcpm_lock = val; + /* Paired with READ_ONCE() in tcp_metric_locked() */ + WRITE_ONCE(tm->tcpm_lock, val); msval = dst_metric_raw(dst, RTAX_RTT); - tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC; + tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC); msval = dst_metric_raw(dst, RTAX_RTTVAR); - tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC; - tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH); - tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND); - tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING); + tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC); + tcp_metric_set(tm, TCP_METRIC_SSTHRESH, + dst_metric_raw(dst, RTAX_SSTHRESH)); + tcp_metric_set(tm, TCP_METRIC_CWND, + dst_metric_raw(dst, RTAX_CWND)); + tcp_metric_set(tm, TCP_METRIC_REORDERING, + dst_metric_raw(dst, RTAX_REORDERING)); if (fastopen_clear) { + write_seqlock(&fastopen_seqlock); tm->tcpm_fastopen.mss = 0; tm->tcpm_fastopen.syn_loss = 0; tm->tcpm_fastopen.try_exp = 0; tm->tcpm_fastopen.cookie.exp = false; tm->tcpm_fastopen.cookie.len = 0; + write_sequnlock(&fastopen_seqlock); } } #define TCP_METRICS_TIMEOUT (60 * 60 * HZ) -static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst) +static void tcpm_check_stamp(struct tcp_metrics_block *tm, + const struct dst_entry *dst) { - if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT))) + unsigned long limit; + + if (!tm) + return; + limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT; + if (unlikely(time_after(jiffies, limit))) tcpm_suck_dst(tm, dst, false); } @@ -174,20 +191,23 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, oldest = deref_locked(tcp_metrics_hash[hash].chain); for (tm = deref_locked(oldest->tcpm_next); tm; tm = deref_locked(tm->tcpm_next)) { - if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp)) + if (time_before(READ_ONCE(tm->tcpm_stamp), + READ_ONCE(oldest->tcpm_stamp))) oldest = 
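/*
 * tcpm_suck_dst() above now clears the TCP fastopen fields under
 * fastopen_seqlock, so a concurrent reader can never observe a
 * half-reset cookie. A compact userspace seqlock sketch, assuming C11
 * atomics; the kernel's seqlock_t adds lockdep tracking and SMP
 * barriers that this demo leaves out.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int seq;	/* even: stable, odd: write in progress */
static unsigned int mss, syn_loss;	/* the protected fields */

static void writer_update(unsigned int new_mss, unsigned int new_loss)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);	/* -> odd */
	mss = new_mss;
	syn_loss = new_loss;
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);	/* -> even */
}

static void reader_snapshot(unsigned int *out_mss, unsigned int *out_loss)
{
	unsigned int s;

	do {
		while ((s = atomic_load_explicit(&seq, memory_order_acquire)) & 1)
			;	/* writer active: wait and retry */
		*out_mss = mss;
		*out_loss = syn_loss;
	} while (atomic_load_explicit(&seq, memory_order_acquire) != s);
}

int main(void)
{
	unsigned int m, l;

	writer_update(1460, 0);
	reader_snapshot(&m, &l);
	printf("mss=%u syn_loss=%u\n", m, l);
	return 0;
}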
tm; } tm = oldest; } else { - tm = kmalloc(sizeof(*tm), GFP_ATOMIC); + tm = kzalloc(sizeof(*tm), GFP_ATOMIC); if (!tm) goto out_unlock; } - write_pnet(&tm->tcpm_net, net); + /* Paired with the READ_ONCE() in tm_net() */ + WRITE_ONCE(tm->tcpm_net, net); + tm->tcpm_saddr = *saddr; tm->tcpm_daddr = *daddr; - tcpm_suck_dst(tm, dst, true); + tcpm_suck_dst(tm, dst, reclaim); if (likely(!reclaim)) { tm->tcpm_next = tcp_metrics_hash[hash].chain; @@ -434,7 +454,7 @@ void tcp_update_metrics(struct sock *sk) tp->reordering); } } - tm->tcpm_stamp = jiffies; + WRITE_ONCE(tm->tcpm_stamp, jiffies); out_unlock: rcu_read_unlock(); } @@ -539,8 +559,6 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst) return ret; } -static DEFINE_SEQLOCK(fastopen_seqlock); - void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, struct tcp_fastopen_cookie *cookie) { @@ -647,7 +665,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg, } if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE, - jiffies - tm->tcpm_stamp, + jiffies - READ_ONCE(tm->tcpm_stamp), TCP_METRICS_ATTR_PAD) < 0) goto nla_put_failure; @@ -658,7 +676,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg, if (!nest) goto nla_put_failure; for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) { - u32 val = tm->tcpm_vals[i]; + u32 val = tcp_metric_get(tm, i); if (!val) continue; diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 50bba370486e..a8592c187b32 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -582,7 +582,9 @@ out_reset_timer: tcp_stream_is_thin(tp) && icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) { icsk->icsk_backoff = 0; - icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX); + icsk->icsk_rto = clamp(__tcp_set_rto(tp), + tcp_rto_min(sk), + TCP_RTO_MAX); } else { /* Use normal (exponential) backoff */ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 42c37ec832f1..190aa3b19591 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -570,12 +570,12 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) vti6_addr_conflict(t, ipv6_hdr(skb))) goto tx_err; - xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + xfrm_decode_session(skb, &fl, AF_INET6); break; case htons(ETH_P_IP): - xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + xfrm_decode_session(skb, &fl, AF_INET); break; default: goto tx_err; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 91f1c5f56d5f..ee094645c7ce 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1068,7 +1068,7 @@ static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, And all this only to mangle msg->im6_msgtype and to set msg->im6_mbz to "mbz" :-) */ - skb_push(skb, -skb_network_offset(pkt)); + __skb_pull(skb, skb_network_offset(pkt)); skb_push(skb, sizeof(*msg)); skb_reset_transport_header(skb); diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 8108e9a941d0..3ab903f7e0f8 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -196,7 +196,8 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur, static inline int ndisc_is_useropt(const struct net_device *dev, struct nd_opt_hdr *opt) { - return opt->nd_opt_type == ND_OPT_RDNSS || + return opt->nd_opt_type == ND_OPT_PREFIX_INFO || + opt->nd_opt_type == ND_OPT_RDNSS || opt->nd_opt_type == ND_OPT_DNSSL || opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL || opt->nd_opt_type == ND_OPT_PREF64 || diff --git a/net/key/af_key.c b/net/key/af_key.c index 
d34fed1a484a..258fa046f440 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1848,9 +1848,9 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; - if ((xfilter->sadb_x_filter_splen >= + if ((xfilter->sadb_x_filter_splen > (sizeof(xfrm_address_t) << 3)) || - (xfilter->sadb_x_filter_dplen >= + (xfilter->sadb_x_filter_dplen > (sizeof(xfrm_address_t) << 3))) { mutex_unlock(&pfk->dump_lock); return -EINVAL; diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c index 6447a09932f5..069c2659074b 100644 --- a/net/ncsi/ncsi-rsp.c +++ b/net/ncsi/ncsi-rsp.c @@ -611,14 +611,14 @@ static int ncsi_rsp_handler_snfc(struct ncsi_request *nr) return 0; } -/* Response handler for Mellanox command Get Mac Address */ -static int ncsi_rsp_handler_oem_mlx_gma(struct ncsi_request *nr) +/* Response handler for Get Mac Address command */ +static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id) { struct ncsi_dev_priv *ndp = nr->ndp; struct net_device *ndev = ndp->ndev.dev; - const struct net_device_ops *ops = ndev->netdev_ops; struct ncsi_rsp_oem_pkt *rsp; struct sockaddr saddr; + u32 mac_addr_off = 0; int ret = 0; /* Get the response header */ @@ -626,11 +626,25 @@ static int ncsi_rsp_handler_oem_mlx_gma(struct ncsi_request *nr) saddr.sa_family = ndev->type; ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - memcpy(saddr.sa_data, &rsp->data[MLX_MAC_ADDR_OFFSET], ETH_ALEN); + if (mfr_id == NCSI_OEM_MFR_BCM_ID) + mac_addr_off = BCM_MAC_ADDR_OFFSET; + else if (mfr_id == NCSI_OEM_MFR_MLX_ID) + mac_addr_off = MLX_MAC_ADDR_OFFSET; + else if (mfr_id == NCSI_OEM_MFR_INTEL_ID) + mac_addr_off = INTEL_MAC_ADDR_OFFSET; + + memcpy(saddr.sa_data, &rsp->data[mac_addr_off], ETH_ALEN); + if (mfr_id == NCSI_OEM_MFR_BCM_ID || mfr_id == NCSI_OEM_MFR_INTEL_ID) + eth_addr_inc((u8 *)saddr.sa_data); + if (!is_valid_ether_addr((const u8 *)saddr.sa_data)) + return -ENXIO; + /* Set the flag for GMA command which should only be called once */ ndp->gma_flag = 1; - ret = ops->ndo_set_mac_address(ndev, &saddr); + rtnl_lock(); + ret = dev_set_mac_address(ndev, &saddr, NULL); + rtnl_unlock(); if (ret < 0) netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n"); @@ -649,41 +663,10 @@ static int ncsi_rsp_handler_oem_mlx(struct ncsi_request *nr) if (mlx->cmd == NCSI_OEM_MLX_CMD_GMA && mlx->param == NCSI_OEM_MLX_CMD_GMA_PARAM) - return ncsi_rsp_handler_oem_mlx_gma(nr); + return ncsi_rsp_handler_oem_gma(nr, NCSI_OEM_MFR_MLX_ID); return 0; } -/* Response handler for Broadcom command Get Mac Address */ -static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr) -{ - struct ncsi_dev_priv *ndp = nr->ndp; - struct net_device *ndev = ndp->ndev.dev; - const struct net_device_ops *ops = ndev->netdev_ops; - struct ncsi_rsp_oem_pkt *rsp; - struct sockaddr saddr; - int ret = 0; - - /* Get the response header */ - rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp); - - saddr.sa_family = ndev->type; - ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - memcpy(saddr.sa_data, &rsp->data[BCM_MAC_ADDR_OFFSET], ETH_ALEN); - /* Increase mac address by 1 for BMC's address */ - eth_addr_inc((u8 *)saddr.sa_data); - if (!is_valid_ether_addr((const u8 *)saddr.sa_data)) - return -ENXIO; - - /* Set the flag for GMA command which should only be called once */ - ndp->gma_flag = 1; - - ret = ops->ndo_set_mac_address(ndev, &saddr); - if (ret < 0) - netdev_warn(ndev, "NCSI: 'Writing mac address to device 
failed\n"); - - return ret; -} - /* Response handler for Broadcom card */ static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr) { @@ -695,42 +678,10 @@ static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr) bcm = (struct ncsi_rsp_oem_bcm_pkt *)(rsp->data); if (bcm->type == NCSI_OEM_BCM_CMD_GMA) - return ncsi_rsp_handler_oem_bcm_gma(nr); + return ncsi_rsp_handler_oem_gma(nr, NCSI_OEM_MFR_BCM_ID); return 0; } -/* Response handler for Intel command Get Mac Address */ -static int ncsi_rsp_handler_oem_intel_gma(struct ncsi_request *nr) -{ - struct ncsi_dev_priv *ndp = nr->ndp; - struct net_device *ndev = ndp->ndev.dev; - const struct net_device_ops *ops = ndev->netdev_ops; - struct ncsi_rsp_oem_pkt *rsp; - struct sockaddr saddr; - int ret = 0; - - /* Get the response header */ - rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp); - - saddr.sa_family = ndev->type; - ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - memcpy(saddr.sa_data, &rsp->data[INTEL_MAC_ADDR_OFFSET], ETH_ALEN); - /* Increase mac address by 1 for BMC's address */ - eth_addr_inc((u8 *)saddr.sa_data); - if (!is_valid_ether_addr((const u8 *)saddr.sa_data)) - return -ENXIO; - - /* Set the flag for GMA command which should only be called once */ - ndp->gma_flag = 1; - - ret = ops->ndo_set_mac_address(ndev, &saddr); - if (ret < 0) - netdev_warn(ndev, - "NCSI: 'Writing mac address to device failed\n"); - - return ret; -} - /* Response handler for Intel card */ static int ncsi_rsp_handler_oem_intel(struct ncsi_request *nr) { @@ -742,7 +693,7 @@ static int ncsi_rsp_handler_oem_intel(struct ncsi_request *nr) intel = (struct ncsi_rsp_oem_intel_pkt *)(rsp->data); if (intel->cmd == NCSI_OEM_INTEL_CMD_GMA) - return ncsi_rsp_handler_oem_intel_gma(nr); + return ncsi_rsp_handler_oem_gma(nr, NCSI_OEM_MFR_INTEL_ID); return 0; } diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 29ec3ef63edc..d0b64c36471d 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1802,6 +1802,7 @@ static int proc_do_sync_threshold(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { + struct netns_ipvs *ipvs = table->extra2; int *valp = table->data; int val[2]; int rc; @@ -1811,6 +1812,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write, .mode = table->mode, }; + mutex_lock(&ipvs->sync_mutex); memcpy(val, valp, sizeof(val)); rc = proc_dointvec(&tmp, write, buffer, lenp, ppos); if (write) { @@ -1820,6 +1822,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write, else memcpy(valp, val, sizeof(val)); } + mutex_unlock(&ipvs->sync_mutex); return rc; } @@ -4077,6 +4080,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD; ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD; tbl[idx].data = &ipvs->sysctl_sync_threshold; + tbl[idx].extra2 = ipvs; tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold); ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD; tbl[idx++].data = &ipvs->sysctl_sync_refresh_period; diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 895e0ca54299..7247af51bdfc 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -49,8 +49,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS, [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS, [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS, - 
[SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000, - [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000, + [SCTP_CONNTRACK_SHUTDOWN_SENT] = 3 SECS, + [SCTP_CONNTRACK_SHUTDOWN_RECD] = 3 SECS, [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS, [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS, }; @@ -105,7 +105,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = { { /* ORIGINAL */ /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */ -/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW}, +/* init */ {sCL, sCL, sCW, sCE, sES, sCL, sCL, sSA, sCW}, /* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL}, /* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, /* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL}, diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index ce9f962380b7..d84da11aaee5 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -255,8 +255,10 @@ int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain) if (chain->bound) return -EBUSY; + if (!nft_use_inc(&chain->use)) + return -EMFILE; + chain->bound = true; - chain->use++; nft_chain_trans_bind(ctx, chain); return 0; @@ -439,7 +441,7 @@ static int nft_delchain(struct nft_ctx *ctx) if (IS_ERR(trans)) return PTR_ERR(trans); - ctx->table->use--; + nft_use_dec(&ctx->table->use); nft_deactivate_next(ctx->net, ctx->chain); return 0; @@ -478,7 +480,7 @@ nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule) /* You cannot delete the same rule twice */ if (nft_is_active_next(ctx->net, rule)) { nft_deactivate_next(ctx->net, rule); - ctx->chain->use--; + nft_use_dec(&ctx->chain->use); return 0; } return -ENOENT; @@ -645,7 +647,7 @@ static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set) nft_map_deactivate(ctx, set); nft_deactivate_next(ctx->net, set); - ctx->table->use--; + nft_use_dec(&ctx->table->use); return err; } @@ -677,7 +679,7 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj) return err; nft_deactivate_next(ctx->net, obj); - ctx->table->use--; + nft_use_dec(&ctx->table->use); return err; } @@ -712,7 +714,7 @@ static int nft_delflowtable(struct nft_ctx *ctx, return err; nft_deactivate_next(ctx->net, flowtable); - ctx->table->use--; + nft_use_dec(&ctx->table->use); return err; } @@ -2263,9 +2265,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, struct nft_rule **rules; int err; - if (table->use == UINT_MAX) - return -EOVERFLOW; - if (nla[NFTA_CHAIN_HOOK]) { struct nft_stats __percpu *stats = NULL; struct nft_chain_hook hook; @@ -2362,6 +2361,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, if (err < 0) goto err_destroy_chain; + if (!nft_use_inc(&table->use)) { + err = -EMFILE; + goto err_use; + } + trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN); if (IS_ERR(trans)) { err = PTR_ERR(trans); @@ -2378,10 +2382,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, goto err_unregister_hook; } - table->use++; - return 0; + err_unregister_hook: + nft_use_dec_restore(&table->use); +err_use: nf_tables_unregister_hook(net, table, chain); err_destroy_chain: nf_tables_chain_destroy(ctx); @@ -3566,9 +3571,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info, return -EINVAL; handle = nf_tables_alloc_handle(table); - if (chain->use == UINT_MAX) - return -EOVERFLOW; - if (nla[NFTA_RULE_POSITION]) { pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION])); old_rule = 
__nft_rule_lookup(chain, pos_handle); @@ -3662,6 +3664,11 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info, } } + if (!nft_use_inc(&chain->use)) { + err = -EMFILE; + goto err_release_rule; + } + if (info->nlh->nlmsg_flags & NLM_F_REPLACE) { err = nft_delrule(&ctx, old_rule); if (err < 0) @@ -3693,7 +3700,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info, } } kvfree(expr_info); - chain->use++; if (flow) nft_trans_flow_rule(trans) = flow; @@ -3704,6 +3710,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info, return 0; err_destroy_flow_rule: + nft_use_dec_restore(&chain->use); if (flow) nft_flow_rule_destroy(flow); err_release_rule: @@ -4721,9 +4728,15 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, alloc_size = sizeof(*set) + size + udlen; if (alloc_size < size || alloc_size > INT_MAX) return -ENOMEM; + + if (!nft_use_inc(&table->use)) + return -EMFILE; + set = kvzalloc(alloc_size, GFP_KERNEL); - if (!set) - return -ENOMEM; + if (!set) { + err = -ENOMEM; + goto err_alloc; + } name = nla_strdup(nla[NFTA_SET_NAME], GFP_KERNEL); if (!name) { @@ -4781,7 +4794,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, goto err_set_expr_alloc; list_add_tail_rcu(&set->list, &table->sets); - table->use++; + return 0; err_set_expr_alloc: @@ -4793,6 +4806,9 @@ err_set_init: kfree(set->name); err_set_name: kvfree(set); +err_alloc: + nft_use_dec_restore(&table->use); + return err; } @@ -4927,9 +4943,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *i; struct nft_set_iter iter; - if (set->use == UINT_MAX) - return -EOVERFLOW; - if (!list_empty(&set->bindings) && nft_set_is_anonymous(set)) return -EBUSY; @@ -4957,10 +4970,12 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, return iter.err; } bind: + if (!nft_use_inc(&set->use)) + return -EMFILE; + binding->chain = ctx->chain; list_add_tail_rcu(&binding->list, &set->bindings); nft_set_trans_bind(ctx, set); - set->use++; return 0; } @@ -5034,7 +5049,7 @@ void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set) nft_clear(ctx->net, set); } - set->use++; + nft_use_inc_restore(&set->use); } EXPORT_SYMBOL_GPL(nf_tables_activate_set); @@ -5050,7 +5065,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, else list_del_rcu(&binding->list); - set->use--; + nft_use_dec(&set->use); break; case NFT_TRANS_PREPARE: if (nft_set_is_anonymous(set)) { @@ -5059,7 +5074,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, nft_deactivate_next(ctx->net, set); } - set->use--; + nft_use_dec(&set->use); return; case NFT_TRANS_ABORT: case NFT_TRANS_RELEASE: @@ -5067,7 +5082,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, set->flags & (NFT_SET_MAP | NFT_SET_OBJECT)) nft_map_deactivate(ctx, set); - set->use--; + nft_use_dec(&set->use); fallthrough; default: nf_tables_unbind_set(ctx, set, binding, @@ -5799,7 +5814,7 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem, nft_set_elem_expr_destroy(&ctx, nft_set_ext_expr(ext)); if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) - (*nft_set_ext_obj(ext))->use--; + nft_use_dec(&(*nft_set_ext_obj(ext))->use); kfree(elem); } EXPORT_SYMBOL_GPL(nft_set_elem_destroy); @@ -6290,8 +6305,16 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, set->objtype, genmask); if 
(IS_ERR(obj)) { err = PTR_ERR(obj); + obj = NULL; + goto err_parse_key_end; + } + + if (!nft_use_inc(&obj->use)) { + err = -EMFILE; + obj = NULL; goto err_parse_key_end; } + err = nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF); if (err < 0) goto err_parse_key_end; @@ -6363,10 +6386,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, udata->len = ulen - 1; nla_memcpy(&udata->data, nla[NFTA_SET_ELEM_USERDATA], ulen); } - if (obj) { + if (obj) *nft_set_ext_obj(ext) = obj; - obj->use++; - } + err = nft_set_elem_expr_setup(ctx, ext, expr_array, num_exprs); if (err < 0) goto err_elem_expr; @@ -6421,14 +6443,14 @@ err_set_full: err_element_clash: kfree(trans); err_elem_expr: - if (obj) - obj->use--; - nf_tables_set_elem_destroy(ctx, set, elem.priv); err_parse_data: if (nla[NFTA_SET_ELEM_DATA] != NULL) nft_data_release(&elem.data.val, desc.type); err_parse_key_end: + if (obj) + nft_use_dec_restore(&obj->use); + nft_data_release(&elem.key_end.val, NFT_DATA_VALUE); err_parse_key: nft_data_release(&elem.key.val, NFT_DATA_VALUE); @@ -6507,7 +6529,7 @@ void nft_data_hold(const struct nft_data *data, enum nft_data_types type) case NFT_JUMP: case NFT_GOTO: chain = data->verdict.chain; - chain->use++; + nft_use_inc_restore(&chain->use); break; } } @@ -6522,7 +6544,7 @@ static void nft_setelem_data_activate(const struct net *net, if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) nft_data_hold(nft_set_ext_data(ext), set->dtype); if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) - (*nft_set_ext_obj(ext))->use++; + nft_use_inc_restore(&(*nft_set_ext_obj(ext))->use); } static void nft_setelem_data_deactivate(const struct net *net, @@ -6534,7 +6556,7 @@ static void nft_setelem_data_deactivate(const struct net *net, if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) nft_data_release(nft_set_ext_data(ext), set->dtype); if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) - (*nft_set_ext_obj(ext))->use--; + nft_use_dec(&(*nft_set_ext_obj(ext))->use); } static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, @@ -6697,6 +6719,7 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx, ret = __nft_set_catchall_flush(ctx, set, &elem); if (ret < 0) break; + nft_set_elem_change_active(ctx->net, set, ext); } return ret; @@ -7069,9 +7092,14 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info, nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla); + if (!nft_use_inc(&table->use)) + return -EMFILE; + type = nft_obj_type_get(net, objtype); - if (IS_ERR(type)) - return PTR_ERR(type); + if (IS_ERR(type)) { + err = PTR_ERR(type); + goto err_type; + } obj = nft_obj_init(&ctx, type, nla[NFTA_OBJ_DATA]); if (IS_ERR(obj)) { @@ -7105,7 +7133,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info, goto err_obj_ht; list_add_tail_rcu(&obj->list, &table->objects); - table->use++; + return 0; err_obj_ht: /* queued in transaction log */ @@ -7121,6 +7149,9 @@ err_strdup: kfree(obj); err_init: module_put(type->owner); +err_type: + nft_use_dec_restore(&table->use); + return err; } @@ -7511,7 +7542,7 @@ void nf_tables_deactivate_flowtable(const struct nft_ctx *ctx, case NFT_TRANS_PREPARE: case NFT_TRANS_ABORT: case NFT_TRANS_RELEASE: - flowtable->use--; + nft_use_dec(&flowtable->use); fallthrough; default: return; @@ -7859,9 +7890,14 @@ static int nf_tables_newflowtable(struct sk_buff *skb, nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla); + if (!nft_use_inc(&table->use)) + return -EMFILE; + flowtable = kzalloc(sizeof(*flowtable), 
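/*
 * nf_tables_newobj() above now takes the table use count before any
 * other allocation and drops it through a dedicated err_type label.
 * That is the usual kernel ladder: acquire resources in order, release
 * them in exact reverse order on failure. A self-contained sketch with
 * invented resource names.
 */
#include <stdlib.h>

static int create_object(int fail_register)
{
	int err = 0;
	char *meta, *body;

	meta = malloc(32);		/* step 1: bookkeeping slot */
	if (!meta)
		return -1;

	body = malloc(256);		/* step 2: the object itself */
	if (!body) {
		err = -1;
		goto err_meta;
	}

	if (fail_register) {		/* step 3: fallible registration */
		err = -1;
		goto err_body;
	}

	/* demo only: normally ownership is handed off on success;
	 * free here just to keep the example leak-free */
	free(body);
	free(meta);
	return 0;

err_body:
	free(body);			/* undo step 2 */
err_meta:
	free(meta);			/* undo step 1 last */
	return err;
}

int main(void)
{
	return create_object(1) == -1 ? 0 : 1;
}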
GFP_KERNEL); - if (!flowtable) - return -ENOMEM; + if (!flowtable) { + err = -ENOMEM; + goto flowtable_alloc; + } flowtable->table = table; flowtable->handle = nf_tables_alloc_handle(table); @@ -7916,7 +7952,6 @@ static int nf_tables_newflowtable(struct sk_buff *skb, goto err5; list_add_tail_rcu(&flowtable->list, &table->flowtables); - table->use++; return 0; err5: @@ -7933,6 +7968,9 @@ err2: kfree(flowtable->name); err1: kfree(flowtable); +flowtable_alloc: + nft_use_dec_restore(&table->use); + return err; } @@ -9169,7 +9207,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) */ if (nft_set_is_anonymous(nft_trans_set(trans)) && !list_empty(&nft_trans_set(trans)->bindings)) - trans->ctx.table->use--; + nft_use_dec(&trans->ctx.table->use); } nf_tables_set_notify(&trans->ctx, nft_trans_set(trans), NFT_MSG_NEWSET, GFP_KERNEL); @@ -9388,7 +9426,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; } - trans->ctx.table->use--; + nft_use_dec_restore(&trans->ctx.table->use); nft_chain_del(trans->ctx.chain); nf_tables_unregister_hook(trans->ctx.net, trans->ctx.table, @@ -9396,7 +9434,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) } break; case NFT_MSG_DELCHAIN: - trans->ctx.table->use++; + nft_use_inc_restore(&trans->ctx.table->use); nft_clear(trans->ctx.net, trans->ctx.chain); nft_trans_destroy(trans); break; @@ -9405,7 +9443,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; } - trans->ctx.chain->use--; + nft_use_dec_restore(&trans->ctx.chain->use); list_del_rcu(&nft_trans_rule(trans)->list); nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans), @@ -9414,7 +9452,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_flow_rule_destroy(nft_trans_flow_rule(trans)); break; case NFT_MSG_DELRULE: - trans->ctx.chain->use++; + nft_use_inc_restore(&trans->ctx.chain->use); nft_clear(trans->ctx.net, nft_trans_rule(trans)); nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans)); if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) @@ -9427,7 +9465,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; } - trans->ctx.table->use--; + nft_use_dec_restore(&trans->ctx.table->use); if (nft_trans_set_bound(trans)) { nft_trans_destroy(trans); break; @@ -9435,7 +9473,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) list_del_rcu(&nft_trans_set(trans)->list); break; case NFT_MSG_DELSET: - trans->ctx.table->use++; + nft_use_inc_restore(&trans->ctx.table->use); nft_clear(trans->ctx.net, nft_trans_set(trans)); if (nft_trans_set(trans)->flags & (NFT_SET_MAP | NFT_SET_OBJECT)) nft_map_activate(&trans->ctx, nft_trans_set(trans)); @@ -9478,12 +9516,12 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans)); nft_trans_destroy(trans); } else { - trans->ctx.table->use--; + nft_use_dec_restore(&trans->ctx.table->use); nft_obj_del(nft_trans_obj(trans)); } break; case NFT_MSG_DELOBJ: - trans->ctx.table->use++; + nft_use_inc_restore(&trans->ctx.table->use); nft_clear(trans->ctx.net, nft_trans_obj(trans)); nft_trans_destroy(trans); break; @@ -9492,7 +9530,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_unregister_flowtable_net_hooks(net, &nft_trans_flowtable_hooks(trans)); } else { - trans->ctx.table->use--; 
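/*
 * The nft_use_inc()/nft_use_dec() conversions running through all the
 * nf_tables hunks above replace bare "use++/use--" with helpers that
 * refuse to wrap, so callers fail with -EMFILE instead of corrupting
 * the reference count. A sketch of such helpers, assuming a plain u32
 * counter capped near UINT_MAX; the real definitions belong to the
 * nf_tables headers of this series.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define USE_MAX	(UINT_MAX - 1)	/* assumed cap, leaves room for restore */

static bool demo_use_inc(unsigned int *use)
{
	if (*use >= USE_MAX)
		return false;	/* caller maps this to -EMFILE */
	(*use)++;
	return true;
}

static void demo_use_dec(unsigned int *use)
{
	(*use)--;		/* only after a successful inc */
}

int main(void)
{
	unsigned int use = USE_MAX;

	if (!demo_use_inc(&use))
		puts("saturated: would return -EMFILE");
	else
		demo_use_dec(&use);
	return 0;
}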
+ nft_use_dec_restore(&trans->ctx.table->use); list_del_rcu(&nft_trans_flowtable(trans)->list); nft_unregister_flowtable_net_hooks(net, &nft_trans_flowtable(trans)->hook_list); @@ -9503,7 +9541,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) list_splice(&nft_trans_flowtable_hooks(trans), &nft_trans_flowtable(trans)->hook_list); } else { - trans->ctx.table->use++; + nft_use_inc_restore(&trans->ctx.table->use); nft_clear(trans->ctx.net, nft_trans_flowtable(trans)); } nft_trans_destroy(trans); @@ -9956,8 +9994,9 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, if (desc->flags & NFT_DATA_DESC_SETELEM && chain->flags & NFT_CHAIN_BINDING) return -EINVAL; + if (!nft_use_inc(&chain->use)) + return -EMFILE; - chain->use++; data->verdict.chain = chain; break; } @@ -9975,7 +10014,7 @@ static void nft_verdict_uninit(const struct nft_data *data) case NFT_JUMP: case NFT_GOTO: chain = data->verdict.chain; - chain->use--; + nft_use_dec(&chain->use); break; } } @@ -10144,11 +10183,11 @@ int __nft_release_basechain(struct nft_ctx *ctx) nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain); list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) { list_del(&rule->list); - ctx->chain->use--; + nft_use_dec(&ctx->chain->use); nf_tables_rule_release(ctx, rule); } nft_chain_del(ctx->chain); - ctx->table->use--; + nft_use_dec(&ctx->table->use); nf_tables_chain_destroy(ctx); return 0; @@ -10201,18 +10240,18 @@ static void __nft_release_table(struct net *net, struct nft_table *table) ctx.chain = chain; list_for_each_entry_safe(rule, nr, &chain->rules, list) { list_del(&rule->list); - chain->use--; + nft_use_dec(&chain->use); nf_tables_rule_release(&ctx, rule); } } list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) { list_del(&flowtable->list); - table->use--; + nft_use_dec(&table->use); nf_tables_flowtable_destroy(flowtable); } list_for_each_entry_safe(set, ns, &table->sets, list) { list_del(&set->list); - table->use--; + nft_use_dec(&table->use); if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT)) nft_map_deactivate(&ctx, set); @@ -10220,13 +10259,13 @@ static void __nft_release_table(struct net *net, struct nft_table *table) } list_for_each_entry_safe(obj, ne, &table->objects, list) { nft_obj_del(obj); - table->use--; + nft_use_dec(&table->use); nft_obj_destroy(&ctx, obj); } list_for_each_entry_safe(chain, nc, &table->chains, list) { ctx.chain = chain; nft_chain_del(chain); - table->use--; + nft_use_dec(&table->use); nf_tables_chain_destroy(&ctx); } nf_tables_table_destroy(&ctx); @@ -10264,7 +10303,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event, deleted = 0; mutex_lock(&nft_net->commit_mutex); if (!list_empty(&nf_tables_destroy_list)) - rcu_barrier(); + nf_tables_trans_destroy_flush_work(); again: list_for_each_entry(table, &nft_net->tables, list) { if (nft_table_has_owner(table) && diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 29c7ae8789e9..73e606372b05 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -191,6 +191,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx, if (IS_ERR(set)) return PTR_ERR(set); + if (set->flags & NFT_SET_OBJECT) + return -EOPNOTSUPP; + if (set->ops->update == NULL) return -EOPNOTSUPP; diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index aac6db8680d4..a5fc7213be3e 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -381,8 +381,10 
@@ static int nft_flow_offload_init(const struct nft_ctx *ctx, if (IS_ERR(flowtable)) return PTR_ERR(flowtable); + if (!nft_use_inc(&flowtable->use)) + return -EMFILE; + priv->flowtable = flowtable; - flowtable->use++; return nf_ct_netns_get(ctx->net, ctx->family); } @@ -401,7 +403,7 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx, { struct nft_flow_offload *priv = nft_expr_priv(expr); - priv->flowtable->use++; + nft_use_inc_restore(&priv->flowtable->use); } static void nft_flow_offload_destroy(const struct nft_ctx *ctx, diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 6bf1c852e8ea..7d5b63c5a30a 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -168,7 +168,7 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx, nft_immediate_chain_deactivate(ctx, chain, phase); nft_chain_del(chain); chain->bound = false; - chain->table->use--; + nft_use_dec(&chain->table->use); break; } break; @@ -207,7 +207,7 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx, * let the transaction records release this chain and its rules. */ if (chain->bound) { - chain->use--; + nft_use_dec(&chain->use); break; } @@ -215,9 +215,9 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx, chain_ctx = *ctx; chain_ctx.chain = chain; - chain->use--; + nft_use_dec(&chain->use); list_for_each_entry_safe(rule, n, &chain->rules, list) { - chain->use--; + nft_use_dec(&chain->use); list_del(&rule->list); nf_tables_rule_destroy(&chain_ctx, rule); } diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c index 3ff91bcaa5f2..156787b76667 100644 --- a/net/netfilter/nft_objref.c +++ b/net/netfilter/nft_objref.c @@ -41,8 +41,10 @@ static int nft_objref_init(const struct nft_ctx *ctx, if (IS_ERR(obj)) return -ENOENT; + if (!nft_use_inc(&obj->use)) + return -EMFILE; + nft_objref_priv(expr) = obj; - obj->use++; return 0; } @@ -71,7 +73,7 @@ static void nft_objref_deactivate(const struct nft_ctx *ctx, if (phase == NFT_TRANS_COMMIT) return; - obj->use--; + nft_use_dec(&obj->use); } static void nft_objref_activate(const struct nft_ctx *ctx, @@ -79,7 +81,7 @@ static void nft_objref_activate(const struct nft_ctx *ctx, { struct nft_object *obj = nft_objref_priv(expr); - obj->use++; + nft_use_inc_restore(&obj->use); } static struct nft_expr_type nft_objref_type; diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index a81829c10fea..8c16681884b7 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -901,12 +901,14 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f) static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k, int mask_bits) { - int rule = f->rules++, group, ret, bit_offset = 0; + int rule = f->rules, group, ret, bit_offset = 0; - ret = pipapo_resize(f, f->rules - 1, f->rules); + ret = pipapo_resize(f, f->rules, f->rules + 1); if (ret) return ret; + f->rules++; + for (group = 0; group < f->groups; group++) { int i, v; u8 mask; @@ -1051,7 +1053,9 @@ static int pipapo_expand(struct nft_pipapo_field *f, step++; if (step >= len) { if (!masks) { - pipapo_insert(f, base, 0); + err = pipapo_insert(f, base, 0); + if (err < 0) + return err; masks = 1; } goto out; @@ -1234,6 +1238,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, else ret = pipapo_expand(f, start, end, f->groups * f->bb); + if (ret < 0) + return ret; + if (f->bsize > bsize_max) bsize_max = f->bsize; @@ -1665,6 +1672,17 @@ static void 
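/*
 * The pipapo_insert() hunk above stops bumping f->rules before the
 * reallocation: grow the storage first, and only advance the count once
 * the fallible step has succeeded, so an -ENOMEM leaves the structure
 * fully consistent. A generic sketch of that ordering on a plain array.
 */
#include <stdlib.h>

struct table {
	int *vals;
	size_t rules;
};

static int table_insert(struct table *t, int v)
{
	/* fallible step first: make room for rules + 1 entries */
	int *grown = realloc(t->vals, (t->rules + 1) * sizeof(*grown));

	if (!grown)
		return -1;	/* t->rules still describes t->vals exactly */

	t->vals = grown;
	t->vals[t->rules] = v;
	t->rules++;		/* commit only after every step succeeded */
	return 0;
}

int main(void)
{
	struct table t = { 0 };

	if (table_insert(&t, 42))
		return 1;
	free(t.vals);
	return 0;
}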
nft_pipapo_commit(const struct nft_set *set) priv->clone = new_clone; } +static bool nft_pipapo_transaction_mutex_held(const struct nft_set *set) +{ +#ifdef CONFIG_PROVE_LOCKING + const struct net *net = read_pnet(&set->net); + + return lockdep_is_held(&nft_pernet(net)->commit_mutex); +#else + return true; +#endif +} + static void nft_pipapo_abort(const struct nft_set *set) { struct nft_pipapo *priv = nft_set_priv(set); @@ -1673,7 +1691,7 @@ static void nft_pipapo_abort(const struct nft_set *set) if (!priv->dirty) return; - m = rcu_dereference(priv->match); + m = rcu_dereference_protected(priv->match, nft_pipapo_transaction_mutex_held(set)); new_clone = pipapo_clone(m); if (IS_ERR(new_clone)) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ccf84ce41fd7..62c0fbb9de81 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -368,18 +368,20 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status) { union tpacket_uhdr h; + /* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */ + h.raw = frame; switch (po->tp_version) { case TPACKET_V1: - h.h1->tp_status = status; + WRITE_ONCE(h.h1->tp_status, status); flush_dcache_page(pgv_to_page(&h.h1->tp_status)); break; case TPACKET_V2: - h.h2->tp_status = status; + WRITE_ONCE(h.h2->tp_status, status); flush_dcache_page(pgv_to_page(&h.h2->tp_status)); break; case TPACKET_V3: - h.h3->tp_status = status; + WRITE_ONCE(h.h3->tp_status, status); flush_dcache_page(pgv_to_page(&h.h3->tp_status)); break; default: @@ -396,17 +398,19 @@ static int __packet_get_status(const struct packet_sock *po, void *frame) smp_rmb(); + /* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */ + h.raw = frame; switch (po->tp_version) { case TPACKET_V1: flush_dcache_page(pgv_to_page(&h.h1->tp_status)); - return h.h1->tp_status; + return READ_ONCE(h.h1->tp_status); case TPACKET_V2: flush_dcache_page(pgv_to_page(&h.h2->tp_status)); - return h.h2->tp_status; + return READ_ONCE(h.h2->tp_status); case TPACKET_V3: flush_dcache_page(pgv_to_page(&h.h3->tp_status)); - return h.h3->tp_status; + return READ_ONCE(h.h3->tp_status); default: WARN(1, "TPACKET version not supported.\n"); BUG(); diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index ea52c320f67c..a2f53aee3909 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -265,7 +265,6 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, return -ENOBUFS; fnew->id = f->id; - fnew->res = f->res; fnew->ifindex = f->ifindex; fnew->tp = f->tp; diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 48712bc51bda..194468d0355a 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -511,7 +511,6 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, if (fold) { f->id = fold->id; f->iif = fold->iif; - f->res = fold->res; f->handle = fold->handle; f->tp = fold->tp; diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 0025fa837e85..17edcf1d1c3b 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -812,7 +812,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp, new->ifindex = n->ifindex; new->fshift = n->fshift; - new->res = n->res; new->flags = n->flags; RCU_INIT_POINTER(new->ht_down, ht); @@ -999,18 +998,62 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return -EINVAL; } + /* At this point, we need to derive the new handle that will be used to + * uniquely map the identity of this table match entry. 
The + * identity of the entry that we need to construct is 32 bits made of: + * htid(12b):bucketid(8b):node/entryid(12b) + * + * At this point _we have the table(ht)_ in which we will insert this + * entry. We carry the table's id in variable "htid". + * Note that earlier code picked the ht selection either by a) the user + * providing the htid specified via TCA_U32_HASH attribute or b) when + * no such attribute is passed then the root ht, is default to at ID + * 0x[800][00][000]. Rule: the root table has a single bucket with ID 0. + * If OTOH the user passed us the htid, they may also pass a bucketid of + * choice. 0 is fine. For example a user htid is 0x[600][01][000] it is + * indicating hash bucketid of 1. Rule: the entry/node ID _cannot_ be + * passed via the htid, so even if it was non-zero it will be ignored. + * + * We may also have a handle, if the user passed one. The handle also + * carries the same addressing of htid(12b):bucketid(8b):node/entryid(12b). + * Rule: the bucketid on the handle is ignored even if one was passed; + * rather the value on "htid" is always assumed to be the bucketid. + */ if (handle) { + /* Rule: The htid from handle and tableid from htid must match */ if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) { NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch"); return -EINVAL; } - handle = htid | TC_U32_NODE(handle); - err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle, - GFP_KERNEL); - if (err) - return err; - } else + /* Ok, so far we have a valid htid(12b):bucketid(8b) but we + * need to finalize the table entry identification with the last + * part - the node/entryid(12b)). Rule: Nodeid _cannot be 0_ for + * entries. Rule: nodeid of 0 is reserved only for tables(see + * earlier code which processes TC_U32_DIVISOR attribute). + * Rule: The nodeid can only be derived from the handle (and not + * htid). + * Rule: if the handle specified zero for the node id example + * 0x60000000, then pick a new nodeid from the pool of IDs + * this hash table has been allocating from. + * If OTOH it is specified (i.e for example the user passed a + * handle such as 0x60000123), then we use it generate our final + * handle which is used to uniquely identify the match entry. + */ + if (!TC_U32_NODE(handle)) { + handle = gen_new_kid(ht, htid); + } else { + handle = htid | TC_U32_NODE(handle); + err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, + handle, GFP_KERNEL); + if (err) + return err; + } + } else { + /* The user did not give us a handle; lets just generate one + * from the table's pool of nodeids. + */ handle = gen_new_kid(ht, htid); + } if (tb[TCA_U32_SEL] == NULL) { NL_SET_ERR_MSG_MOD(extack, "Selector not specified"); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 328db5e1b0ea..fa79dbd3601f 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1513,10 +1513,28 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, return 0; } +static bool req_create_or_replace(struct nlmsghdr *n) +{ + return (n->nlmsg_flags & NLM_F_CREATE && + n->nlmsg_flags & NLM_F_REPLACE); +} + +static bool req_create_exclusive(struct nlmsghdr *n) +{ + return (n->nlmsg_flags & NLM_F_CREATE && + n->nlmsg_flags & NLM_F_EXCL); +} + +static bool req_change(struct nlmsghdr *n) +{ + return (!(n->nlmsg_flags & NLM_F_CREATE) && + !(n->nlmsg_flags & NLM_F_REPLACE) && + !(n->nlmsg_flags & NLM_F_EXCL)); +} + /* * Create/change qdisc. 
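/*
 * The long cls_u32 comment above describes the 32-bit handle as
 * htid(12):bucketid(8):nodeid(12). The masks below mirror the uapi
 * TC_U32_* macros from pkt_cls.h so the layout is easy to verify; the
 * sample handle is arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_U32_HTID(h)	((h) & 0xFFF00000u)	/* table id, top 12 bits */
#define DEMO_U32_HASH(h)	(((h) >> 12) & 0xFFu)	/* bucket id, middle 8 bits */
#define DEMO_U32_NODE(h)	((h) & 0x00000FFFu)	/* entry id, low 12 bits */

int main(void)
{
	uint32_t handle = 0x60001123u;	/* htid 0x600..., bucket 1, node 0x123 */

	printf("htid=%#x bucket=%#x node=%#x\n",
	       (unsigned int)DEMO_U32_HTID(handle),
	       (unsigned int)DEMO_U32_HASH(handle),
	       (unsigned int)DEMO_U32_NODE(handle));
	return 0;
}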
*/ - static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, struct netlink_ext_ack *extack) { @@ -1613,27 +1631,35 @@ replay: * * We know, that some child q is already * attached to this parent and have choice: - * either to change it or to create/graft new one. + * 1) change it or 2) create/graft new one. + * If the requested qdisc kind is different + * than the existing one, then we choose graft. + * If they are the same then this is "change" + * operation - just let it fallthrough.. * * 1. We are allowed to create/graft only - * if CREATE and REPLACE flags are set. + * if the request is explicitly stating + * "please create if it doesn't exist". * - * 2. If EXCL is set, requestor wanted to say, - * that qdisc tcm_handle is not expected + * 2. If the request is to exclusive create + * then the qdisc tcm_handle is not expected * to exist, so that we choose create/graft too. * * 3. The last case is when no flags are set. + * This will happen when for example tc + * utility issues a "change" command. * Alas, it is sort of hole in API, we * cannot decide what to do unambiguously. - * For now we select create/graft, if - * user gave KIND, which does not match existing. + * For now we select create/graft. */ - if ((n->nlmsg_flags & NLM_F_CREATE) && - (n->nlmsg_flags & NLM_F_REPLACE) && - ((n->nlmsg_flags & NLM_F_EXCL) || - (tca[TCA_KIND] && - nla_strcmp(tca[TCA_KIND], q->ops->id)))) - goto create_n_graft; + if (tca[TCA_KIND] && + nla_strcmp(tca[TCA_KIND], q->ops->id)) { + if (req_create_or_replace(n) || + req_create_exclusive(n)) + goto create_n_graft; + else if (req_change(n)) + goto create_n_graft2; + } } } } else { @@ -1667,6 +1693,7 @@ create_n_graft: NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag"); return -ENOENT; } +create_n_graft2: if (clid == TC_H_INGRESS) { if (dev_ingress_queue(dev)) { q = qdisc_create(dev, dev_ingress_queue(dev), p, diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index be42b1196786..08aaa6efc62c 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -773,12 +773,10 @@ static void dist_free(struct disttable *d) * signed 16 bit values. 
*/ -static int get_dist_table(struct Qdisc *sch, struct disttable **tbl, - const struct nlattr *attr) +static int get_dist_table(struct disttable **tbl, const struct nlattr *attr) { size_t n = nla_len(attr)/sizeof(__s16); const __s16 *data = nla_data(attr); - spinlock_t *root_lock; struct disttable *d; int i; @@ -793,13 +791,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl, for (i = 0; i < n; i++) d->table[i] = data[i]; - root_lock = qdisc_root_sleeping_lock(sch); - - spin_lock_bh(root_lock); - swap(*tbl, d); - spin_unlock_bh(root_lock); - - dist_free(d); + *tbl = d; return 0; } @@ -956,6 +948,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, { struct netem_sched_data *q = qdisc_priv(sch); struct nlattr *tb[TCA_NETEM_MAX + 1]; + struct disttable *delay_dist = NULL; + struct disttable *slot_dist = NULL; struct tc_netem_qopt *qopt; struct clgstate old_clg; int old_loss_model = CLG_RANDOM; @@ -969,6 +963,18 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, if (ret < 0) return ret; + if (tb[TCA_NETEM_DELAY_DIST]) { + ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]); + if (ret) + goto table_free; + } + + if (tb[TCA_NETEM_SLOT_DIST]) { + ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]); + if (ret) + goto table_free; + } + sch_tree_lock(sch); /* backup q->clg and q->loss_model */ old_clg = q->clg; @@ -978,26 +984,17 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); if (ret) { q->loss_model = old_loss_model; + q->clg = old_clg; goto unlock; } } else { q->loss_model = CLG_RANDOM; } - if (tb[TCA_NETEM_DELAY_DIST]) { - ret = get_dist_table(sch, &q->delay_dist, - tb[TCA_NETEM_DELAY_DIST]); - if (ret) - goto get_table_failure; - } - - if (tb[TCA_NETEM_SLOT_DIST]) { - ret = get_dist_table(sch, &q->slot_dist, - tb[TCA_NETEM_SLOT_DIST]); - if (ret) - goto get_table_failure; - } - + if (delay_dist) + swap(q->delay_dist, delay_dist); + if (slot_dist) + swap(q->slot_dist, slot_dist); sch->limit = qopt->limit; q->latency = PSCHED_TICKS2NS(qopt->latency); @@ -1047,17 +1044,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, unlock: sch_tree_unlock(sch); - return ret; -get_table_failure: - /* recover clg and loss_model, in case of - * q->clg and q->loss_model were modified - * in get_loss_clg() - */ - q->clg = old_clg; - q->loss_model = old_loss_model; - - goto unlock; +table_free: + dist_free(delay_dist); + dist_free(slot_dist); + return ret; } static int netem_init(struct Qdisc *sch, struct nlattr *opt, diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index e203deacc953..e40b4425eb6b 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -780,6 +780,11 @@ static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 }, }; +static struct netlink_range_validation_signed taprio_cycle_time_range = { + .min = 0, + .max = INT_MAX, +}; + static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { [TCA_TAPRIO_ATTR_PRIOMAP] = { .len = sizeof(struct tc_mqprio_qopt) @@ -788,7 +793,8 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { [TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 }, [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED }, [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 }, - [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 }, + [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = + 
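/*
 * The netem_change() rework above moves the GFP_KERNEL table
 * allocations out from under the qdisc tree lock and publishes them
 * with swap() inside it, freeing the stale tables only after unlock.
 * A userspace analogue of that allocate/swap/free-late ordering; names
 * are illustrative.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct disttable { size_t n; short *v; };

static struct disttable *active_tbl;
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

static int replace_table(const short *data, size_t n)
{
	struct disttable *d, *old;

	d = malloc(sizeof(*d));			/* may block: do it unlocked */
	if (!d)
		return -1;
	d->v = malloc(n * sizeof(*d->v));
	if (!d->v) {
		free(d);
		return -1;
	}
	memcpy(d->v, data, n * sizeof(*d->v));
	d->n = n;

	pthread_mutex_lock(&tree_lock);		/* short, allocation-free */
	old = active_tbl;
	active_tbl = d;
	pthread_mutex_unlock(&tree_lock);

	if (old) {				/* free the old table late */
		free(old->v);
		free(old);
	}
	return 0;
}

int main(void)
{
	short data[4] = { 1, 2, 3, 4 };

	return replace_table(data, 4) ? 1 : 0;
}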
NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range), [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 }, [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 }, [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 }, @@ -923,6 +929,11 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb, return -EINVAL; } + if (cycle < 0 || cycle > INT_MAX) { + NL_SET_ERR_MSG(extack, "'cycle_time' is too big"); + return -EINVAL; + } + new->cycle_time = cycle; } @@ -1127,7 +1138,7 @@ static void setup_txtime(struct taprio_sched *q, struct sched_gate_list *sched, ktime_t base) { struct sched_entry *entry; - u32 interval = 0; + u64 interval = 0; list_for_each_entry(entry, &sched->entries, list) { entry->next_txtime = ktime_add_ns(base, interval); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f10ad80fd694..717e2f60370b 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -97,7 +97,7 @@ struct percpu_counter sctp_sockets_allocated; static void sctp_enter_memory_pressure(struct sock *sk) { - sctp_memory_pressure = 1; + WRITE_ONCE(sctp_memory_pressure, 1); } diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 507ba8b79992..41095a278f79 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -962,9 +962,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, if (!rep->rr_rdmabuf) goto out_free; - if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) - goto out_free_regbuf; - rep->rr_cid.ci_completion_id = atomic_inc_return(&r_xprt->rx_ep->re_completion_ids); @@ -983,8 +980,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, spin_unlock(&buf->rb_lock); return rep; -out_free_regbuf: - rpcrdma_regbuf_free(rep->rr_rdmabuf); out_free: kfree(rep); out: @@ -1391,6 +1386,10 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp) rep = rpcrdma_rep_create(r_xprt, temp); if (!rep) break; + if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) { + rpcrdma_rep_put(buf, rep); + break; + } rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id; trace_xprtrdma_post_recv(rep); diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index cf75969375cf..88785196a896 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -45,14 +45,14 @@ */ static DECLARE_RWSEM(device_offload_lock); -static void tls_device_gc_task(struct work_struct *work); +static struct workqueue_struct *destruct_wq __read_mostly; -static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task); -static LIST_HEAD(tls_device_gc_list); static LIST_HEAD(tls_device_list); static LIST_HEAD(tls_device_down_list); static DEFINE_SPINLOCK(tls_device_lock); +static struct page *dummy_page; + static void tls_device_free_ctx(struct tls_context *ctx) { if (ctx->tx_conf == TLS_HW) { @@ -67,47 +67,44 @@ static void tls_device_free_ctx(struct tls_context *ctx) tls_ctx_free(NULL, ctx); } -static void tls_device_gc_task(struct work_struct *work) +static void tls_device_tx_del_task(struct work_struct *work) { - struct tls_context *ctx, *tmp; - unsigned long flags; - LIST_HEAD(gc_list); - - spin_lock_irqsave(&tls_device_lock, flags); - list_splice_init(&tls_device_gc_list, &gc_list); - spin_unlock_irqrestore(&tls_device_lock, flags); - - list_for_each_entry_safe(ctx, tmp, &gc_list, list) { - struct net_device *netdev = ctx->netdev; + struct tls_offload_context_tx *offload_ctx = + container_of(work, struct tls_offload_context_tx, destruct_work); + struct tls_context *ctx = offload_ctx->ctx; + struct net_device 
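/*
 * The tls_device hunks below replace one global gc list plus a single
 * work item with a work item embedded in each offload context, queued
 * under tls_device_lock and flushed through a dedicated workqueue on
 * teardown. A compact sketch of that per-object deferred-destruction
 * shape, using a pthread worker in place of the kernel workqueue API.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {			/* each context embeds its own work node,
				 * like destruct_work in the offload ctx */
	struct ctx *next;
	int id;
};

static struct ctx *pending;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_destruction(struct ctx *c)
{
	pthread_mutex_lock(&lock);	/* queue under the device lock */
	c->next = pending;
	pending = c;
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (pending) {
		struct ctx *c = pending;

		pending = c->next;
		pthread_mutex_unlock(&lock);
		printf("destroying ctx %d\n", c->id);	/* tls_dev_del() etc. */
		free(c);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));
	pthread_t t;

	if (!c)
		return 1;
	c->id = 1;
	queue_destruction(c);
	pthread_create(&t, NULL, worker, NULL);	/* flush_workqueue() analogue */
	pthread_join(t, NULL);
	return 0;
}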
*netdev = ctx->netdev; - if (netdev && ctx->tx_conf == TLS_HW) { - netdev->tlsdev_ops->tls_dev_del(netdev, ctx, - TLS_OFFLOAD_CTX_DIR_TX); - dev_put(netdev); - ctx->netdev = NULL; - } - - list_del(&ctx->list); - tls_device_free_ctx(ctx); - } + netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX); + dev_put(netdev); + ctx->netdev = NULL; + tls_device_free_ctx(ctx); } static void tls_device_queue_ctx_destruction(struct tls_context *ctx) { unsigned long flags; + bool async_cleanup; spin_lock_irqsave(&tls_device_lock, flags); - if (unlikely(!refcount_dec_and_test(&ctx->refcount))) - goto unlock; + if (unlikely(!refcount_dec_and_test(&ctx->refcount))) { + spin_unlock_irqrestore(&tls_device_lock, flags); + return; + } - list_move_tail(&ctx->list, &tls_device_gc_list); + list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */ + async_cleanup = ctx->netdev && ctx->tx_conf == TLS_HW; + if (async_cleanup) { + struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx); - /* schedule_work inside the spinlock - * to make sure tls_device_down waits for that work. - */ - schedule_work(&tls_device_gc_work); -unlock: + /* queue_work inside the spinlock + * to make sure tls_device_down waits for that work. + */ + queue_work(destruct_wq, &offload_ctx->destruct_work); + } spin_unlock_irqrestore(&tls_device_lock, flags); + + if (!async_cleanup) + tls_device_free_ctx(ctx); } /* We assume that the socket is already connected */ @@ -302,36 +299,33 @@ static int tls_push_record(struct sock *sk, return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags); } -static int tls_device_record_close(struct sock *sk, - struct tls_context *ctx, - struct tls_record_info *record, - struct page_frag *pfrag, - unsigned char record_type) +static void tls_device_record_close(struct sock *sk, + struct tls_context *ctx, + struct tls_record_info *record, + struct page_frag *pfrag, + unsigned char record_type) { struct tls_prot_info *prot = &ctx->prot_info; - int ret; + struct page_frag dummy_tag_frag; /* append tag * device will fill in the tag, we just need to append a placeholder * use socket memory to improve coalescing (re-using a single buffer * increases frag count) - * if we can't allocate memory now, steal some back from data + * if we can't allocate memory now use the dummy page */ - if (likely(skb_page_frag_refill(prot->tag_size, pfrag, - sk->sk_allocation))) { - ret = 0; - tls_append_frag(record, pfrag, prot->tag_size); - } else { - ret = prot->tag_size; - if (record->len <= prot->overhead_size) - return -ENOMEM; + if (unlikely(pfrag->size - pfrag->offset < prot->tag_size) && + !skb_page_frag_refill(prot->tag_size, pfrag, sk->sk_allocation)) { + dummy_tag_frag.page = dummy_page; + dummy_tag_frag.offset = 0; + pfrag = &dummy_tag_frag; } + tls_append_frag(record, pfrag, prot->tag_size); /* fill prepend */ tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]), record->len - prot->overhead_size, record_type); - return ret; } static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx, @@ -507,18 +501,8 @@ last_record: if (done || record->len >= max_open_record_len || (record->num_frags >= MAX_SKB_FRAGS - 1)) { - rc = tls_device_record_close(sk, tls_ctx, record, - pfrag, record_type); - if (rc) { - if (rc > 0) { - size += rc; - } else { - size = orig_size; - destroy_record(record); - ctx->open_record = NULL; - break; - } - } + tls_device_record_close(sk, tls_ctx, record, + pfrag, record_type); rc = tls_push_record(sk, tls_ctx, @@ -1105,6 +1089,9 @@ int 
tls_set_device_offload(struct sock *sk, struct tls_context *ctx) start_marker_record->len = 0; start_marker_record->num_frags = 0; + INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task); + offload_ctx->ctx = ctx; + INIT_LIST_HEAD(&offload_ctx->records_list); list_add_tail(&start_marker_record->list, &offload_ctx->records_list); spin_lock_init(&offload_ctx->lock); @@ -1362,7 +1349,7 @@ static int tls_device_down(struct net_device *netdev) up_write(&device_offload_lock); - flush_work(&tls_device_gc_work); + flush_workqueue(destruct_wq); return NOTIFY_DONE; } @@ -1403,12 +1390,36 @@ static struct notifier_block tls_dev_notifier = { int __init tls_device_init(void) { - return register_netdevice_notifier(&tls_dev_notifier); + int err; + + dummy_page = alloc_page(GFP_KERNEL); + if (!dummy_page) + return -ENOMEM; + + destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0); + if (!destruct_wq) { + err = -ENOMEM; + goto err_free_dummy; + } + + err = register_netdevice_notifier(&tls_dev_notifier); + if (err) + goto err_destroy_wq; + + return 0; + +err_destroy_wq: + destroy_workqueue(destruct_wq); +err_free_dummy: + put_page(dummy_page); + return err; } void __exit tls_device_cleanup(void) { unregister_netdevice_notifier(&tls_dev_notifier); - flush_work(&tls_device_gc_work); + flush_workqueue(destruct_wq); + destroy_workqueue(destruct_wq); clean_acked_data_flush(); + put_page(dummy_page); } diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index d326540e4938..5264fe82e6ec 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -717,7 +717,7 @@ static int unix_set_peek_off(struct sock *sk, int val) if (mutex_lock_interruptible(&u->iolock)) return -EINTR; - sk->sk_peek_off = val; + WRITE_ONCE(sk->sk_peek_off, val); mutex_unlock(&u->iolock); return 0; @@ -2156,6 +2156,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, if (false) { alloc_skb: + spin_unlock(&other->sk_receive_queue.lock); unix_state_unlock(other); mutex_unlock(&unix_sk(other)->iolock); newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, @@ -2195,6 +2196,7 @@ alloc_skb: init_scm = false; } + spin_lock(&other->sk_receive_queue.lock); skb = skb_peek_tail(&other->sk_receive_queue); if (tail && tail == skb) { skb = newskb; @@ -2225,14 +2227,11 @@ alloc_skb: refcount_add(size, &sk->sk_wmem_alloc); if (newskb) { - err = unix_scm_to_skb(&scm, skb, false); - if (err) - goto err_state_unlock; - spin_lock(&other->sk_receive_queue.lock); + unix_scm_to_skb(&scm, skb, false); __skb_queue_tail(&other->sk_receive_queue, newskb); - spin_unlock(&other->sk_receive_queue.lock); } + spin_unlock(&other->sk_receive_queue.lock); unix_state_unlock(other); mutex_unlock(&unix_sk(other)->iolock); diff --git a/net/wireless/scan.c b/net/wireless/scan.c index a565476809f0..c7192d7bcbd7 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -641,7 +641,7 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies, ret = cfg80211_calc_short_ssid(ies, &ssid_elem, &s_ssid_tmp); if (ret) - return ret; + return 0; /* RNR IE may contain more than one NEIGHBOR_AP_INFO */ while (pos + sizeof(*ap_info) <= end) { diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index d7e542d7a1c6..cbe68162260b 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -1001,6 +1001,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) err = xp_alloc_tx_descs(xs->pool, xs); if (err) { xp_put_pool(xs->pool); + xs->pool = NULL; sockfd_put(sock); goto out_unlock; } diff --git a/net/xfrm/xfrm_compat.c 
b/net/xfrm/xfrm_compat.c index 8cbf45a8bcdc..655fe4ff8621 100644 --- a/net/xfrm/xfrm_compat.c +++ b/net/xfrm/xfrm_compat.c @@ -108,7 +108,7 @@ static const struct nla_policy compat_policy[XFRMA_MAX+1] = { [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, - [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) }, + [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) }, [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c index 694eec6ca147..ded752e33dac 100644 --- a/net/xfrm/xfrm_interface_core.c +++ b/net/xfrm/xfrm_interface_core.c @@ -399,8 +399,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) switch (skb->protocol) { case htons(ETH_P_IPV6): - xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + xfrm_decode_session(skb, &fl, AF_INET6); if (!dst) { fl.u.ip6.flowi6_oif = dev->ifindex; fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC; @@ -414,8 +414,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) } break; case htons(ETH_P_IP): - xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + xfrm_decode_session(skb, &fl, AF_INET); if (!dst) { struct rtable *rt; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index eb0952dbf423..ff56b6a0162e 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -527,7 +527,7 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH]; - if (re) { + if (re && x->replay_esn && x->preplay_esn) { struct xfrm_replay_state_esn *replay_esn; replay_esn = nla_data(re); memcpy(x->replay_esn, replay_esn, @@ -1159,6 +1159,15 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) sizeof(*filter), GFP_KERNEL); if (filter == NULL) return -ENOMEM; + + /* see addr_match(), (prefix length >> 5) << 2 + * will be used to compare xfrm_address_t + */ + if (filter->splen > (sizeof(xfrm_address_t) << 3) || + filter->dplen > (sizeof(xfrm_address_t) << 3)) { + kfree(filter); + return -EINVAL; + } } if (attrs[XFRMA_PROTO]) @@ -2825,7 +2834,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, - [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) }, + [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) }, [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, @@ -2845,6 +2854,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_SET_MARK] = { .type = NLA_U32 }, [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 }, [XFRMA_IF_ID] = { .type = NLA_U32 }, + [XFRMA_MTIMER_THRESH] = { .type = NLA_U32 }, }; EXPORT_SYMBOL_GPL(xfrma_policy); diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 0ae1b718194a..7f7858593bdb 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -2011,6 +2011,7 @@ static int filename_trans_read_helper(struct 
policydb *p, void *fp) if (!datum) goto out; + datum->next = NULL; *dst = datum; /* ebitmap_read() will at least init the bitmap */ @@ -2023,7 +2024,6 @@ static int filename_trans_read_helper(struct policydb *p, void *fp) goto out; datum->otype = le32_to_cpu(buf[0]); - datum->next = NULL; dst = &datum->next; } diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c index f1470590239e..711e71016a7c 100644 --- a/sound/core/pcm_memory.c +++ b/sound/core/pcm_memory.c @@ -31,20 +31,51 @@ static unsigned long max_alloc_per_card = 32UL * 1024UL * 1024UL; module_param(max_alloc_per_card, ulong, 0644); MODULE_PARM_DESC(max_alloc_per_card, "Max total allocation bytes per card."); +static void __update_allocated_size(struct snd_card *card, ssize_t bytes) +{ + card->total_pcm_alloc_bytes += bytes; +} + +static void update_allocated_size(struct snd_card *card, ssize_t bytes) +{ + mutex_lock(&card->memory_mutex); + __update_allocated_size(card, bytes); + mutex_unlock(&card->memory_mutex); +} + +static void decrease_allocated_size(struct snd_card *card, size_t bytes) +{ + mutex_lock(&card->memory_mutex); + WARN_ON(card->total_pcm_alloc_bytes < bytes); + __update_allocated_size(card, -(ssize_t)bytes); + mutex_unlock(&card->memory_mutex); +} + static int do_alloc_pages(struct snd_card *card, int type, struct device *dev, size_t size, struct snd_dma_buffer *dmab) { int err; + /* check and reserve the requested size */ + mutex_lock(&card->memory_mutex); if (max_alloc_per_card && - card->total_pcm_alloc_bytes + size > max_alloc_per_card) + card->total_pcm_alloc_bytes + size > max_alloc_per_card) { + mutex_unlock(&card->memory_mutex); return -ENOMEM; + } + __update_allocated_size(card, size); + mutex_unlock(&card->memory_mutex); err = snd_dma_alloc_pages(type, dev, size, dmab); if (!err) { - mutex_lock(&card->memory_mutex); - card->total_pcm_alloc_bytes += dmab->bytes; - mutex_unlock(&card->memory_mutex); + /* the actual allocation size might be bigger than requested, + * and we need to correct the account + */ + if (dmab->bytes != size) + update_allocated_size(card, dmab->bytes - size); + } else { + /* take back on allocation failure */ + decrease_allocated_size(card, size); } return err; } @@ -53,10 +84,7 @@ static void do_free_pages(struct snd_card *card, struct snd_dma_buffer *dmab) { if (!dmab->area) return; - mutex_lock(&card->memory_mutex); - WARN_ON(card->total_pcm_alloc_bytes < dmab->bytes); - card->total_pcm_alloc_bytes -= dmab->bytes; - mutex_unlock(&card->memory_mutex); + decrease_allocated_size(card, dmab->bytes); snd_dma_free_pages(dmab); dmab->area = NULL; } diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c index fe3587547cfe..39610a15bcc9 100644 --- a/sound/hda/hdac_regmap.c +++ b/sound/hda/hdac_regmap.c @@ -597,10 +597,9 @@ EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw_once); */ void snd_hdac_regmap_sync(struct hdac_device *codec) { - if (codec->regmap) { - mutex_lock(&codec->regmap_lock); + mutex_lock(&codec->regmap_lock); + if (codec->regmap) regcache_sync(codec->regmap); - mutex_unlock(&codec->regmap_lock); - } + mutex_unlock(&codec->regmap_lock); } EXPORT_SYMBOL_GPL(snd_hdac_regmap_sync); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 965720b1d1b1..59e11a070c20 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -10088,6 +10088,7 @@ static int patch_alc269(struct hda_codec *codec) spec = codec->spec; spec->gen.shared_mic_vref_pin = 0x18; codec->power_save_node = 0; + spec->en_3kpull_low = true; #ifdef 
CONFIG_PM codec->patch_ops.suspend = alc269_suspend; @@ -10170,14 +10171,16 @@ static int patch_alc269(struct hda_codec *codec) spec->shutup = alc256_shutup; spec->init_hook = alc256_init; spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */ - if (codec->bus->pci->vendor == PCI_VENDOR_ID_AMD) - spec->en_3kpull_low = true; + if (codec->core.vendor_id == 0x10ec0236 && + codec->bus->pci->vendor != PCI_VENDOR_ID_AMD) + spec->en_3kpull_low = false; break; case 0x10ec0257: spec->codec_variant = ALC269_TYPE_ALC257; spec->shutup = alc256_shutup; spec->init_hook = alc256_init; spec->gen.mixer_nid = 0; + spec->en_3kpull_low = false; break; case 0x10ec0215: case 0x10ec0245: @@ -10809,6 +10812,7 @@ enum { ALC897_FIXUP_HP_HSMIC_VERB, ALC897_FIXUP_LENOVO_HEADSET_MODE, ALC897_FIXUP_HEADSET_MIC_PIN2, + ALC897_FIXUP_UNIS_H3C_X500S, }; static const struct hda_fixup alc662_fixups[] = { @@ -11248,6 +11252,13 @@ static const struct hda_fixup alc662_fixups[] = { .chained = true, .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE }, + [ALC897_FIXUP_UNIS_H3C_X500S] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + { 0x14, AC_VERB_SET_EAPD_BTLENABLE, 0 }, + {} + }, + }, }; static const struct snd_pci_quirk alc662_fixup_tbl[] = { @@ -11409,6 +11420,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = { {.id = ALC662_FIXUP_USI_HEADSET_MODE, .name = "usi-headset"}, {.id = ALC662_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"}, {.id = ALC669_FIXUP_ACER_ASPIRE_ETHOS, .name = "aspire-ethos"}, + {.id = ALC897_FIXUP_UNIS_H3C_X500S, .name = "unis-h3c-x500s"}, {} }; diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c index 82d4e0fda91b..d62a0e2ddf60 100644 --- a/sound/pci/ymfpci/ymfpci.c +++ b/sound/pci/ymfpci/ymfpci.c @@ -150,8 +150,8 @@ static inline int snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev, i void snd_ymfpci_free_gameport(struct snd_ymfpci *chip) { } #endif /* SUPPORT_JOYSTICK */ -static int snd_card_ymfpci_probe(struct pci_dev *pci, - const struct pci_device_id *pci_id) +static int __snd_card_ymfpci_probe(struct pci_dev *pci, + const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; @@ -333,6 +333,12 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci, return 0; } +static int snd_card_ymfpci_probe(struct pci_dev *pci, + const struct pci_device_id *pci_id) +{ + return snd_card_free_on_error(&pci->dev, __snd_card_ymfpci_probe(pci, pci_id)); +} + static struct pci_driver ymfpci_driver = { .name = KBUILD_MODNAME, .id_table = snd_ymfpci_ids, diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c index e59323fd5bf2..5e00aca0c418 100644 --- a/sound/soc/codecs/rt5665.c +++ b/sound/soc/codecs/rt5665.c @@ -4472,6 +4472,8 @@ static void rt5665_remove(struct snd_soc_component *component) struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component); regmap_write(rt5665->regmap, RT5665_RESET, 0); + + regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies); } #ifdef CONFIG_PM diff --git a/sound/soc/meson/axg-tdm-formatter.c b/sound/soc/meson/axg-tdm-formatter.c index cab7fa2851aa..4834cfd163c0 100644 --- a/sound/soc/meson/axg-tdm-formatter.c +++ b/sound/soc/meson/axg-tdm-formatter.c @@ -30,27 +30,32 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map, struct axg_tdm_stream *ts, unsigned int offset) { - unsigned int val, ch = ts->channels; - unsigned long mask; - int i, j; + unsigned int ch = ts->channels; + u32 val[AXG_TDM_NUM_LANES]; + int i, j, k; + + /* + * We need to 
mimic the slot distribution used by the HW to keep the + * channel placement consistent regardless of the number of channels + * in the stream. This is why the odd algorithm below is used. + */ + memset(val, 0, sizeof(*val) * AXG_TDM_NUM_LANES); /* * Distribute the channels of the stream over the available slots - * of each TDM lane + * of each TDM lane. We need to go over the 32 slots ... */ - for (i = 0; i < AXG_TDM_NUM_LANES; i++) { - val = 0; - mask = ts->mask[i]; - - for (j = find_first_bit(&mask, 32); - (j < 32) && ch; - j = find_next_bit(&mask, 32, j + 1)) { - val |= 1 << j; - ch -= 1; + for (i = 0; (i < 32) && ch; i += 2) { + /* ... of all the lanes ... */ + for (j = 0; j < AXG_TDM_NUM_LANES; j++) { + /* ... then distribute the channels in pairs */ + for (k = 0; k < 2; k++) { + if ((BIT(i + k) & ts->mask[j]) && ch) { + val[j] |= BIT(i + k); + ch -= 1; + } + } } - - regmap_write(map, offset, val); - offset += regmap_get_reg_stride(map); } /* @@ -63,6 +68,11 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map, return -EINVAL; } + for (i = 0; i < AXG_TDM_NUM_LANES; i++) { + regmap_write(map, offset, val[i]); + offset += regmap_get_reg_stride(map); + } + return 0; } EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks); diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index 35cbef171f4a..038d09f6203a 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -1040,12 +1040,22 @@ static int hda_generic_machine_select(struct snd_sof_dev *sdev) pdata->machine = hda_mach; pdata->tplg_filename = tplg_filename; - if (codec_num == 2) { + if (codec_num == 2 || + (codec_num == 1 && !HDA_IDISP_CODEC(bus->codec_mask))) { /* * Prevent SoundWire links from starting when an external * HDaudio codec is used */ hda_mach->mach_params.link_mask = 0; + } else { + /* + * Allow SoundWire links to start when no external HDaudio codec + * was detected. This will not create a SoundWire card but + * will help detect if any SoundWire codec reports as ATTACHED. + */ + struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata; + + hda_mach->mach_params.link_mask = hdev->info.link_mask; } } } diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index efe62f19c4d2..6d332c9eb444 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -4431,6 +4431,35 @@ YAMAHA_DEVICE(0x7010, "UB99"), } } }, +{ + /* Advanced modes of the Mythware XA001AU.
+ * For the standard mode, Mythware XA001AU has ID ffad:a001 + */ + USB_DEVICE_VENDOR_SPEC(0xffad, 0xa001), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "Mythware", + .product_name = "XA001AU", + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_IGNORE_INTERFACE, + }, + { + .ifnum = 1, + .type = QUIRK_AUDIO_STANDARD_INTERFACE, + }, + { + .ifnum = 2, + .type = QUIRK_AUDIO_STANDARD_INTERFACE, + }, + { + .ifnum = -1 + } + } + } +}, #undef USB_DEVICE_VENDOR_SPEC #undef USB_AUDIO_DEVICE diff --git a/tools/include/linux/objtool.h b/tools/include/linux/objtool.h index a2042c418686..51f5b24af834 100644 --- a/tools/include/linux/objtool.h +++ b/tools/include/linux/objtool.h @@ -71,6 +71,23 @@ struct unwind_hint { static void __used __section(".discard.func_stack_frame_non_standard") \ *__func_stack_frame_non_standard_##func = func +/* + * STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore + * for the case where a function is intentionally missing frame pointer setup, + * but otherwise needs objtool/ORC coverage when frame pointers are disabled. + */ +#ifdef CONFIG_FRAME_POINTER +#define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func) +#else +#define STACK_FRAME_NON_STANDARD_FP(func) +#endif + +#define ANNOTATE_NOENDBR \ + "986: \n\t" \ + ".pushsection .discard.noendbr\n\t" \ + _ASM_PTR " 986b\n\t" \ + ".popsection\n\t" + #else /* __ASSEMBLY__ */ /* @@ -123,6 +140,13 @@ struct unwind_hint { .popsection .endm +.macro ANNOTATE_NOENDBR +.Lhere_\@: + .pushsection .discard.noendbr + .quad .Lhere_\@ + .popsection +.endm + #endif /* __ASSEMBLY__ */ #else /* !CONFIG_STACK_VALIDATION */ @@ -132,12 +156,16 @@ struct unwind_hint { #define UNWIND_HINT(sp_reg, sp_offset, type, end) \ "\n\t" #define STACK_FRAME_NON_STANDARD(func) +#define STACK_FRAME_NON_STANDARD_FP(func) +#define ANNOTATE_NOENDBR #else #define ANNOTATE_INTRA_FUNCTION_CALL .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 .endm .macro STACK_FRAME_NON_STANDARD func:req .endm +.macro ANNOTATE_NOENDBR +.endm #endif #endif /* CONFIG_STACK_VALIDATION */ diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index cf8ea3594125..76acd39478ee 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -725,8 +725,11 @@ bool arch_is_retpoline(struct symbol *sym) bool arch_is_rethunk(struct symbol *sym) { - return !strcmp(sym->name, "__x86_return_thunk") || - !strcmp(sym->name, "srso_untrain_ret") || - !strcmp(sym->name, "srso_safe_ret") || - !strcmp(sym->name, "__ret"); + return !strcmp(sym->name, "__x86_return_thunk"); +} + +bool arch_is_embedded_insn(struct symbol *sym) +{ + return !strcmp(sym->name, "retbleed_return_thunk") || + !strcmp(sym->name, "srso_safe_ret"); } diff --git a/tools/objtool/check.c b/tools/objtool/check.c index f331780f0425..36ad0b6b94a9 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -370,7 +370,7 @@ static int decode_instructions(struct objtool_file *file) if (!strcmp(sec->name, ".noinstr.text") || !strcmp(sec->name, ".entry.text") || - !strncmp(sec->name, ".text.__x86.", 12)) + !strncmp(sec->name, ".text..__x86.", 13)) sec->noinstr = true; for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { @@ -990,16 +990,33 @@ static int add_ignore_alternatives(struct objtool_file *file) return 0; } +/* + * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol + * will 
be added to the .retpoline_sites section. + */ __weak bool arch_is_retpoline(struct symbol *sym) { return false; } +/* + * Symbols that replace INSN_RETURN, every (tail) call to such a symbol + * will be added to the .return_sites section. + */ __weak bool arch_is_rethunk(struct symbol *sym) { return false; } +/* + * Symbols that are embedded inside other instructions, because sometimes crazy + * code exists. These are mostly ignored for validation purposes. + */ +__weak bool arch_is_embedded_insn(struct symbol *sym) +{ + return false; +} + #define NEGATIVE_RELOC ((void *)-1L) static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) @@ -1228,14 +1245,14 @@ static int add_jump_destinations(struct objtool_file *file) continue; /* - * This is a special case for zen_untrain_ret(). + * This is a special case for retbleed_untrain_ret(). * It jumps to __x86_return_thunk(), but objtool * can't find the thunk's starting RET * instruction, because the RET is also in the * middle of another instruction. Objtool only * knows about the outer instruction. */ - if (sym && sym->return_thunk) { + if (sym && sym->embedded_insn) { add_return_call(file, insn, false); continue; } @@ -2066,6 +2083,9 @@ static int classify_symbols(struct objtool_file *file) if (arch_is_rethunk(func)) func->return_thunk = true; + if (arch_is_embedded_insn(func)) + func->embedded_insn = true; + if (!strcmp(func->name, "__fentry__")) func->fentry = true; @@ -2174,12 +2194,17 @@ static int decode_sections(struct objtool_file *file) return 0; } -static bool is_fentry_call(struct instruction *insn) +static bool is_special_call(struct instruction *insn) { - if (insn->type == INSN_CALL && - insn->call_dest && - insn->call_dest->fentry) - return true; + if (insn->type == INSN_CALL) { + struct symbol *dest = insn->call_dest; + + if (!dest) + return false; + + if (dest->fentry) + return true; + } return false; } @@ -3125,7 +3150,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, if (ret) return ret; - if (!no_fp && func && !is_fentry_call(insn) && + if (!no_fp && func && !is_special_call(insn) && !has_valid_stack_frame(&state)) { WARN_FUNC("call without frame pointer save/setup", sec, insn->offset); diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h index 8d57e3d1f763..d81d9a8e4808 100644 --- a/tools/objtool/include/objtool/arch.h +++ b/tools/objtool/include/objtool/arch.h @@ -89,6 +89,7 @@ int arch_decode_hint_reg(u8 sp_reg, int *base); bool arch_is_retpoline(struct symbol *sym); bool arch_is_rethunk(struct symbol *sym); +bool arch_is_embedded_insn(struct symbol *sym); int arch_rewrite_retpolines(struct objtool_file *file); diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h index 6cdfa401b000..f06398ea5751 100644 --- a/tools/objtool/include/objtool/elf.h +++ b/tools/objtool/include/objtool/elf.h @@ -60,6 +60,7 @@ struct symbol { u8 return_thunk : 1; u8 fentry : 1; u8 kcov : 1; + u8 embedded_insn : 1; }; struct reloc { diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh index 00d2e0e2e0c2..319f36ebb9a4 100644 --- a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh +++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh @@ -4,6 +4,12 @@ set -e +# skip if there's no gcc +if ! 
[ -x "$(command -v gcc)" ]; then + echo "failed: no gcc compiler" + exit 2 +fi + temp_dir=$(mktemp -d /tmp/perf-uprobe-different-cu-sh.XXXXXXXXXX) cleanup() @@ -11,7 +17,7 @@ cleanup() trap - EXIT TERM INT if [[ "${temp_dir}" =~ ^/tmp/perf-uprobe-different-cu-sh.*$ ]]; then echo "--- Cleaning up ---" - perf probe -x ${temp_dir}/testfile -d foo + perf probe -x ${temp_dir}/testfile -d foo || true rm -f "${temp_dir}/"* rmdir "${temp_dir}" fi diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c index a61c7bcbc72d..63f468bf8245 100644 --- a/tools/testing/radix-tree/regression1.c +++ b/tools/testing/radix-tree/regression1.c @@ -177,7 +177,7 @@ void regression1_test(void) nr_threads = 2; pthread_barrier_init(&worker_barrier, NULL, nr_threads); - threads = malloc(nr_threads * sizeof(pthread_t *)); + threads = malloc(nr_threads * sizeof(*threads)); for (i = 0; i < nr_threads; i++) { arg = i; diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c index 5861446d0777..7996ec07e0bd 100644 --- a/tools/testing/selftests/bpf/prog_tests/align.c +++ b/tools/testing/selftests/bpf/prog_tests/align.c @@ -2,7 +2,7 @@ #include <test_progs.h> #define MAX_INSNS 512 -#define MAX_MATCHES 16 +#define MAX_MATCHES 24 struct bpf_reg_match { unsigned int line; @@ -267,6 +267,7 @@ static struct bpf_align_test tests[] = { */ BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), @@ -280,6 +281,7 @@ static struct bpf_align_test tests[] = { BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4), BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), @@ -311,44 +313,52 @@ static struct bpf_align_test tests[] = { {15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, {15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Variable offset is added to R5 packet pointer, - * resulting in auxiliary alignment of 4. + * resulting in auxiliary alignment of 4. To avoid BPF + * verifier's precision backtracking logging + * interfering we also have a no-op R4 = R5 + * instruction to validate R5 state. We also check + * that R4 is what it should be in such case. */ - {18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {19, "R4_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {19, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5, resulting in * reg->off of 14. */ - {19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {20, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off * (14) which is 16. Then the variable offset is 4-byte * aligned, so the total offset is 4-byte aligned and * meets the load's requirements. 
*/ - {23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {24, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {24, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5 packet pointer, * resulting in reg->off value of 14. */ - {26, "R5_w=pkt(id=0,off=14,r=8"}, + {27, "R5_w=pkt(id=0,off=14,r=8"}, /* Variable offset is added to R5, resulting in a - * variable offset of (4n). + * variable offset of (4n). See comment for insn #19 + * for R4 = R5 trick. */ - {27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {29, "R4_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {29, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Constant is added to R5 again, setting reg->off to 18. */ - {28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {30, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* And once more we add a variable; resulting var_off * is still (4n), fixed offset is not changed. * Also, we create a new reg->id. */ - {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {32, "R4_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {32, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (18) * which is 20. Then the variable offset is (4n), so * the total offset is 4-byte aligned and meets the * load's requirements. */ - {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, - {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {35, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {35, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, }, }, { diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c index 3a469099f30d..e09c5239a595 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c @@ -29,7 +29,23 @@ static int stop, duration; static bool configure_stack(void) { + char tc_version[128]; char tc_cmd[BUFSIZ]; + char *prog; + FILE *tc; + + /* Check whether tc is built with libbpf. */ + tc = popen("tc -V", "r"); + if (CHECK_FAIL(!tc)) + return false; + if (CHECK_FAIL(!fgets(tc_version, sizeof(tc_version), tc))) + return false; + if (strstr(tc_version, ", libbpf ")) + prog = "test_sk_assign_libbpf.o"; + else + prog = "test_sk_assign.o"; + if (CHECK_FAIL(pclose(tc))) + return false; /* Move to a new networking namespace */ if (CHECK_FAIL(unshare(CLONE_NEWNET))) @@ -46,8 +62,8 @@ configure_stack(void) /* Load qdisc, BPF program */ if (CHECK_FAIL(system("tc qdisc add dev lo clsact"))) return false; - sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf", - "direct-action object-file ./test_sk_assign.o", + sprintf(tc_cmd, "%s %s %s %s %s", "tc filter add dev lo ingress bpf", + "direct-action object-file", prog, "section classifier/sk_assign_test", (env.verbosity < VERBOSE_VERY) ? 
" 2>/dev/null" : "verbose"); if (CHECK(system(tc_cmd), "BPF load failed;", @@ -129,15 +145,12 @@ get_port(int fd) static ssize_t rcv_msg(int srv_client, int type) { - struct sockaddr_storage ss; char buf[BUFSIZ]; - socklen_t slen; if (type == SOCK_STREAM) return read(srv_client, &buf, sizeof(buf)); else - return recvfrom(srv_client, &buf, sizeof(buf), 0, - (struct sockaddr *)&ss, &slen); + return recvfrom(srv_client, &buf, sizeof(buf), 0, NULL, NULL); } static int diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c index a943d394fd3a..38ab1ce32e57 100644 --- a/tools/testing/selftests/bpf/progs/connect4_prog.c +++ b/tools/testing/selftests/bpf/progs/connect4_prog.c @@ -33,7 +33,7 @@ int _version SEC("version") = 1; -__attribute__ ((noinline)) +__attribute__ ((noinline)) __weak int do_bind(struct bpf_sock_addr *ctx) { struct sockaddr_in sa = {}; diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c index 1ecd987005d2..77fd42f835fc 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_assign.c +++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c @@ -16,6 +16,16 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> +#if defined(IPROUTE2_HAVE_LIBBPF) +/* Use a new-style map definition. */ +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __type(key, int); + __type(value, __u64); + __uint(pinning, LIBBPF_PIN_BY_NAME); + __uint(max_entries, 1); +} server_map SEC(".maps"); +#else /* Pin map under /sys/fs/bpf/tc/globals/<map name> */ #define PIN_GLOBAL_NS 2 @@ -35,6 +45,7 @@ struct { .max_elem = 1, .pinning = PIN_GLOBAL_NS, }; +#endif int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c new file mode 100644 index 000000000000..dcf46adfda04 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-2.0 +#define IPROUTE2_HAVE_LIBBPF +#include "test_sk_assign.c" diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index 0c066ba579d4..a194dbcb405a 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -1917,6 +1917,11 @@ basic() run_cmd "$IP link set dev lo up" + # Dump should not loop endlessly when maximum nexthop ID is configured. + run_cmd "$IP nexthop add id $((2**32-1)) blackhole" + run_cmd "timeout 5 $IP nexthop" + log_test $? 0 "Maximum nexthop ID dump" + # # groups # @@ -2137,6 +2142,11 @@ basic_res() run_cmd "$IP nexthop bucket list fdb" log_test $? 255 "Dump all nexthop buckets with invalid 'fdb' keyword" + # Dump should not loop endlessly when maximum nexthop ID is configured. + run_cmd "$IP nexthop add id $((2**32-1)) group 1/2 type resilient buckets 4" + run_cmd "timeout 5 $IP nexthop bucket" + log_test $? 
0 "Maximum nexthop ID dump" + # # resilient nexthop buckets get requests # diff --git a/tools/testing/selftests/net/forwarding/ethtool.sh b/tools/testing/selftests/net/forwarding/ethtool.sh index dbb9fcf759e0..aa2eafb7b243 100755 --- a/tools/testing/selftests/net/forwarding/ethtool.sh +++ b/tools/testing/selftests/net/forwarding/ethtool.sh @@ -286,6 +286,8 @@ different_speeds_autoneg_on() ethtool -s $h1 autoneg on } +skip_on_veth + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh index 4b42dfd4efd1..baf831da5366 100755 --- a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh +++ b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh @@ -95,6 +95,8 @@ no_cable() ip link set dev $swp3 down } +skip_on_veth + setup_prepare tests_run diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 45da318d40ee..8c0d98a4bd5e 100755 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -129,6 +129,17 @@ check_ethtool_lanes_support() fi } +skip_on_veth() +{ + local kind=$(ip -j -d link show dev ${NETIFS[p1]} | + jq -r '.[].linkinfo.info_kind') + + if [[ $kind == veth ]]; then + echo "SKIP: Test cannot be run with veth pairs" + exit $ksft_skip + fi +} + if [[ "$(id -u)" -ne 0 ]]; then echo "SKIP: need root privileges" exit $ksft_skip @@ -191,6 +202,11 @@ create_netif_veth() for ((i = 1; i <= NUM_NETIFS; ++i)); do local j=$((i+1)) + if [ -z ${NETIFS[p$i]} ]; then + echo "SKIP: Cannot create interface. Name not specified" + exit $ksft_skip + fi + ip link show dev ${NETIFS[p$i]} &> /dev/null if [[ $? -ne 0 ]]; then ip link add ${NETIFS[p$i]} type veth \ diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh index 472bd023e2a5..b501b366367f 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh @@ -72,7 +72,8 @@ test_span_gre_ttl() RET=0 - mirror_install $swp1 ingress $tundev "matchall $tcflags" + mirror_install $swp1 ingress $tundev \ + "prot ip flower $tcflags ip_prot icmp" tc filter add dev $h3 ingress pref 77 prot $prot \ flower ip_ttl 50 action pass diff --git a/tools/testing/selftests/net/forwarding/settings b/tools/testing/selftests/net/forwarding/settings new file mode 100644 index 000000000000..e7b9417537fb --- /dev/null +++ b/tools/testing/selftests/net/forwarding/settings @@ -0,0 +1 @@ +timeout=0 diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh index 877c1c40bc9c..fb8e810f905f 100755 --- a/tools/testing/selftests/net/forwarding/tc_actions.sh +++ b/tools/testing/selftests/net/forwarding/tc_actions.sh @@ -8,6 +8,8 @@ NUM_NETIFS=4 source tc_common.sh source lib.sh +require_command ncat + tcflags="skip_hw" h1_create() @@ -155,10 +157,10 @@ gact_trap_test() mirred_egress_to_ingress_tcp_test() { - local tmpfile=$(mktemp) tmpfile1=$(mktemp) + mirred_e2i_tf1=$(mktemp) mirred_e2i_tf2=$(mktemp) RET=0 - dd conv=sparse status=none if=/dev/zero bs=1M count=2 of=$tmpfile + dd conv=sparse status=none if=/dev/zero bs=1M count=2 of=$mirred_e2i_tf1 tc filter add dev $h1 protocol ip pref 100 handle 100 egress flower \ $tcflags ip_proto tcp src_ip 192.0.2.1 dst_ip 192.0.2.2 \ action ct commit nat src addr 192.0.2.2 pipe \ @@ 
-174,11 +176,11 @@ mirred_egress_to_ingress_tcp_test() ip_proto icmp \ action drop - ip vrf exec v$h1 nc --recv-only -w10 -l -p 12345 -o $tmpfile1 & + ip vrf exec v$h1 ncat --recv-only -w10 -l -p 12345 -o $mirred_e2i_tf2 & local rpid=$! - ip vrf exec v$h1 nc -w1 --send-only 192.0.2.2 12345 <$tmpfile + ip vrf exec v$h1 ncat -w1 --send-only 192.0.2.2 12345 <$mirred_e2i_tf1 wait -n $rpid - cmp -s $tmpfile $tmpfile1 + cmp -s $mirred_e2i_tf1 $mirred_e2i_tf2 check_err $? "server output check failed" $MZ $h1 -c 10 -p 64 -a $h1mac -b $h1mac -A 192.0.2.1 -B 192.0.2.1 \ @@ -195,7 +197,7 @@ mirred_egress_to_ingress_tcp_test() tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower tc filter del dev $h1 ingress protocol ip pref 102 handle 102 flower - rm -f $tmpfile $tmpfile1 + rm -f $mirred_e2i_tf1 $mirred_e2i_tf2 log_test "mirred_egress_to_ingress_tcp ($tcflags)" } @@ -224,6 +226,8 @@ setup_prepare() cleanup() { + local tf + pre_cleanup switch_destroy @@ -234,6 +238,8 @@ cleanup() ip link set $swp2 address $swp2origmac ip link set $swp1 address $swp1origmac + + for tf in $mirred_e2i_tf1 $mirred_e2i_tf2; do rm -f $tf; done } mirred_egress_redirect_test() diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh index 683711f41aa9..b1daad19b01e 100755 --- a/tools/testing/selftests/net/forwarding/tc_flower.sh +++ b/tools/testing/selftests/net/forwarding/tc_flower.sh @@ -52,8 +52,8 @@ match_dst_mac_test() tc_check_packets "dev $h2 ingress" 101 1 check_fail $? "Matched on a wrong filter" - tc_check_packets "dev $h2 ingress" 102 1 - check_err $? "Did not match on correct filter" + tc_check_packets "dev $h2 ingress" 102 0 + check_fail $? "Did not match on correct filter" tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower @@ -78,8 +78,8 @@ match_src_mac_test() tc_check_packets "dev $h2 ingress" 101 1 check_fail $? "Matched on a wrong filter" - tc_check_packets "dev $h2 ingress" 102 1 - check_err $? "Did not match on correct filter" + tc_check_packets "dev $h2 ingress" 102 0 + check_fail $? "Did not match on correct filter" tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile index 215e1067f037..82ceca6aab96 100644 --- a/tools/testing/selftests/rseq/Makefile +++ b/tools/testing/selftests/rseq/Makefile @@ -4,8 +4,10 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) CLANG_FLAGS += -no-integrated-as endif +top_srcdir = ../../../.. 
+ CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L$(OUTPUT) -Wl,-rpath=./ \ - $(CLANG_FLAGS) + $(CLANG_FLAGS) -I$(top_srcdir)/tools/include LDLIBS += -lpthread -ldl # Own dependencies because we only want to build against 1st prerequisite, but diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c index 986b9458efb2..e20191fb40d4 100644 --- a/tools/testing/selftests/rseq/rseq.c +++ b/tools/testing/selftests/rseq/rseq.c @@ -29,12 +29,22 @@ #include <dlfcn.h> #include <stddef.h> +#include <linux/compiler.h> + #include "../kselftest.h" #include "rseq.h" -static const ptrdiff_t *libc_rseq_offset_p; -static const unsigned int *libc_rseq_size_p; -static const unsigned int *libc_rseq_flags_p; +/* + * Define weak versions to play nice with binaries that are statically linked + * against a libc that doesn't support registering its own rseq. + */ +__weak ptrdiff_t __rseq_offset; +__weak unsigned int __rseq_size; +__weak unsigned int __rseq_flags; + +static const ptrdiff_t *libc_rseq_offset_p = &__rseq_offset; +static const unsigned int *libc_rseq_size_p = &__rseq_size; +static const unsigned int *libc_rseq_flags_p = &__rseq_flags; /* Offset from the thread pointer to the rseq area. */ ptrdiff_t rseq_offset; @@ -108,10 +118,19 @@ int rseq_unregister_current_thread(void) static __attribute__((constructor)) void rseq_init(void) { - libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset"); - libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size"); - libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags"); - if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p) { + /* + * If the libc's registered rseq size isn't already valid, it may be + * because the binary is dynamically linked and not necessarily due to + * libc not having registered a restartable sequence. Try to find the + * symbols if that's the case. + */ + if (!*libc_rseq_size_p) { + libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset"); + libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size"); + libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags"); + } + if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p && + *libc_rseq_size_p != 0) { /* rseq registration owned by glibc */ rseq_offset = *libc_rseq_offset_p; rseq_size = *libc_rseq_size_p;
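
Several of the hunks above share reusable patterns; the sketches that follow restate them as small standalone C programs, with invented names and userspace stand-ins rather than the kernel APIs. First, the sch_netem change: get_dist_table() now only allocates and fills a table, netem_change() swaps the prepared tables into place under sch_tree_lock(), and whatever table was displaced (or never used) is freed once the lock is dropped. A minimal pthreads sketch of that prepare/swap/free discipline (struct table and table_install() are made up):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct table {
    size_t n;
    short *data;
};

static struct table *active;    /* protected by lock */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Build the replacement with no locks held, as get_dist_table() now does. */
static struct table *table_prepare(const short *src, size_t n)
{
    struct table *t = malloc(sizeof(*t));

    if (!t)
        return NULL;
    t->data = malloc(n * sizeof(*t->data));
    if (!t->data) {
        free(t);
        return NULL;
    }
    memcpy(t->data, src, n * sizeof(*t->data));
    t->n = n;
    return t;
}

static void table_free(struct table *t)
{
    if (!t)
        return;
    free(t->data);
    free(t);
}

static int table_install(const short *src, size_t n)
{
    struct table *t = table_prepare(src, n);
    struct table *old;

    if (!t)
        return -1;
    pthread_mutex_lock(&lock);
    old = active;               /* swap under the lock ... */
    active = t;
    pthread_mutex_unlock(&lock);
    table_free(old);            /* ... free outside it */
    return 0;
}

int main(void)
{
    const short dist[] = { 1, 2, 3, 4 };

    return table_install(dist, 4) ? 1 : 0;
}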
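
The sch_taprio hunk bounds cycle_time twice, in the netlink policy (NLA_POLICY_FULL_RANGE_SIGNED capping it at INT_MAX) and with an explicit check in parse_taprio_schedule(), and it widens setup_txtime()'s interval accumulator from u32 to u64 so summed entry intervals cannot wrap. A standalone program showing the wraparound the widening prevents, with invented interval values:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t intervals[] = { 3000000000u, 2000000000u };
    uint32_t acc32 = 0;
    uint64_t acc64 = 0;
    int64_t cycle = 0;
    unsigned int i;

    for (i = 0; i < 2; i++) {
        acc32 += intervals[i];  /* wraps past 2^32 */
        acc64 += intervals[i];  /* exact, like the new u64 interval */
        cycle += intervals[i];
    }
    printf("u32 sum: %u\n", acc32);
    printf("u64 sum: %llu\n", (unsigned long long)acc64);

    /* the range check taprio now applies to cycle_time */
    if (cycle < 0 || cycle > INT_MAX)
        fprintf(stderr, "'cycle_time' is too big\n");
    return 0;
}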
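
The tls_device record-close rework removes the failure path entirely: if no page fragment can be found for the authentication tag, the code points the fragment at a preallocated dummy page (the offload hardware overwrites the tag bytes anyway) instead of destroying the half-built record. A sketch of that fallback, where struct frag, try_refill() and append_frag() are userspace stand-ins for the kernel's page_frag, skb_page_frag_refill() and tls_append_frag():

#include <string.h>

#define TAG_SIZE 16

/* Preallocated fallback standing in for tls_device's dummy_page. */
static unsigned char dummy_page[64];

struct frag {
    unsigned char *page;
    unsigned int offset;
    unsigned int size;
};

/* Stand-in for skb_page_frag_refill(); failure forces the fallback. */
static int try_refill(struct frag *f, unsigned int len)
{
    (void)f;
    (void)len;
    return 0;
}

/* Stand-in for tls_append_frag(). */
static void append_frag(struct frag *f, unsigned int len)
{
    memset(f->page + f->offset, 0, len);    /* placeholder tag bytes */
    f->offset += len;
}

static void record_close(struct frag *pfrag)
{
    struct frag dummy_tag_frag;

    if (pfrag->size - pfrag->offset < TAG_SIZE &&
        !try_refill(pfrag, TAG_SIZE)) {
        /* no memory: use the dummy page rather than unwinding */
        dummy_tag_frag.page = dummy_page;
        dummy_tag_frag.offset = 0;
        dummy_tag_frag.size = sizeof(dummy_page);
        pfrag = &dummy_tag_frag;
    }
    append_frag(pfrag, TAG_SIZE);
}

int main(void)
{
    unsigned char page[32];
    struct frag f = { page, 30, sizeof(page) };    /* 2 bytes left */

    record_close(&f);    /* silently falls back to dummy_page */
    return 0;
}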
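
tls_device_init() now acquires three resources (dummy_page, destruct_wq, the netdevice notifier) and unwinds them with a goto ladder in exact reverse order on failure, mirrored by tls_device_cleanup(). The idiom in miniature, with malloc() and a trivially succeeding register_notifier() standing in for the real setup steps:

#include <stdlib.h>

static void *page, *wq;

static int register_notifier(void)
{
    return 0;    /* pretend success */
}

int device_init(void)
{
    int err;

    page = malloc(4096);
    if (!page)
        return -1;

    wq = malloc(64);
    if (!wq) {
        err = -1;
        goto err_free_page;
    }

    err = register_notifier();
    if (err)
        goto err_destroy_wq;

    return 0;

err_destroy_wq:        /* unwind in exact reverse order of setup */
    free(wq);
err_free_page:
    free(page);
    return err;
}

int main(void)
{
    return device_init() ? 1 : 0;
}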
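
The sctp and af_unix one-liners wrap lockless stores in WRITE_ONCE() so the compiler emits exactly one untorn store for a value that other contexts read without the lock. The closest portable analogue outside the kernel is a relaxed C11 atomic; a toy version of the memory-pressure flag:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int memory_pressure;

static void enter_memory_pressure(void)
{
    /* one untorn store the compiler may not split or duplicate,
     * with no extra ordering: the moral equivalent of
     * WRITE_ONCE(sctp_memory_pressure, 1) */
    atomic_store_explicit(&memory_pressure, 1, memory_order_relaxed);
}

static int under_pressure(void)
{
    /* pairs with READ_ONCE() on the kernel side */
    return atomic_load_explicit(&memory_pressure, memory_order_relaxed);
}

int main(void)
{
    enter_memory_pressure();
    printf("%d\n", under_pressure());
    return 0;
}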
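
The xfrm_dump_sa() fix rejects user-supplied prefix lengths wider than an xfrm_address_t (128 bits) before they can drive addr_match()'s word indexing. The same validation in front of a prefix comparison; unlike the kernel, the mask here is computed in host byte order, so this is illustrative only:

#include <stdint.h>
#include <string.h>

struct addr {
    uint32_t a[4];    /* 128 bits, like xfrm_address_t */
};

static int prefix_match(const struct addr *x, const struct addr *y,
                        unsigned int plen)
{
    unsigned int words, bits;

    /* the new xfrm_dump_sa() check: reject prefixes wider than
     * the address, which would index past the array below */
    if (plen > sizeof(struct addr) * 8)
        return -1;

    words = plen >> 5;    /* full 32-bit words, as in addr_match() */
    bits = plen & 0x1f;
    if (memcmp(x->a, y->a, words * sizeof(uint32_t)))
        return 0;
    if (bits) {
        uint32_t mask = ~0u << (32 - bits);    /* host order, sketch only */

        if ((x->a[words] ^ y->a[words]) & mask)
            return 0;
    }
    return 1;
}

int main(void)
{
    struct addr a = { { 0x12345678u, 0, 0, 0 } };
    struct addr b = { { 0x12345678u, 0, 0, 0 } };

    return prefix_match(&a, &b, 129) == -1 ? 0 : 1;    /* must be rejected */
}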
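
The pcm_memory change converts the per-card limit from check-then-allocate, which is racy because two callers can both pass the check before either is charged, to reserve-then-correct: the requested size is charged under the mutex up front, given back on allocation failure, and adjusted afterwards if the allocator rounded the size up. A pthreads sketch with a page-rounding dma_alloc() stand-in for snd_dma_alloc_pages():

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t memory_mutex = PTHREAD_MUTEX_INITIALIZER;
static size_t total_alloc_bytes;
static const size_t max_alloc_per_card = 32UL * 1024 * 1024;

/* Rounds up to a page, so the actual size can exceed the request. */
static void *dma_alloc(size_t size, size_t *actual)
{
    *actual = (size + 4095) & ~(size_t)4095;
    return malloc(*actual);
}

static void *do_alloc_pages(size_t size)
{
    size_t actual;
    void *buf;

    /* check and reserve the requested size up front */
    pthread_mutex_lock(&memory_mutex);
    if (total_alloc_bytes + size > max_alloc_per_card) {
        pthread_mutex_unlock(&memory_mutex);
        return NULL;
    }
    total_alloc_bytes += size;
    pthread_mutex_unlock(&memory_mutex);

    buf = dma_alloc(size, &actual);

    pthread_mutex_lock(&memory_mutex);
    if (!buf)
        total_alloc_bytes -= size;              /* take it back */
    else
        total_alloc_bytes += actual - size;     /* correct the account */
    pthread_mutex_unlock(&memory_mutex);
    return buf;
}

int main(void)
{
    return do_alloc_pages(8192) ? 0 : 1;
}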
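
snd_hdac_regmap_sync() now takes regmap_lock before testing codec->regmap, so the pointer cannot be torn down between the check and regcache_sync(). The shape of the fix, with a counter standing in for the regmap:

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared_obj;    /* may be cleared concurrently */

static void sync_obj(void)
{
    /* test and use under one critical section; testing before
     * taking the lock leaves a window where the object goes away
     * between the check and the use */
    pthread_mutex_lock(&obj_lock);
    if (shared_obj)
        (*shared_obj)++;    /* stands in for regcache_sync() */
    pthread_mutex_unlock(&obj_lock);
}

int main(void)
{
    int value = 0;

    shared_obj = &value;
    sync_obj();
    shared_obj = NULL;
    sync_obj();    /* safe no-op */
    return value != 1;
}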
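
The axg-tdm-formatter rework changes how stream channels are spread over lane slot masks: instead of filling each lane completely before moving to the next, it walks slot pairs first and lanes second, so a channel's placement no longer depends on the total channel count. The algorithm extracted into a standalone program (NUM_LANES mirrors the driver's AXG_TDM_NUM_LANES; the masks are invented):

#include <stdint.h>
#include <stdio.h>

#define NUM_LANES 4

static int set_channel_masks(const uint32_t mask[NUM_LANES],
                             uint32_t val[NUM_LANES], unsigned int ch)
{
    int i, j, k;

    for (j = 0; j < NUM_LANES; j++)
        val[j] = 0;

    for (i = 0; i < 32 && ch; i += 2)           /* slot pairs ... */
        for (j = 0; j < NUM_LANES; j++)         /* ... across lanes ... */
            for (k = 0; k < 2; k++)             /* ... two slots at a time */
                if ((mask[j] & (1u << (i + k))) && ch) {
                    val[j] |= 1u << (i + k);
                    ch--;
                }

    return ch ? -1 : 0;    /* channels left over: masks too small */
}

int main(void)
{
    const uint32_t mask[NUM_LANES] = { 0x3, 0x3, 0x3, 0x3 };
    uint32_t val[NUM_LANES];
    int j;

    if (set_channel_masks(mask, val, 6))
        return 1;
    for (j = 0; j < NUM_LANES; j++)
        printf("lane %d: 0x%08x\n", j, val[j]);
    return 0;
}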
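
The objtool ANNOTATE_NOENDBR macros record the annotated address in a .discard.noendbr section that the tooling consumes and strips. The same record-addresses-in-a-section technique is available from C via section attributes; a toy example (the section name is reused only for flavor, and nothing consumes it here):

#include <stdio.h>

static void handler(void)
{
    puts("handler");
}

/* Record an address in a dedicated, discardable section; external
 * tooling can read the section and strip it from the final binary,
 * which is the idea behind objtool's .discard.noendbr annotations. */
static void *noendbr_entry __attribute__((used, section(".discard.noendbr")))
    = (void *)handler;

int main(void)
{
    handler();
    return 0;
}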
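
The radix-tree test fix swaps malloc(nr_threads * sizeof(pthread_t *)) for sizeof(*threads). On common 64-bit glibc the two happen to agree, since pthread_t is pointer-sized there, but sizing by the pointed-to object is the idiom that stays correct when the element type changes:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    int nr_threads = 2;
    pthread_t *threads;

    /* Buggy: sizes the array by pointer width.
     *   threads = malloc(nr_threads * sizeof(pthread_t *));
     * Fixed, as in regression1.c: size by the pointed-to object. */
    threads = malloc(nr_threads * sizeof(*threads));
    if (!threads)
        return 1;
    printf("%zu bytes for %d threads\n",
           nr_threads * sizeof(*threads), nr_threads);
    free(threads);
    return 0;
}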
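
The sk_assign test now probes how tc was built before picking a BPF object: "tc -V" prints ", libbpf <version>" when iproute2 is linked against libbpf. The probe as a standalone program (it assumes iproute2's tc is in PATH):

#include <stdio.h>
#include <string.h>

int main(void)
{
    char version[128];
    const char *prog = "test_sk_assign.o";
    FILE *tc = popen("tc -V", "r");

    if (!tc)
        return 1;
    if (fgets(version, sizeof(version), tc) &&
        strstr(version, ", libbpf "))
        prog = "test_sk_assign_libbpf.o";
    if (pclose(tc))
        return 1;

    printf("using %s\n", prog);
    return 0;
}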
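
Finally, the rseq.c change defines __rseq_offset, __rseq_size and __rseq_flags as weak symbols, so a static link against an rseq-aware libc resolves them to the libc copies, and it only falls back to dlsym(RTLD_NEXT, ...) when the registered size still reads zero. The combination in a standalone program (build with -ldl; _GNU_SOURCE is needed for RTLD_NEXT):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stddef.h>
#include <stdio.h>

/* Weak fallbacks: a libc that defines these strongly wins at link time. */
__attribute__((weak)) ptrdiff_t __rseq_offset;
__attribute__((weak)) unsigned int __rseq_size;

static const ptrdiff_t *offset_p = &__rseq_offset;
static const unsigned int *size_p = &__rseq_size;

int main(void)
{
    /* Zero size may just mean "dynamically linked": only then go
     * looking for the symbols at runtime, as rseq_init() now does. */
    if (!*size_p) {
        const ptrdiff_t *o = dlsym(RTLD_NEXT, "__rseq_offset");
        const unsigned int *s = dlsym(RTLD_NEXT, "__rseq_size");

        if (o && s) {
            offset_p = o;
            size_p = s;
        }
    }
    printf("rseq size %u, offset %td\n", *size_p, *offset_p);
    return 0;
}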