-rw-r--r--  0000_README             |     4
-rw-r--r--  1017_linux-5.5.18.patch | 10333
2 files changed, 10337 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 0952792e..e488875f 100644
--- a/0000_README
+++ b/0000_README
@@ -111,6 +111,10 @@ Patch:  1016_linux-5.5.17.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.5.17
 
+Patch:  1017_linux-5.5.18.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.5.18
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
diff --git a/1017_linux-5.5.18.patch b/1017_linux-5.5.18.patch
new file mode 100644
index 00000000..2e27fcad
--- /dev/null
+++ b/1017_linux-5.5.18.patch
@@ -0,0 +1,10333 @@
+diff --git a/Documentation/sound/hd-audio/index.rst b/Documentation/sound/hd-audio/index.rst
+index f8a72ffffe66..6e12de9fc34e 100644
+--- a/Documentation/sound/hd-audio/index.rst
++++ b/Documentation/sound/hd-audio/index.rst
+@@ -8,3 +8,4 @@ HD-Audio
+    models
+    controls
+    dp-mst
++   realtek-pc-beep
+diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
+index 11298f0ce44d..0ea967d34583 100644
+--- a/Documentation/sound/hd-audio/models.rst
++++ b/Documentation/sound/hd-audio/models.rst
+@@ -216,8 +216,6 @@ alc298-dell-aio
+     ALC298 fixups on Dell AIO machines
+ alc275-dell-xps
+     ALC275 fixups on Dell XPS models
+-alc256-dell-xps13
+-    ALC256 fixups on Dell XPS13
+ lenovo-spk-noise
+     Workaround for speaker noise on Lenovo machines
+ lenovo-hotkey
+diff --git a/Documentation/sound/hd-audio/realtek-pc-beep.rst b/Documentation/sound/hd-audio/realtek-pc-beep.rst
+new file mode 100644
+index 000000000000..be47c6f76a6e
+--- /dev/null
++++ b/Documentation/sound/hd-audio/realtek-pc-beep.rst
+@@ -0,0 +1,129 @@
++===============================
++Realtek PC Beep Hidden Register
++===============================
++
++This file documents the "PC Beep Hidden Register", which is present in certain
++Realtek HDA codecs and controls a muxer and pair of passthrough mixers that can
++route audio between pins but aren't themselves exposed as HDA widgets. As far
++as I can tell, these hidden routes are designed to allow flexible PC Beep output
++for codecs that don't have mixer widgets in their output paths. Why it's easier
++to hide a mixer behind an undocumented vendor register than to just expose it
++as a widget, I have no idea.
++
++Register Description
++====================
++
++The register is accessed via processing coefficient 0x36 on NID 20h. Bits not
++identified below have no discernible effect on my machine, a Dell XPS 13 9350::
++
++  MSB                           LSB
++  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
++  | |h|S|L|         | B |R|       | Known bits
++  +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
++  |0|0|1|1|  0x7  |0|0x0|1|  0x7  | Reset value
++  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
++
++1Ah input select (B): 2 bits
++  When zero, expose the PC Beep line (from the internal beep generator, when
++  enabled with the Set Beep Generation verb on NID 01h, or else from the
++  external PCBEEP pin) on the 1Ah pin node. When nonzero, expose the headphone
++  jack (or possibly Line In on some machines) input instead. If PC Beep is
++  selected, the 1Ah boost control has no effect.
++
++Amplify 1Ah loopback, left (L): 1 bit
++  Amplify the left channel of 1Ah before mixing it into outputs as specified
++  by h and S bits. Does not affect the level of 1Ah exposed to other widgets.
++
++Amplify 1Ah loopback, right (R): 1 bit
++  Amplify the right channel of 1Ah before mixing it into outputs as specified
++  by h and S bits. Does not affect the level of 1Ah exposed to other widgets.
++
++Loopback 1Ah to 21h [active low] (h): 1 bit
++  When zero, mix 1Ah (possibly with amplification, depending on L and R bits)
++  into 21h (headphone jack on my machine). Mixed signal respects the mute
++  setting on 21h.
++
++Loopback 1Ah to 14h (S): 1 bit
++  When one, mix 1Ah (possibly with amplification, depending on L and R bits)
++  into 14h (internal speaker on my machine). Mixed signal **ignores** the mute
++  setting on 14h and is present whenever 14h is configured as an output.
++
++Path diagrams
++=============
++
++1Ah input selection (DIV is the PC Beep divider set on NID 01h)::
++
++  <Beep generator>  <PCBEEP pin>  <Headphone jack>
++          |               |               |
++          +--DIV--+--!DIV--+     {1Ah boost control}
++                  |                       |
++                  +--(b == 0)--+--(b != 0)--+
++                               |
++               >1Ah (Beep/Headphone Mic/Line In)<
++
++Loopback of 1Ah to 21h/14h::
++
++  <1Ah (Beep/Headphone Mic/Line In)>
++                   |
++           {amplify if L/R}
++                   |
++        +-----!h-----+-----S-----+
++        |                        |
++  {21h mute control}             |
++        |                        |
++  >21h (Headphone)<   >14h (Internal Speaker)<
++
++Background
++==========
++
++All Realtek HDA codecs have a vendor-defined widget with node ID 20h which
++provides access to a bank of registers that control various codec functions.
++Registers are read and written via the standard HDA processing coefficient
++verbs (Set/Get Coefficient Index, Set/Get Processing Coefficient). The node is
++named "Realtek Vendor Registers" in public datasheets' verb listings and,
++apart from that, is entirely undocumented.
++
++This particular register, exposed at coefficient 0x36 and named in commits from
++Realtek, is of note: unlike most registers, which seem to control detailed
++amplifier parameters not in scope of the HDA specification, it controls audio
++routing which could just as easily have been defined using standard HDA mixer
++and selector widgets.
++
++Specifically, it selects between two sources for the input pin widget with Node
++ID (NID) 1Ah: the widget's signal can come either from an audio jack (on my
++laptop, a Dell XPS 13 9350, it's the headphone jack, but comments in Realtek
++commits indicate that it might be a Line In on some machines) or from the PC
++Beep line (which is itself multiplexed between the codec's internal beep
++generator and external PCBEEP pin, depending on if the beep generator is
++enabled via verbs on NID 01h). Additionally, it can mix (with optional
++amplification) that signal onto the 21h and/or 14h output pins.
++
++The register's reset value is 0x3717, corresponding to PC Beep on 1Ah that is
++then amplified and mixed into both the headphones and the speakers. Not only
++does this violate the HDA specification, which says that "[a vendor defined
++beep input pin] connection may be maintained *only* while the Link reset
++(**RST#**) is asserted", it means that we cannot ignore the register if we care
++about the input that 1Ah would otherwise expose or if the PCBEEP trace is
++poorly shielded and picks up chassis noise (both of which are the case on my
++machine).
++
++Unfortunately, there are lots of ways to get this register configuration wrong.
++Linux, it seems, has gone through most of them. For one, the register resets
++after S3 suspend: judging by existing code, this isn't the case for all vendor
++registers, and it's led to some fixes that improve behavior on cold boot but
++don't last after suspend. Other fixes have successfully switched the 1Ah input
++away from PC Beep but have failed to disable both loopback paths. On my
++machine, this means that the headphone input is amplified and looped back to
++the headphone output, which uses the exact same pins! As you might expect, this
++causes terrible headphone noise, the character of which is controlled by the
++1Ah boost control. (If you've seen instructions online to fix XPS 13 headphone
++noise by changing "Headphone Mic Boost" in ALSA, now you know why.)
++
++The information here has been obtained through black-box reverse engineering of
++the ALC256 codec's behavior and is not guaranteed to be correct. It likely
++also applies for the ALC255, ALC257, ALC235, and ALC236, since those codecs
++seem to be close relatives of the ALC256. (They all share one initialization
++function.) Additionally, other codecs like the ALC225 and ALC285 also have this
++register, judging by existing fixups in ``patch_realtek.c``, but specific
++data (e.g. node IDs, bit positions, pin mappings) for those codecs may differ
++from what I've described here.
+diff --git a/Makefile b/Makefile
+index 0245e5b512d5..0dfb52e860b7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 5
+-SUBLEVEL = 17
++SUBLEVEL = 18
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts
+index 3931fb068ff0..91d1018ab75f 100644
+--- a/arch/arm/boot/dts/dm8148-evm.dts
++++ b/arch/arm/boot/dts/dm8148-evm.dts
+@@ -24,12 +24,12 @@
+ 
+ &cpsw_emac0 {
+ 	phy-handle = <&ethphy0>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ };
+ 
+ &cpsw_emac1 {
+ 	phy-handle = <&ethphy1>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ };
+ 
+ &davinci_mdio {
+diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts
+index 9e43d5ec0bb2..79ccdd4470f4 100644
+--- a/arch/arm/boot/dts/dm8148-t410.dts
++++ b/arch/arm/boot/dts/dm8148-t410.dts
+@@ -33,12 +33,12 @@
+ 
+ &cpsw_emac0 {
+ 	phy-handle = <&ethphy0>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ };
+ 
+ &cpsw_emac1 {
+ 	phy-handle = <&ethphy1>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ };
+ 
+ &davinci_mdio {
+diff --git a/arch/arm/boot/dts/dra62x-j5eco-evm.dts b/arch/arm/boot/dts/dra62x-j5eco-evm.dts
+index 861ab90a3f3a..c16e183822be 100644
+--- a/arch/arm/boot/dts/dra62x-j5eco-evm.dts
++++ b/arch/arm/boot/dts/dra62x-j5eco-evm.dts
+@@ -24,12 +24,12 @@
+ 
+ &cpsw_emac0 {
+ 	phy-handle = <&ethphy0>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ };
+ 
+ &cpsw_emac1 {
+ 	phy-handle = <&ethphy1>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ };
+ 
+ &davinci_mdio {
+diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
+index 09d3d54d09ff..1b5578381d78 100644
+--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
++++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
+@@ -115,7 +115,7 @@
+ 	gpio-sck = <&gpy3 1 GPIO_ACTIVE_HIGH>;
+ 	gpio-mosi = <&gpy3 3 GPIO_ACTIVE_HIGH>;
+ 	num-chipselects = <1>;
+-	cs-gpios = <&gpy4 3 GPIO_ACTIVE_HIGH>;
++	cs-gpios = <&gpy4 3 GPIO_ACTIVE_LOW>;
+ 
+ 	lcd@0 {
+ 		compatible = "samsung,ld9040";
+@@ -124,8 +124,6 @@
+ 		vci-supply = <&ldo17_reg>;
+ 		reset-gpios = <&gpy4 5 GPIO_ACTIVE_HIGH>;
+ 		spi-max-frequency = <1200000>;
+-		spi-cpol;
+-		spi-cpha;
+ 		power-on-delay = <10>;
+ 		reset-delay = <10>;
+ 		panel-width-mm = <90>;
+diff --git a/arch/arm/boot/dts/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
+index da6b107da84a..aeb5a673c209 100644
+--- a/arch/arm/boot/dts/motorola-mapphone-common.dtsi
++++ 
b/arch/arm/boot/dts/motorola-mapphone-common.dtsi +@@ -413,7 +413,7 @@ + reset-gpios = <&gpio6 13 GPIO_ACTIVE_HIGH>; /* gpio173 */ + + /* gpio_183 with sys_nirq2 pad as wakeup */ +- interrupts-extended = <&gpio6 23 IRQ_TYPE_EDGE_FALLING>, ++ interrupts-extended = <&gpio6 23 IRQ_TYPE_LEVEL_LOW>, + <&omap4_pmx_core 0x160>; + interrupt-names = "irq", "wakeup"; + wakeup-source; +diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts +index f781d330cff5..e8b3669e0e5d 100644 +--- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts ++++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts +@@ -374,8 +374,8 @@ + }; + + ®_dldo3 { +- regulator-min-microvolt = <2800000>; +- regulator-max-microvolt = <2800000>; ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <1800000>; + regulator-name = "vdd-csi"; + }; + +diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile +index 1fbe24d4fdb6..c53fdc47b51b 100644 +--- a/arch/arm64/Makefile ++++ b/arch/arm64/Makefile +@@ -72,6 +72,10 @@ stack_protector_prepare: prepare0 + include/generated/asm-offsets.h)) + endif + ++# Ensure that if the compiler supports branch protection we default it ++# off. ++KBUILD_CFLAGS += $(call cc-option,-mbranch-protection=none) ++ + ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) + KBUILD_CPPFLAGS += -mbig-endian + CHECKFLAGS += -D__AARCH64EB__ +diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi +index 7c775a918a4e..9073c3532d5b 100644 +--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi ++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi +@@ -77,8 +77,7 @@ + }; + + pmu { +- compatible = "arm,cortex-a53-pmu", +- "arm,armv8-pmuv3"; ++ compatible = "arm,cortex-a53-pmu"; + interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, +diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi +index 24ffe2dcbddb..816b96096ea9 100644 +--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi ++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi +@@ -71,8 +71,7 @@ + }; + + pmu { +- compatible = "arm,cortex-a53-pmu", +- "arm,armv8-pmuv3"; ++ compatible = "arm,cortex-a53-pmu"; + interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>, +diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +index efb24579922c..5f80bdf7a3ec 100644 +--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi ++++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +@@ -307,6 +307,7 @@ + interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>; + dma-coherent; + power-domains = <&k3_pds 151 TI_SCI_PD_EXCLUSIVE>; ++ clocks = <&k3_clks 151 2>, <&k3_clks 151 7>; + assigned-clocks = <&k3_clks 151 2>, <&k3_clks 151 7>; + assigned-clock-parents = <&k3_clks 151 4>, /* set REF_CLK to 20MHz i.e. PER0_PLL/48 */ + <&k3_clks 151 9>; /* set PIPE3_TXB_CLK to CLK_12M_RC/256 (for HS only) */ +@@ -346,6 +347,7 @@ + interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>; + dma-coherent; + power-domains = <&k3_pds 152 TI_SCI_PD_EXCLUSIVE>; ++ clocks = <&k3_clks 152 2>; + assigned-clocks = <&k3_clks 152 2>; + assigned-clock-parents = <&k3_clks 152 4>; /* set REF_CLK to 20MHz i.e. 
PER0_PLL/48 */ + +diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c +index ca158be21f83..bcb14d11232f 100644 +--- a/arch/arm64/kernel/armv8_deprecated.c ++++ b/arch/arm64/kernel/armv8_deprecated.c +@@ -601,7 +601,7 @@ static struct undef_hook setend_hooks[] = { + }, + { + /* Thumb mode */ +- .instr_mask = 0x0000fff7, ++ .instr_mask = 0xfffffff7, + .instr_val = 0x0000b650, + .pstate_mask = (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK), + .pstate_val = (PSR_AA32_T_BIT | PSR_AA32_MODE_USR), +diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c +index f97be32bf699..3ad1f76c063a 100644 +--- a/arch/mips/cavium-octeon/octeon-irq.c ++++ b/arch/mips/cavium-octeon/octeon-irq.c +@@ -2199,6 +2199,9 @@ static int octeon_irq_cib_map(struct irq_domain *d, + } + + cd = kzalloc(sizeof(*cd), GFP_KERNEL); ++ if (!cd) ++ return -ENOMEM; ++ + cd->host_data = host_data; + cd->bit = hw; + +diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c +index 344e6e9ea43b..da407cdc2135 100644 +--- a/arch/mips/mm/tlbex.c ++++ b/arch/mips/mm/tlbex.c +@@ -1480,6 +1480,7 @@ static void build_r4000_tlb_refill_handler(void) + + static void setup_pw(void) + { ++ unsigned int pwctl; + unsigned long pgd_i, pgd_w; + #ifndef __PAGETABLE_PMD_FOLDED + unsigned long pmd_i, pmd_w; +@@ -1506,6 +1507,7 @@ static void setup_pw(void) + + pte_i = ilog2(_PAGE_GLOBAL); + pte_w = 0; ++ pwctl = 1 << 30; /* Set PWDirExt */ + + #ifndef __PAGETABLE_PMD_FOLDED + write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i); +@@ -1516,8 +1518,9 @@ static void setup_pw(void) + #endif + + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT +- write_c0_pwctl(1 << 6 | psn); ++ pwctl |= (1 << 6 | psn); + #endif ++ write_c0_pwctl(pwctl); + write_c0_kpgd((long)swapper_pg_dir); + kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */ + } +diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h +index 8fd8599c9395..3f9ae3585ab9 100644 +--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h ++++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h +@@ -156,6 +156,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, + extern int hash__has_transparent_hugepage(void); + #endif + ++static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd) ++{ ++ BUG(); ++ return pmd; ++} ++ + #endif /* !__ASSEMBLY__ */ + + #endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */ +diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h +index d1d9177d9ebd..0729c034e56f 100644 +--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h ++++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h +@@ -246,7 +246,7 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array, + */ + static inline int hash__pmd_trans_huge(pmd_t pmd) + { +- return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) == ++ return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) == + (_PAGE_PTE | H_PAGE_THP_HUGE)); + } + +@@ -272,6 +272,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp); + extern int hash__has_transparent_hugepage(void); + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ ++ ++static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd) ++{ ++ return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)); ++} ++ + #endif /* __ASSEMBLY__ */ + + #endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */ +diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h 
b/arch/powerpc/include/asm/book3s/64/pgtable.h +index b01624e5c467..a143d394ff46 100644 +--- a/arch/powerpc/include/asm/book3s/64/pgtable.h ++++ b/arch/powerpc/include/asm/book3s/64/pgtable.h +@@ -1303,7 +1303,9 @@ extern void serialize_against_pte_lookup(struct mm_struct *mm); + + static inline pmd_t pmd_mkdevmap(pmd_t pmd) + { +- return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP)); ++ if (radix_enabled()) ++ return radix__pmd_mkdevmap(pmd); ++ return hash__pmd_mkdevmap(pmd); + } + + static inline int pmd_devmap(pmd_t pmd) +diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h +index d97db3ad9aae..a1c60d5b50af 100644 +--- a/arch/powerpc/include/asm/book3s/64/radix.h ++++ b/arch/powerpc/include/asm/book3s/64/radix.h +@@ -263,6 +263,11 @@ static inline int radix__has_transparent_hugepage(void) + } + #endif + ++static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd) ++{ ++ return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP)); ++} ++ + extern int __meminit radix__vmemmap_create_mapping(unsigned long start, + unsigned long page_size, + unsigned long phys); +diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h +index 3d76e1c388c2..28c3d936fdf3 100644 +--- a/arch/powerpc/include/asm/drmem.h ++++ b/arch/powerpc/include/asm/drmem.h +@@ -27,12 +27,12 @@ struct drmem_lmb_info { + extern struct drmem_lmb_info *drmem_info; + + #define for_each_drmem_lmb_in_range(lmb, start, end) \ +- for ((lmb) = (start); (lmb) <= (end); (lmb)++) ++ for ((lmb) = (start); (lmb) < (end); (lmb)++) + + #define for_each_drmem_lmb(lmb) \ + for_each_drmem_lmb_in_range((lmb), \ + &drmem_info->lmbs[0], \ +- &drmem_info->lmbs[drmem_info->n_lmbs - 1]) ++ &drmem_info->lmbs[drmem_info->n_lmbs]) + + /* + * The of_drconf_cell_v1 struct defines the layout of the LMB data +diff --git a/arch/powerpc/include/asm/setjmp.h b/arch/powerpc/include/asm/setjmp.h +index e9f81bb3f83b..f798e80e4106 100644 +--- a/arch/powerpc/include/asm/setjmp.h ++++ b/arch/powerpc/include/asm/setjmp.h +@@ -7,7 +7,9 @@ + + #define JMP_BUF_LEN 23 + +-extern long setjmp(long *) __attribute__((returns_twice)); +-extern void longjmp(long *, long) __attribute__((noreturn)); ++typedef long jmp_buf[JMP_BUF_LEN]; ++ ++extern int setjmp(jmp_buf env) __attribute__((returns_twice)); ++extern void longjmp(jmp_buf env, int val) __attribute__((noreturn)); + + #endif /* _ASM_POWERPC_SETJMP_H */ +diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c +index 180b3a5d1001..05606025a131 100644 +--- a/arch/powerpc/kernel/dt_cpu_ftrs.c ++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c +@@ -139,7 +139,6 @@ static void __init cpufeatures_setup_cpu(void) + /* Initialize the base environment -- clear FSCR/HFSCR. 
*/ + hv_mode = !!(mfmsr() & MSR_HV); + if (hv_mode) { +- /* CPU_FTR_HVMODE is used early in PACA setup */ + cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE; + mtspr(SPRN_HFSCR, 0); + } +diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c +index 2d27ec4feee4..9b340af02c38 100644 +--- a/arch/powerpc/kernel/kprobes.c ++++ b/arch/powerpc/kernel/kprobes.c +@@ -264,6 +264,9 @@ int kprobe_handler(struct pt_regs *regs) + if (user_mode(regs)) + return 0; + ++ if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)) ++ return 0; ++ + /* + * We don't want to be preempted for the entire + * duration of kprobe processing +diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c +index 949eceb254d8..3f91ccaa9c74 100644 +--- a/arch/powerpc/kernel/paca.c ++++ b/arch/powerpc/kernel/paca.c +@@ -176,7 +176,7 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit) + struct paca_struct **paca_ptrs __read_mostly; + EXPORT_SYMBOL(paca_ptrs); + +-void __init initialise_paca(struct paca_struct *new_paca, int cpu) ++void __init __nostackprotector initialise_paca(struct paca_struct *new_paca, int cpu) + { + #ifdef CONFIG_PPC_PSERIES + new_paca->lppaca_ptr = NULL; +@@ -205,7 +205,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) + } + + /* Put the paca pointer into r13 and SPRG_PACA */ +-void setup_paca(struct paca_struct *new_paca) ++void __nostackprotector setup_paca(struct paca_struct *new_paca) + { + /* Setup r13 */ + local_paca = new_paca; +@@ -214,11 +214,15 @@ void setup_paca(struct paca_struct *new_paca) + /* On Book3E, initialize the TLB miss exception frames */ + mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb); + #else +- /* In HV mode, we setup both HPACA and PACA to avoid problems ++ /* ++ * In HV mode, we setup both HPACA and PACA to avoid problems + * if we do a GET_PACA() before the feature fixups have been +- * applied ++ * applied. ++ * ++ * Normally you should test against CPU_FTR_HVMODE, but CPU features ++ * are not yet set up when we first reach here. + */ +- if (early_cpu_has_feature(CPU_FTR_HVMODE)) ++ if (mfmsr() & MSR_HV) + mtspr(SPRN_SPRG_HPACA, local_paca); + #endif + mtspr(SPRN_SPRG_PACA, local_paca); +diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h +index c82577c4b15d..1b02d338a5f5 100644 +--- a/arch/powerpc/kernel/setup.h ++++ b/arch/powerpc/kernel/setup.h +@@ -8,6 +8,12 @@ + #ifndef __ARCH_POWERPC_KERNEL_SETUP_H + #define __ARCH_POWERPC_KERNEL_SETUP_H + ++#ifdef CONFIG_CC_IS_CLANG ++#define __nostackprotector ++#else ++#define __nostackprotector __attribute__((__optimize__("no-stack-protector"))) ++#endif ++ + void initialize_cache_info(void); + void irqstack_early_init(void); + +diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c +index 6104917a282d..51efa66a5833 100644 +--- a/arch/powerpc/kernel/setup_64.c ++++ b/arch/powerpc/kernel/setup_64.c +@@ -279,24 +279,42 @@ void __init record_spr_defaults(void) + * device-tree is not accessible via normal means at this point. + */ + +-void __init early_setup(unsigned long dt_ptr) ++void __init __nostackprotector early_setup(unsigned long dt_ptr) + { + static __initdata struct paca_struct boot_paca; + + /* -------- printk is _NOT_ safe to use here ! ------- */ + +- /* Try new device tree based feature discovery ... */ +- if (!dt_cpu_ftrs_init(__va(dt_ptr))) +- /* Otherwise use the old style CPU table */ +- identify_cpu(0, mfspr(SPRN_PVR)); +- +- /* Assume we're on cpu 0 for now. Don't write to the paca yet! 
*/ ++ /* ++ * Assume we're on cpu 0 for now. ++ * ++ * We need to load a PACA very early for a few reasons. ++ * ++ * The stack protector canary is stored in the paca, so as soon as we ++ * call any stack protected code we need r13 pointing somewhere valid. ++ * ++ * If we are using kcov it will call in_task() in its instrumentation, ++ * which relies on the current task from the PACA. ++ * ++ * dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as ++ * printk(), which can trigger both stack protector and kcov. ++ * ++ * percpu variables and spin locks also use the paca. ++ * ++ * So set up a temporary paca. It will be replaced below once we know ++ * what CPU we are on. ++ */ + initialise_paca(&boot_paca, 0); + setup_paca(&boot_paca); + fixup_boot_paca(); + + /* -------- printk is now safe to use ------- */ + ++ /* Try new device tree based feature discovery ... */ ++ if (!dt_cpu_ftrs_init(__va(dt_ptr))) ++ /* Otherwise use the old style CPU table */ ++ identify_cpu(0, mfspr(SPRN_PVR)); ++ + /* Enable early debugging if any specified (see udbg.h) */ + udbg_early_init(); + +diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c +index 84ed2e77ef9c..adfde59cf4ba 100644 +--- a/arch/powerpc/kernel/signal_64.c ++++ b/arch/powerpc/kernel/signal_64.c +@@ -473,8 +473,10 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, + err |= __get_user(tsk->thread.ckpt_regs.ccr, + &sc->gp_regs[PT_CCR]); + ++ /* Don't allow userspace to set the trap value */ ++ regs->trap = 0; ++ + /* These regs are not checkpointed; they can go in 'regs'. */ +- err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]); + err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); + err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); + err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); +diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile +index 378f6108a414..86380c69f5ce 100644 +--- a/arch/powerpc/kexec/Makefile ++++ b/arch/powerpc/kexec/Makefile +@@ -3,9 +3,6 @@ + # Makefile for the linux kernel. 
+ # + +-# Avoid clang warnings around longjmp/setjmp declarations +-CFLAGS_crash.o += -ffreestanding +- + obj-y += core.o crash.o core_$(BITS).o + + obj-$(CONFIG_PPC32) += relocate_32.o +diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c +index 5914fbfa5e0a..30dae60a3025 100644 +--- a/arch/powerpc/kvm/book3s_hv_uvmem.c ++++ b/arch/powerpc/kvm/book3s_hv_uvmem.c +@@ -778,6 +778,9 @@ out: + + void kvmppc_uvmem_free(void) + { ++ if (!kvmppc_uvmem_bitmap) ++ return; ++ + memunmap_pages(&kvmppc_uvmem_pgmap); + release_mem_region(kvmppc_uvmem_pgmap.res.start, + resource_size(&kvmppc_uvmem_pgmap.res)); +diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c +index 0e6ed4413eea..1cfe57b51d7e 100644 +--- a/arch/powerpc/mm/kasan/kasan_init_32.c ++++ b/arch/powerpc/mm/kasan/kasan_init_32.c +@@ -117,7 +117,7 @@ static void __init kasan_remap_early_shadow_ro(void) + + kasan_populate_pte(kasan_early_shadow_pte, prot); + +- for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) { ++ for (k_cur = k_start & PAGE_MASK; k_cur != k_end; k_cur += PAGE_SIZE) { + pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur); + pte_t *ptep = pte_offset_kernel(pmd, k_cur); + +diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S +index 2ca407cedbe7..eaeee402f96e 100644 +--- a/arch/powerpc/mm/nohash/tlb_low.S ++++ b/arch/powerpc/mm/nohash/tlb_low.S +@@ -397,7 +397,7 @@ _GLOBAL(set_context) + * extern void loadcam_entry(unsigned int index) + * + * Load TLBCAM[index] entry in to the L2 CAM MMU +- * Must preserve r7, r8, r9, and r10 ++ * Must preserve r7, r8, r9, r10 and r11 + */ + _GLOBAL(loadcam_entry) + mflr r5 +@@ -433,6 +433,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) + */ + _GLOBAL(loadcam_multi) + mflr r8 ++ /* Don't switch to AS=1 if already there */ ++ mfmsr r11 ++ andi. r11,r11,MSR_IS ++ bne 10f + + /* + * Set up temporary TLB entry that is the same as what we're +@@ -458,6 +462,7 @@ _GLOBAL(loadcam_multi) + mtmsr r6 + isync + ++10: + mr r9,r3 + add r10,r3,r4 + 2: bl loadcam_entry +@@ -466,6 +471,10 @@ _GLOBAL(loadcam_multi) + mr r3,r9 + blt 2b + ++ /* Don't return to AS=0 if we were in AS=1 at function start */ ++ andi. r11,r11,MSR_IS ++ bne 3f ++ + /* Return to AS=0 and clear the temporary entry */ + mfmsr r6 + rlwinm. 
r6,r6,0,~(MSR_IS|MSR_DS) +@@ -481,6 +490,7 @@ _GLOBAL(loadcam_multi) + tlbwe + isync + ++3: + mtlr r8 + blr + #endif +diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c +index a4d40a3ceea3..fd22ec41c008 100644 +--- a/arch/powerpc/platforms/pseries/hotplug-memory.c ++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c +@@ -223,7 +223,7 @@ static int get_lmb_range(u32 drc_index, int n_lmbs, + struct drmem_lmb **end_lmb) + { + struct drmem_lmb *lmb, *start, *end; +- struct drmem_lmb *last_lmb; ++ struct drmem_lmb *limit; + + start = NULL; + for_each_drmem_lmb(lmb) { +@@ -236,10 +236,10 @@ static int get_lmb_range(u32 drc_index, int n_lmbs, + if (!start) + return -EINVAL; + +- end = &start[n_lmbs - 1]; ++ end = &start[n_lmbs]; + +- last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1]; +- if (end > last_lmb) ++ limit = &drmem_info->lmbs[drmem_info->n_lmbs]; ++ if (end > limit) + return -EINVAL; + + *start_lmb = start; +diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c +index 9651ca061828..fe8d396e2301 100644 +--- a/arch/powerpc/sysdev/xive/common.c ++++ b/arch/powerpc/sysdev/xive/common.c +@@ -68,13 +68,6 @@ static u32 xive_ipi_irq; + /* Xive state for each CPU */ + static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu); + +-/* +- * A "disabled" interrupt should never fire, to catch problems +- * we set its logical number to this +- */ +-#define XIVE_BAD_IRQ 0x7fffffff +-#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1) +- + /* An invalid CPU target */ + #define XIVE_INVALID_TARGET (-1) + +@@ -265,11 +258,15 @@ notrace void xmon_xive_do_dump(int cpu) + + int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d) + { ++ struct irq_chip *chip = irq_data_get_irq_chip(d); + int rc; + u32 target; + u8 prio; + u32 lirq; + ++ if (!is_xive_irq(chip)) ++ return -EINVAL; ++ + rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq); + if (rc) { + xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); +@@ -1150,7 +1147,7 @@ static int xive_setup_cpu_ipi(unsigned int cpu) + xc = per_cpu(xive_cpu, cpu); + + /* Check if we are already setup */ +- if (xc->hw_ipi != 0) ++ if (xc->hw_ipi != XIVE_BAD_IRQ) + return 0; + + /* Grab an IPI from the backend, this will populate xc->hw_ipi */ +@@ -1187,7 +1184,7 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) + /* Disable the IPI and free the IRQ data */ + + /* Already cleaned up ? 
*/ +- if (xc->hw_ipi == 0) ++ if (xc->hw_ipi == XIVE_BAD_IRQ) + return; + + /* Mask the IPI */ +@@ -1343,6 +1340,7 @@ static int xive_prepare_cpu(unsigned int cpu) + if (np) + xc->chip_id = of_get_ibm_chip_id(np); + of_node_put(np); ++ xc->hw_ipi = XIVE_BAD_IRQ; + + per_cpu(xive_cpu, cpu) = xc; + } +diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c +index 0ff6b739052c..50e1a8e02497 100644 +--- a/arch/powerpc/sysdev/xive/native.c ++++ b/arch/powerpc/sysdev/xive/native.c +@@ -312,7 +312,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc) + s64 rc; + + /* Free the IPI */ +- if (!xc->hw_ipi) ++ if (xc->hw_ipi == XIVE_BAD_IRQ) + return; + for (;;) { + rc = opal_xive_free_irq(xc->hw_ipi); +@@ -320,7 +320,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc) + msleep(OPAL_BUSY_DELAY_MS); + continue; + } +- xc->hw_ipi = 0; ++ xc->hw_ipi = XIVE_BAD_IRQ; + break; + } + } +diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c +index 55dc61cb4867..3f15615712b5 100644 +--- a/arch/powerpc/sysdev/xive/spapr.c ++++ b/arch/powerpc/sysdev/xive/spapr.c +@@ -560,11 +560,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc) + + static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc) + { +- if (!xc->hw_ipi) ++ if (xc->hw_ipi == XIVE_BAD_IRQ) + return; + + xive_irq_bitmap_free(xc->hw_ipi); +- xc->hw_ipi = 0; ++ xc->hw_ipi = XIVE_BAD_IRQ; + } + #endif /* CONFIG_SMP */ + +diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h +index 59cd366e7933..382980f4de2d 100644 +--- a/arch/powerpc/sysdev/xive/xive-internal.h ++++ b/arch/powerpc/sysdev/xive/xive-internal.h +@@ -5,6 +5,13 @@ + #ifndef __XIVE_INTERNAL_H + #define __XIVE_INTERNAL_H + ++/* ++ * A "disabled" interrupt should never fire, to catch problems ++ * we set its logical number to this ++ */ ++#define XIVE_BAD_IRQ 0x7fffffff ++#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1) ++ + /* Each CPU carry one of these with various per-CPU state */ + struct xive_cpu { + #ifdef CONFIG_SMP +diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile +index c3842dbeb1b7..6f9cccea54f3 100644 +--- a/arch/powerpc/xmon/Makefile ++++ b/arch/powerpc/xmon/Makefile +@@ -1,9 +1,6 @@ + # SPDX-License-Identifier: GPL-2.0 + # Makefile for xmon + +-# Avoid clang warnings around longjmp/setjmp declarations +-subdir-ccflags-y := -ffreestanding +- + GCOV_PROFILE := n + KCOV_INSTRUMENT := n + UBSAN_SANITIZE := n +diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig +index 1be11c23fa33..50cfa272f9e3 100644 +--- a/arch/riscv/Kconfig ++++ b/arch/riscv/Kconfig +@@ -52,7 +52,6 @@ config RISCV + select PCI_DOMAINS_GENERIC if PCI + select PCI_MSI if PCI + select RISCV_TIMER +- select UACCESS_MEMCPY if !MMU + select GENERIC_IRQ_MULTI_HANDLER + select GENERIC_ARCH_TOPOLOGY if SMP + select ARCH_HAS_PTE_SPECIAL +diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h +index f462a183a9c2..8ce9d607b53d 100644 +--- a/arch/riscv/include/asm/uaccess.h ++++ b/arch/riscv/include/asm/uaccess.h +@@ -11,6 +11,24 @@ + /* + * User space memory access functions + */ ++ ++extern unsigned long __must_check __asm_copy_to_user(void __user *to, ++ const void *from, unsigned long n); ++extern unsigned long __must_check __asm_copy_from_user(void *to, ++ const void __user *from, unsigned long n); ++ ++static inline unsigned long ++raw_copy_from_user(void *to, const void __user *from, unsigned long n) ++{ ++ return 
__asm_copy_from_user(to, from, n); ++} ++ ++static inline unsigned long ++raw_copy_to_user(void __user *to, const void *from, unsigned long n) ++{ ++ return __asm_copy_to_user(to, from, n); ++} ++ + #ifdef CONFIG_MMU + #include <linux/errno.h> + #include <linux/compiler.h> +@@ -367,24 +385,6 @@ do { \ + -EFAULT; \ + }) + +- +-extern unsigned long __must_check __asm_copy_to_user(void __user *to, +- const void *from, unsigned long n); +-extern unsigned long __must_check __asm_copy_from_user(void *to, +- const void __user *from, unsigned long n); +- +-static inline unsigned long +-raw_copy_from_user(void *to, const void __user *from, unsigned long n) +-{ +- return __asm_copy_from_user(to, from, n); +-} +- +-static inline unsigned long +-raw_copy_to_user(void __user *to, const void *from, unsigned long n) +-{ +- return __asm_copy_to_user(to, from, n); +-} +- + extern long strncpy_from_user(char *dest, const char __user *src, long count); + + extern long __must_check strlen_user(const char __user *str); +diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile +index 47e7a8204460..0d0db80800c4 100644 +--- a/arch/riscv/lib/Makefile ++++ b/arch/riscv/lib/Makefile +@@ -2,5 +2,5 @@ + lib-y += delay.o + lib-y += memcpy.o + lib-y += memset.o +-lib-$(CONFIG_MMU) += uaccess.o ++lib-y += uaccess.o + lib-$(CONFIG_64BIT) += tishift.o +diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c +index e9dac9a24d3f..61f2b0412345 100644 +--- a/arch/s390/kernel/diag.c ++++ b/arch/s390/kernel/diag.c +@@ -84,7 +84,7 @@ static int show_diag_stat(struct seq_file *m, void *v) + + static void *show_diag_stat_start(struct seq_file *m, loff_t *pos) + { +- return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; ++ return *pos <= NR_DIAG_STAT ? (void *)((unsigned long) *pos + 1) : NULL; + } + + static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos) +diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c +index 076090f9e666..4f6c22d72072 100644 +--- a/arch/s390/kvm/vsie.c ++++ b/arch/s390/kvm/vsie.c +@@ -1202,6 +1202,7 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) + scb_s->iprcc = PGM_ADDRESSING; + scb_s->pgmilc = 4; + scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4); ++ rc = 1; + } + return rc; + } +diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c +index edcdca97e85e..9d9ab77d02dd 100644 +--- a/arch/s390/mm/gmap.c ++++ b/arch/s390/mm/gmap.c +@@ -787,14 +787,18 @@ static void gmap_call_notifier(struct gmap *gmap, unsigned long start, + static inline unsigned long *gmap_table_walk(struct gmap *gmap, + unsigned long gaddr, int level) + { ++ const int asce_type = gmap->asce & _ASCE_TYPE_MASK; + unsigned long *table; + + if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4)) + return NULL; + if (gmap_is_shadow(gmap) && gmap->removed) + return NULL; +- if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11))) ++ ++ if (asce_type != _ASCE_TYPE_REGION1 && ++ gaddr & (-1UL << (31 + (asce_type >> 2) * 11))) + return NULL; ++ + table = gmap->table; + switch (gmap->asce & _ASCE_TYPE_MASK) { + case _ASCE_TYPE_REGION1: +diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S +index f2dfd6d083ef..777cf7d659ce 100644 +--- a/arch/x86/boot/compressed/head_32.S ++++ b/arch/x86/boot/compressed/head_32.S +@@ -106,7 +106,7 @@ SYM_FUNC_START(startup_32) + notl %eax + andl %eax, %ebx + cmpl $LOAD_PHYSICAL_ADDR, %ebx +- jge 1f ++ jae 1f + #endif + movl $LOAD_PHYSICAL_ADDR, %ebx + 1: +diff --git 
a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S +index ee60b81944a7..f5ee513f0195 100644 +--- a/arch/x86/boot/compressed/head_64.S ++++ b/arch/x86/boot/compressed/head_64.S +@@ -106,7 +106,7 @@ SYM_FUNC_START(startup_32) + notl %eax + andl %eax, %ebx + cmpl $LOAD_PHYSICAL_ADDR, %ebx +- jge 1f ++ jae 1f + #endif + movl $LOAD_PHYSICAL_ADDR, %ebx + 1: +@@ -297,7 +297,7 @@ SYM_CODE_START(startup_64) + notq %rax + andq %rax, %rbp + cmpq $LOAD_PHYSICAL_ADDR, %rbp +- jge 1f ++ jae 1f + #endif + movq $LOAD_PHYSICAL_ADDR, %rbp + 1: +diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S +index 7e0560442538..39243df98100 100644 +--- a/arch/x86/entry/entry_32.S ++++ b/arch/x86/entry/entry_32.S +@@ -1694,6 +1694,7 @@ SYM_CODE_START(int3) + SYM_CODE_END(int3) + + SYM_CODE_START(general_protection) ++ ASM_CLAC + pushl $do_general_protection + jmp common_exception + SYM_CODE_END(general_protection) +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index 7425c83fd343..af42663b2077 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -1147,7 +1147,7 @@ struct kvm_x86_ops { + bool (*pt_supported)(void); + bool (*pku_supported)(void); + +- int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr); ++ int (*check_nested_events)(struct kvm_vcpu *vcpu); + void (*request_immediate_exit)(struct kvm_vcpu *vcpu); + + void (*sched_in)(struct kvm_vcpu *kvm, int cpu); +diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h +index ad97dc155195..9de80cbdd887 100644 +--- a/arch/x86/include/asm/pgtable.h ++++ b/arch/x86/include/asm/pgtable.h +@@ -624,12 +624,15 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) + return __pmd(val); + } + +-/* mprotect needs to preserve PAT bits when updating vm_page_prot */ ++/* ++ * mprotect needs to preserve PAT and encryption bits when updating ++ * vm_page_prot ++ */ + #define pgprot_modify pgprot_modify + static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) + { + pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK; +- pgprotval_t addbits = pgprot_val(newprot); ++ pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK; + return __pgprot(preservebits | addbits); + } + +diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h +index b5e49e6bac63..8267dd426b15 100644 +--- a/arch/x86/include/asm/pgtable_types.h ++++ b/arch/x86/include/asm/pgtable_types.h +@@ -123,7 +123,7 @@ + */ + #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ + _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \ +- _PAGE_SOFT_DIRTY | _PAGE_DEVMAP) ++ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC) + #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE) + + /* +diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c +index 04205ce127a1..f9e84a0e2fa2 100644 +--- a/arch/x86/kernel/acpi/boot.c ++++ b/arch/x86/kernel/acpi/boot.c +@@ -1740,7 +1740,7 @@ int __acpi_acquire_global_lock(unsigned int *lock) + new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1)); + val = cmpxchg(lock, old, new); + } while (unlikely (val != old)); +- return (new < 3) ? -1 : 0; ++ return ((new & 0x3) < 3) ? 
-1 : 0; + } + + int __acpi_release_global_lock(unsigned int *lock) +diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c +index e0cbe4f2af49..c65adaf81384 100644 +--- a/arch/x86/kernel/tsc_msr.c ++++ b/arch/x86/kernel/tsc_msr.c +@@ -15,18 +15,46 @@ + #include <asm/param.h> + #include <asm/tsc.h> + +-#define MAX_NUM_FREQS 9 ++#define MAX_NUM_FREQS 16 /* 4 bits to select the frequency */ ++ ++/* ++ * The frequency numbers in the SDM are e.g. 83.3 MHz, which does not contain a ++ * lot of accuracy which leads to clock drift. As far as we know Bay Trail SoCs ++ * use a 25 MHz crystal and Cherry Trail uses a 19.2 MHz crystal, the crystal ++ * is the source clk for a root PLL which outputs 1600 and 100 MHz. It is ++ * unclear if the root PLL outputs are used directly by the CPU clock PLL or ++ * if there is another PLL in between. ++ * This does not matter though, we can model the chain of PLLs as a single PLL ++ * with a quotient equal to the quotients of all PLLs in the chain multiplied. ++ * So we can create a simplified model of the CPU clock setup using a reference ++ * clock of 100 MHz plus a quotient which gets us as close to the frequency ++ * from the SDM as possible. ++ * For the 83.3 MHz example from above this would give us 100 MHz * 5 / 6 = ++ * 83 and 1/3 MHz, which matches exactly what has been measured on actual hw. ++ */ ++#define TSC_REFERENCE_KHZ 100000 ++ ++struct muldiv { ++ u32 multiplier; ++ u32 divider; ++}; + + /* + * If MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be + * read in MSR_PLATFORM_ID[12:8], otherwise in MSR_PERF_STAT[44:40]. + * Unfortunately some Intel Atom SoCs aren't quite compliant to this, + * so we need manually differentiate SoC families. This is what the +- * field msr_plat does. ++ * field use_msr_plat does. + */ + struct freq_desc { +- u8 msr_plat; /* 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */ ++ bool use_msr_plat; ++ struct muldiv muldiv[MAX_NUM_FREQS]; ++ /* ++ * Some CPU frequencies in the SDM do not map to known PLL freqs, in ++ * that case the muldiv array is empty and the freqs array is used. ++ */ + u32 freqs[MAX_NUM_FREQS]; ++ u32 mask; + }; + + /* +@@ -35,31 +63,81 @@ struct freq_desc { + * by MSR based on SDM. 
+ */ + static const struct freq_desc freq_desc_pnw = { +- 0, { 0, 0, 0, 0, 0, 99840, 0, 83200 } ++ .use_msr_plat = false, ++ .freqs = { 0, 0, 0, 0, 0, 99840, 0, 83200 }, ++ .mask = 0x07, + }; + + static const struct freq_desc freq_desc_clv = { +- 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 } ++ .use_msr_plat = false, ++ .freqs = { 0, 133200, 0, 0, 0, 99840, 0, 83200 }, ++ .mask = 0x07, + }; + ++/* ++ * Bay Trail SDM MSR_FSB_FREQ frequencies simplified PLL model: ++ * 000: 100 * 5 / 6 = 83.3333 MHz ++ * 001: 100 * 1 / 1 = 100.0000 MHz ++ * 010: 100 * 4 / 3 = 133.3333 MHz ++ * 011: 100 * 7 / 6 = 116.6667 MHz ++ * 100: 100 * 4 / 5 = 80.0000 MHz ++ */ + static const struct freq_desc freq_desc_byt = { +- 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 } ++ .use_msr_plat = true, ++ .muldiv = { { 5, 6 }, { 1, 1 }, { 4, 3 }, { 7, 6 }, ++ { 4, 5 } }, ++ .mask = 0x07, + }; + ++/* ++ * Cherry Trail SDM MSR_FSB_FREQ frequencies simplified PLL model: ++ * 0000: 100 * 5 / 6 = 83.3333 MHz ++ * 0001: 100 * 1 / 1 = 100.0000 MHz ++ * 0010: 100 * 4 / 3 = 133.3333 MHz ++ * 0011: 100 * 7 / 6 = 116.6667 MHz ++ * 0100: 100 * 4 / 5 = 80.0000 MHz ++ * 0101: 100 * 14 / 15 = 93.3333 MHz ++ * 0110: 100 * 9 / 10 = 90.0000 MHz ++ * 0111: 100 * 8 / 9 = 88.8889 MHz ++ * 1000: 100 * 7 / 8 = 87.5000 MHz ++ */ + static const struct freq_desc freq_desc_cht = { +- 1, { 83300, 100000, 133300, 116700, 80000, 93300, 90000, 88900, 87500 } ++ .use_msr_plat = true, ++ .muldiv = { { 5, 6 }, { 1, 1 }, { 4, 3 }, { 7, 6 }, ++ { 4, 5 }, { 14, 15 }, { 9, 10 }, { 8, 9 }, ++ { 7, 8 } }, ++ .mask = 0x0f, + }; + ++/* ++ * Merriefield SDM MSR_FSB_FREQ frequencies simplified PLL model: ++ * 0001: 100 * 1 / 1 = 100.0000 MHz ++ * 0010: 100 * 4 / 3 = 133.3333 MHz ++ */ + static const struct freq_desc freq_desc_tng = { +- 1, { 0, 100000, 133300, 0, 0, 0, 0, 0 } ++ .use_msr_plat = true, ++ .muldiv = { { 0, 0 }, { 1, 1 }, { 4, 3 } }, ++ .mask = 0x07, + }; + ++/* ++ * Moorefield SDM MSR_FSB_FREQ frequencies simplified PLL model: ++ * 0000: 100 * 5 / 6 = 83.3333 MHz ++ * 0001: 100 * 1 / 1 = 100.0000 MHz ++ * 0010: 100 * 4 / 3 = 133.3333 MHz ++ * 0011: 100 * 1 / 1 = 100.0000 MHz ++ */ + static const struct freq_desc freq_desc_ann = { +- 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 } ++ .use_msr_plat = true, ++ .muldiv = { { 5, 6 }, { 1, 1 }, { 4, 3 }, { 1, 1 } }, ++ .mask = 0x0f, + }; + ++/* 24 MHz crystal? 
: 24 * 13 / 4 = 78 MHz */ + static const struct freq_desc freq_desc_lgm = { +- 1, { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 } ++ .use_msr_plat = true, ++ .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 }, ++ .mask = 0x0f, + }; + + static const struct x86_cpu_id tsc_msr_cpu_ids[] = { +@@ -81,17 +159,19 @@ static const struct x86_cpu_id tsc_msr_cpu_ids[] = { + */ + unsigned long cpu_khz_from_msr(void) + { +- u32 lo, hi, ratio, freq; ++ u32 lo, hi, ratio, freq, tscref; + const struct freq_desc *freq_desc; + const struct x86_cpu_id *id; ++ const struct muldiv *md; + unsigned long res; ++ int index; + + id = x86_match_cpu(tsc_msr_cpu_ids); + if (!id) + return 0; + + freq_desc = (struct freq_desc *)id->driver_data; +- if (freq_desc->msr_plat) { ++ if (freq_desc->use_msr_plat) { + rdmsr(MSR_PLATFORM_INFO, lo, hi); + ratio = (lo >> 8) & 0xff; + } else { +@@ -101,12 +181,28 @@ unsigned long cpu_khz_from_msr(void) + + /* Get FSB FREQ ID */ + rdmsr(MSR_FSB_FREQ, lo, hi); ++ index = lo & freq_desc->mask; ++ md = &freq_desc->muldiv[index]; + +- /* Map CPU reference clock freq ID(0-7) to CPU reference clock freq(KHz) */ +- freq = freq_desc->freqs[lo & 0x7]; ++ /* ++ * Note this also catches cases where the index points to an unpopulated ++ * part of muldiv, in that case the else will set freq and res to 0. ++ */ ++ if (md->divider) { ++ tscref = TSC_REFERENCE_KHZ * md->multiplier; ++ freq = DIV_ROUND_CLOSEST(tscref, md->divider); ++ /* ++ * Multiplying by ratio before the division has better ++ * accuracy than just calculating freq * ratio. ++ */ ++ res = DIV_ROUND_CLOSEST(tscref * ratio, md->divider); ++ } else { ++ freq = freq_desc->freqs[index]; ++ res = freq * ratio; ++ } + +- /* TSC frequency = maximum resolved freq * maximum resolved bus ratio */ +- res = freq * ratio; ++ if (freq == 0) ++ pr_err("Error MSR_FSB_FREQ index %d is unknown\n", index); + + #ifdef CONFIG_X86_LOCAL_APIC + lapic_timer_period = (freq * 1000) / HZ; +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index a0b511360a0c..05da509c7060 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -1930,6 +1930,10 @@ static struct kvm *svm_vm_alloc(void) + struct kvm_svm *kvm_svm = __vmalloc(sizeof(struct kvm_svm), + GFP_KERNEL_ACCOUNT | __GFP_ZERO, + PAGE_KERNEL); ++ ++ if (!kvm_svm) ++ return NULL; ++ + return &kvm_svm->kvm; + } + +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c +index 54a1a727249d..be99160166f2 100644 +--- a/arch/x86/kvm/vmx/nested.c ++++ b/arch/x86/kvm/vmx/nested.c +@@ -3611,7 +3611,7 @@ static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu) + vcpu->arch.exception.payload); + } + +-static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) ++static int vmx_check_nested_events(struct kvm_vcpu *vcpu) + { + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long exit_qual; +@@ -3660,8 +3660,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) + return 0; + } + +- if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && +- nested_exit_on_intr(vcpu)) { ++ if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(vcpu)) { + if (block_nested_events) + return -EBUSY; + nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); +@@ -4309,17 +4308,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, + vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; + + if (likely(!vmx->fail)) { +- /* +- * TODO: SDM says that with acknowledge interrupt on +- * exit, bit 31 of the VM-exit interrupt information +- * 
(valid interrupt) is always set to 1 on +- * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't +- * need kvm_cpu_has_interrupt(). See the commit +- * message for details. +- */ +- if (nested_exit_intr_ack_set(vcpu) && +- exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && +- kvm_cpu_has_interrupt(vcpu)) { ++ if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && ++ nested_exit_intr_ack_set(vcpu)) { + int irq = kvm_cpu_get_interrupt(vcpu); + WARN_ON(irq < 0); + vmcs12->vm_exit_intr_info = irq | +diff --git a/arch/x86/kvm/vmx/ops.h b/arch/x86/kvm/vmx/ops.h +index 45eaedee2ac0..09b0937d56b1 100644 +--- a/arch/x86/kvm/vmx/ops.h ++++ b/arch/x86/kvm/vmx/ops.h +@@ -12,7 +12,8 @@ + + #define __ex(x) __kvm_handle_fault_on_reboot(x) + +-asmlinkage void vmread_error(unsigned long field, bool fault); ++__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field, ++ bool fault); + void vmwrite_error(unsigned long field, unsigned long value); + void vmclear_error(struct vmcs *vmcs, u64 phys_addr); + void vmptrld_error(struct vmcs *vmcs, u64 phys_addr); +@@ -70,15 +71,28 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field) + asm volatile("1: vmread %2, %1\n\t" + ".byte 0x3e\n\t" /* branch taken hint */ + "ja 3f\n\t" +- "mov %2, %%" _ASM_ARG1 "\n\t" +- "xor %%" _ASM_ARG2 ", %%" _ASM_ARG2 "\n\t" +- "2: call vmread_error\n\t" +- "xor %k1, %k1\n\t" ++ ++ /* ++ * VMREAD failed. Push '0' for @fault, push the failing ++ * @field, and bounce through the trampoline to preserve ++ * volatile registers. ++ */ ++ "push $0\n\t" ++ "push %2\n\t" ++ "2:call vmread_error_trampoline\n\t" ++ ++ /* ++ * Unwind the stack. Note, the trampoline zeros out the ++ * memory for @fault so that the result is '0' on error. ++ */ ++ "pop %2\n\t" ++ "pop %1\n\t" + "3:\n\t" + ++ /* VMREAD faulted. As above, except push '1' for @fault. */ + ".pushsection .fixup, \"ax\"\n\t" +- "4: mov %2, %%" _ASM_ARG1 "\n\t" +- "mov $1, %%" _ASM_ARG2 "\n\t" ++ "4: push $1\n\t" ++ "push %2\n\t" + "jmp 2b\n\t" + ".popsection\n\t" + _ASM_EXTABLE(1b, 4b) +diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S +index 81ada2ce99e7..861ae40e7144 100644 +--- a/arch/x86/kvm/vmx/vmenter.S ++++ b/arch/x86/kvm/vmx/vmenter.S +@@ -234,3 +234,61 @@ SYM_FUNC_START(__vmx_vcpu_run) + 2: mov $1, %eax + jmp 1b + SYM_FUNC_END(__vmx_vcpu_run) ++ ++/** ++ * vmread_error_trampoline - Trampoline from inline asm to vmread_error() ++ * @field: VMCS field encoding that failed ++ * @fault: %true if the VMREAD faulted, %false if it failed ++ ++ * Save and restore volatile registers across a call to vmread_error(). Note, ++ * all parameters are passed on the stack. ++ */ ++SYM_FUNC_START(vmread_error_trampoline) ++ push %_ASM_BP ++ mov %_ASM_SP, %_ASM_BP ++ ++ push %_ASM_AX ++ push %_ASM_CX ++ push %_ASM_DX ++#ifdef CONFIG_X86_64 ++ push %rdi ++ push %rsi ++ push %r8 ++ push %r9 ++ push %r10 ++ push %r11 ++#endif ++#ifdef CONFIG_X86_64 ++ /* Load @field and @fault to arg1 and arg2 respectively. */ ++ mov 3*WORD_SIZE(%rbp), %_ASM_ARG2 ++ mov 2*WORD_SIZE(%rbp), %_ASM_ARG1 ++#else ++ /* Parameters are passed on the stack for 32-bit (see asmlinkage). */ ++ push 3*WORD_SIZE(%ebp) ++ push 2*WORD_SIZE(%ebp) ++#endif ++ ++ call vmread_error ++ ++#ifndef CONFIG_X86_64 ++ add $8, %esp ++#endif ++ ++ /* Zero out @fault, which will be popped into the result register. 
*/ ++ _ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP) ++ ++#ifdef CONFIG_X86_64 ++ pop %r11 ++ pop %r10 ++ pop %r9 ++ pop %r8 ++ pop %rsi ++ pop %rdi ++#endif ++ pop %_ASM_DX ++ pop %_ASM_CX ++ pop %_ASM_AX ++ pop %_ASM_BP ++ ++ ret ++SYM_FUNC_END(vmread_error_trampoline) +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index 3e381b31b9a6..5c85de08dc94 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -664,43 +664,15 @@ void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) + } + + #ifdef CONFIG_KEXEC_CORE +-/* +- * This bitmap is used to indicate whether the vmclear +- * operation is enabled on all cpus. All disabled by +- * default. +- */ +-static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE; +- +-static inline void crash_enable_local_vmclear(int cpu) +-{ +- cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap); +-} +- +-static inline void crash_disable_local_vmclear(int cpu) +-{ +- cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap); +-} +- +-static inline int crash_local_vmclear_enabled(int cpu) +-{ +- return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap); +-} +- + static void crash_vmclear_local_loaded_vmcss(void) + { + int cpu = raw_smp_processor_id(); + struct loaded_vmcs *v; + +- if (!crash_local_vmclear_enabled(cpu)) +- return; +- + list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu), + loaded_vmcss_on_cpu_link) + vmcs_clear(v->vmcs); + } +-#else +-static inline void crash_enable_local_vmclear(int cpu) { } +-static inline void crash_disable_local_vmclear(int cpu) { } + #endif /* CONFIG_KEXEC_CORE */ + + static void __loaded_vmcs_clear(void *arg) +@@ -712,19 +684,24 @@ static void __loaded_vmcs_clear(void *arg) + return; /* vcpu migration can race with cpu offline */ + if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) + per_cpu(current_vmcs, cpu) = NULL; +- crash_disable_local_vmclear(cpu); ++ ++ vmcs_clear(loaded_vmcs->vmcs); ++ if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched) ++ vmcs_clear(loaded_vmcs->shadow_vmcs); ++ + list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); + + /* +- * we should ensure updating loaded_vmcs->loaded_vmcss_on_cpu_link +- * is before setting loaded_vmcs->vcpu to -1 which is done in +- * loaded_vmcs_init. Otherwise, other cpu can see vcpu = -1 fist +- * then adds the vmcs into percpu list before it is deleted. ++ * Ensure all writes to loaded_vmcs, including deleting it from its ++ * current percpu list, complete before setting loaded_vmcs->vcpu to ++ * -1, otherwise a different cpu can see vcpu == -1 first and add ++ * loaded_vmcs to its percpu list before it's deleted from this cpu's ++ * list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs(). + */ + smp_wmb(); + +- loaded_vmcs_init(loaded_vmcs); +- crash_enable_local_vmclear(cpu); ++ loaded_vmcs->cpu = -1; ++ loaded_vmcs->launched = 0; + } + + void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) +@@ -1333,18 +1310,17 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu) + if (!already_loaded) { + loaded_vmcs_clear(vmx->loaded_vmcs); + local_irq_disable(); +- crash_disable_local_vmclear(cpu); + + /* +- * Read loaded_vmcs->cpu should be before fetching +- * loaded_vmcs->loaded_vmcss_on_cpu_link. +- * See the comments in __loaded_vmcs_clear(). ++ * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to ++ * this cpu's percpu list, otherwise it may not yet be deleted ++ * from its previous cpu's percpu list. Pairs with the ++ * smb_wmb() in __loaded_vmcs_clear(). 
+ */ + smp_rmb(); + + list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, + &per_cpu(loaded_vmcss_on_cpu, cpu)); +- crash_enable_local_vmclear(cpu); + local_irq_enable(); + } + +@@ -2256,21 +2232,6 @@ static int hardware_enable(void) + !hv_get_vp_assist_page(cpu)) + return -EFAULT; + +- INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); +- INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); +- spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); +- +- /* +- * Now we can enable the vmclear operation in kdump +- * since the loaded_vmcss_on_cpu list on this cpu +- * has been initialized. +- * +- * Though the cpu is not in VMX operation now, there +- * is no problem to enable the vmclear operation +- * for the loaded_vmcss_on_cpu list is empty! +- */ +- crash_enable_local_vmclear(cpu); +- + rdmsrl(MSR_IA32_FEATURE_CONTROL, old); + + test_bits = FEATURE_CONTROL_LOCKED; +@@ -4485,8 +4446,13 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) + + static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) + { +- return (!to_vmx(vcpu)->nested.nested_run_pending && +- vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && ++ if (to_vmx(vcpu)->nested.nested_run_pending) ++ return false; ++ ++ if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) ++ return true; ++ ++ return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && + !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & + (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); + } +@@ -6663,6 +6629,10 @@ static struct kvm *vmx_vm_alloc(void) + struct kvm_vmx *kvm_vmx = __vmalloc(sizeof(struct kvm_vmx), + GFP_KERNEL_ACCOUNT | __GFP_ZERO, + PAGE_KERNEL); ++ ++ if (!kvm_vmx) ++ return NULL; ++ + return &kvm_vmx->kvm; + } + +@@ -8032,7 +8002,7 @@ module_exit(vmx_exit); + + static int __init vmx_init(void) + { +- int r; ++ int r, cpu; + + #if IS_ENABLED(CONFIG_HYPERV) + /* +@@ -8086,6 +8056,12 @@ static int __init vmx_init(void) + return r; + } + ++ for_each_possible_cpu(cpu) { ++ INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); ++ INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); ++ spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); ++ } ++ + #ifdef CONFIG_KEXEC_CORE + rcu_assign_pointer(crash_vmclear_loaded_vmcss, + crash_vmclear_local_loaded_vmcss); +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index dafb5aff200f..8d2b2d5033cd 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -7572,7 +7572,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu) + kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); + } + +-static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) ++static int inject_pending_event(struct kvm_vcpu *vcpu) + { + int r; + +@@ -7608,7 +7608,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) + * from L2 to L1. + */ + if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { +- r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); ++ r = kvm_x86_ops->check_nested_events(vcpu); + if (r != 0) + return r; + } +@@ -7670,7 +7670,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) + * KVM_REQ_EVENT only on certain events and not unconditionally? 
+ */ + if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { +- r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); ++ r = kvm_x86_ops->check_nested_events(vcpu); + if (r != 0) + return r; + } +@@ -8159,7 +8159,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) + goto out; + } + +- if (inject_pending_event(vcpu, req_int_win) != 0) ++ if (inject_pending_event(vcpu) != 0) + req_immediate_exit = true; + else { + /* Enable SMI/NMI/IRQ window open exits if needed. +@@ -8389,7 +8389,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) + static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) + { + if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) +- kvm_x86_ops->check_nested_events(vcpu, false); ++ kvm_x86_ops->check_nested_events(vcpu); + + return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && + !vcpu->arch.apf.halted); +@@ -9768,6 +9768,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + { + int i; + ++ /* ++ * Clear out the previous array pointers for the KVM_MR_MOVE case. The ++ * old arrays will be freed by __kvm_set_memory_region() if installing ++ * the new memslot is successful. ++ */ ++ memset(&slot->arch, 0, sizeof(slot->arch)); ++ + for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { + struct kvm_lpage_info *linfo; + unsigned long ugfn; +@@ -9849,6 +9856,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, + const struct kvm_userspace_memory_region *mem, + enum kvm_mr_change change) + { ++ if (change == KVM_MR_MOVE) ++ return kvm_arch_create_memslot(kvm, memslot, ++ mem->memory_size >> PAGE_SHIFT); ++ + return 0; + } + +diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c +index ad4dd3a97753..89ae6adfc4c4 100644 +--- a/arch/x86/platform/efi/efi.c ++++ b/arch/x86/platform/efi/efi.c +@@ -85,6 +85,8 @@ static const unsigned long * const efi_tables[] = { + #ifdef CONFIG_EFI_RCI2_TABLE + &rci2_table_phys, + #endif ++ &efi.tpm_log, ++ &efi.tpm_final_log, + }; + + u64 efi_setup; /* efi setup_data physical address */ +diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c +index fe0e647411da..e39c930cfbd1 100644 +--- a/arch/x86/platform/efi/efi_64.c ++++ b/arch/x86/platform/efi/efi_64.c +@@ -834,7 +834,7 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor, + phys_vendor = virt_to_phys_or_null(vnd); + phys_data = virt_to_phys_or_null_size(data, data_size); + +- if (!phys_name || !phys_data) ++ if (!phys_name || (data && !phys_data)) + status = EFI_INVALID_PARAMETER; + else + status = efi_thunk(set_variable, phys_name, phys_vendor, +@@ -865,7 +865,7 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor, + phys_vendor = virt_to_phys_or_null(vnd); + phys_data = virt_to_phys_or_null_size(data, data_size); + +- if (!phys_name || !phys_data) ++ if (!phys_name || (data && !phys_data)) + status = EFI_INVALID_PARAMETER; + else + status = efi_thunk(set_variable, phys_name, phys_vendor, +diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c +index 5a64607ce774..a6d42339fb34 100644 +--- a/block/bfq-cgroup.c ++++ b/block/bfq-cgroup.c +@@ -641,6 +641,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, + { + struct bfq_entity *entity = &bfqq->entity; + ++ /* ++ * Get extra reference to prevent bfqq from being freed in ++ * next possible expire or deactivate. 
++ */ ++ bfqq->ref++; ++ + /* If bfqq is empty, then bfq_bfqq_expire also invokes + * bfq_del_bfqq_busy, thereby removing bfqq and its entity + * from data structures related to current group. Otherwise we +@@ -651,12 +657,6 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, + bfq_bfqq_expire(bfqd, bfqd->in_service_queue, + false, BFQQE_PREEMPTED); + +- /* +- * get extra reference to prevent bfqq from being freed in +- * next possible deactivate +- */ +- bfqq->ref++; +- + if (bfq_bfqq_busy(bfqq)) + bfq_deactivate_bfqq(bfqd, bfqq, false, false); + else if (entity->on_st) +@@ -676,7 +676,7 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, + + if (!bfqd->in_service_queue && !bfqd->rq_in_driver) + bfq_schedule_dispatch(bfqd); +- /* release extra ref taken above */ ++ /* release extra ref taken above, bfqq may happen to be freed now */ + bfq_put_queue(bfqq); + } + +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c +index 8fe4b6919511..43fbe5d096e3 100644 +--- a/block/bfq-iosched.c ++++ b/block/bfq-iosched.c +@@ -6214,20 +6214,28 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) + return bfqq; + } + +-static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) ++static void ++bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq) + { +- struct bfq_data *bfqd = bfqq->bfqd; + enum bfqq_expiration reason; + unsigned long flags; + + spin_lock_irqsave(&bfqd->lock, flags); +- bfq_clear_bfqq_wait_request(bfqq); + ++ /* ++ * Considering that bfqq may be in race, we should firstly check ++ * whether bfqq is in service before doing something on it. If ++ * the bfqq in race is not in service, it has already been expired ++ * through __bfq_bfqq_expire func and its wait_request flags has ++ * been cleared in __bfq_bfqd_reset_in_service func. ++ */ + if (bfqq != bfqd->in_service_queue) { + spin_unlock_irqrestore(&bfqd->lock, flags); + return; + } + ++ bfq_clear_bfqq_wait_request(bfqq); ++ + if (bfq_bfqq_budget_timeout(bfqq)) + /* + * Also here the queue can be safely expired +@@ -6272,7 +6280,7 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) + * early. + */ + if (bfqq) +- bfq_idle_slice_timer_body(bfqq); ++ bfq_idle_slice_timer_body(bfqd, bfqq); + + return HRTIMER_NORESTART; + } +diff --git a/block/blk-ioc.c b/block/blk-ioc.c +index 5ed59ac6ae58..9df50fb507ca 100644 +--- a/block/blk-ioc.c ++++ b/block/blk-ioc.c +@@ -84,6 +84,7 @@ static void ioc_destroy_icq(struct io_cq *icq) + * making it impossible to determine icq_cache. Record it in @icq. 
+ */ + icq->__rcu_icq_cache = et->icq_cache; ++ icq->flags |= ICQ_DESTROYED; + call_rcu(&icq->__rcu_head, icq_free_icq_rcu); + } + +@@ -212,15 +213,21 @@ static void __ioc_clear_queue(struct list_head *icq_list) + { + unsigned long flags; + ++ rcu_read_lock(); + while (!list_empty(icq_list)) { + struct io_cq *icq = list_entry(icq_list->next, + struct io_cq, q_node); + struct io_context *ioc = icq->ioc; + + spin_lock_irqsave(&ioc->lock, flags); ++ if (icq->flags & ICQ_DESTROYED) { ++ spin_unlock_irqrestore(&ioc->lock, flags); ++ continue; ++ } + ioc_destroy_icq(icq); + spin_unlock_irqrestore(&ioc->lock, flags); + } ++ rcu_read_unlock(); + } + + /** +diff --git a/block/blk-mq.c b/block/blk-mq.c +index 7d7800e95895..8391e8e2a504 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -2766,7 +2766,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, + memcpy(new_hctxs, hctxs, q->nr_hw_queues * + sizeof(*hctxs)); + q->queue_hw_ctx = new_hctxs; +- q->nr_hw_queues = set->nr_hw_queues; + kfree(hctxs); + hctxs = new_hctxs; + } +diff --git a/block/blk-settings.c b/block/blk-settings.c +index c8eda2e7b91e..be1dca0103a4 100644 +--- a/block/blk-settings.c ++++ b/block/blk-settings.c +@@ -664,6 +664,9 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, + printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", + top, bottom); + } ++ ++ t->backing_dev_info->io_pages = ++ t->limits.max_sectors >> (PAGE_SHIFT - 9); + } + EXPORT_SYMBOL(disk_stack_limits); + +diff --git a/block/blk-zoned.c b/block/blk-zoned.c +index d00fcfd71dfe..eb27e80e9075 100644 +--- a/block/blk-zoned.c ++++ b/block/blk-zoned.c +@@ -173,7 +173,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op, + if (!op_is_zone_mgmt(op)) + return -EOPNOTSUPP; + +- if (!nr_sectors || end_sector > capacity) ++ if (end_sector <= sector || end_sector > capacity) + /* Out of range */ + return -EINVAL; + +diff --git a/crypto/rng.c b/crypto/rng.c +index 1e21231f71c9..1490d210f1a1 100644 +--- a/crypto/rng.c ++++ b/crypto/rng.c +@@ -37,12 +37,16 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) + crypto_stats_get(alg); + if (!seed && slen) { + buf = kmalloc(slen, GFP_KERNEL); +- if (!buf) ++ if (!buf) { ++ crypto_alg_put(alg); + return -ENOMEM; ++ } + + err = get_random_bytes_wait(buf, slen); +- if (err) ++ if (err) { ++ crypto_alg_put(alg); + goto out; ++ } + seed = buf; + } + +diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h +index a74c1a0e892d..c0e243668261 100644 +--- a/drivers/acpi/acpica/achware.h ++++ b/drivers/acpi/acpica/achware.h +@@ -101,7 +101,7 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void); + + acpi_status acpi_hw_enable_all_wakeup_gpes(void); + +-u8 acpi_hw_check_all_gpes(void); ++u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number); + + acpi_status + acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, +diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c +index 84b0b410310e..3e1813ebcca8 100644 +--- a/drivers/acpi/acpica/evxfgpe.c ++++ b/drivers/acpi/acpica/evxfgpe.c +@@ -799,17 +799,19 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes) + * + * FUNCTION: acpi_any_gpe_status_set + * +- * PARAMETERS: None ++ * PARAMETERS: gpe_skip_number - Number of the GPE to skip + * + * RETURN: Whether or not the status bit is set for any GPE + * +- * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any +- * of them is set or FALSE otherwise. 
++ * DESCRIPTION: Check the status bits of all enabled GPEs, except for the one ++ * represented by the "skip" argument, and return TRUE if any of ++ * them is set or FALSE otherwise. + * + ******************************************************************************/ +-u32 acpi_any_gpe_status_set(void) ++u32 acpi_any_gpe_status_set(u32 gpe_skip_number) + { + acpi_status status; ++ acpi_handle gpe_device; + u8 ret; + + ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set); +@@ -819,7 +821,12 @@ u32 acpi_any_gpe_status_set(void) + return (FALSE); + } + +- ret = acpi_hw_check_all_gpes(); ++ status = acpi_get_gpe_device(gpe_skip_number, &gpe_device); ++ if (ACPI_FAILURE(status)) { ++ gpe_device = NULL; ++ } ++ ++ ret = acpi_hw_check_all_gpes(gpe_device, gpe_skip_number); + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); + + return (ret); +diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c +index b1d7d5f92495..12516b07336e 100644 +--- a/drivers/acpi/acpica/hwgpe.c ++++ b/drivers/acpi/acpica/hwgpe.c +@@ -444,12 +444,19 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, + return (AE_OK); + } + ++struct acpi_gpe_block_status_context { ++ struct acpi_gpe_register_info *gpe_skip_register_info; ++ u8 gpe_skip_mask; ++ u8 retval; ++}; ++ + /****************************************************************************** + * + * FUNCTION: acpi_hw_get_gpe_block_status + * + * PARAMETERS: gpe_xrupt_info - GPE Interrupt info + * gpe_block - Gpe Block info ++ * context - GPE list walk context data + * + * RETURN: Success + * +@@ -460,12 +467,13 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, + static acpi_status + acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info, + struct acpi_gpe_block_info *gpe_block, +- void *ret_ptr) ++ void *context) + { ++ struct acpi_gpe_block_status_context *c = context; + struct acpi_gpe_register_info *gpe_register_info; + u64 in_enable, in_status; + acpi_status status; +- u8 *ret = ret_ptr; ++ u8 ret_mask; + u32 i; + + /* Examine each GPE Register within the block */ +@@ -485,7 +493,11 @@ acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info, + continue; + } + +- *ret |= in_enable & in_status; ++ ret_mask = in_enable & in_status; ++ if (ret_mask && c->gpe_skip_register_info == gpe_register_info) { ++ ret_mask &= ~c->gpe_skip_mask; ++ } ++ c->retval |= ret_mask; + } + + return (AE_OK); +@@ -561,24 +573,41 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void) + * + * FUNCTION: acpi_hw_check_all_gpes + * +- * PARAMETERS: None ++ * PARAMETERS: gpe_skip_device - GPE devoce of the GPE to skip ++ * gpe_skip_number - Number of the GPE to skip + * + * RETURN: Combined status of all GPEs + * +- * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the ++ * DESCRIPTION: Check all enabled GPEs in all GPE blocks, except for the one ++ * represented by the "skip" arguments, and return TRUE if the + * status bit is set for at least one of them of FALSE otherwise. 
+ * + ******************************************************************************/ + +-u8 acpi_hw_check_all_gpes(void) ++u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number) + { +- u8 ret = 0; ++ struct acpi_gpe_block_status_context context = { ++ .gpe_skip_register_info = NULL, ++ .retval = 0, ++ }; ++ struct acpi_gpe_event_info *gpe_event_info; ++ acpi_cpu_flags flags; + + ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes); + +- (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret); ++ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); ++ ++ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_skip_device, ++ gpe_skip_number); ++ if (gpe_event_info) { ++ context.gpe_skip_register_info = gpe_event_info->register_info; ++ context.gpe_skip_mask = acpi_hw_get_gpe_register_bit(gpe_event_info); ++ } ++ ++ acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + +- return (ret != 0); ++ (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &context); ++ return (context.retval != 0); + } + + #endif /* !ACPI_REDUCED_HARDWARE */ +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c +index bd74c7836675..9a82841c5b5a 100644 +--- a/drivers/acpi/ec.c ++++ b/drivers/acpi/ec.c +@@ -1592,14 +1592,19 @@ static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, + return ret; + + /* First EC capable of handling transactions */ +- if (!first_ec) { ++ if (!first_ec) + first_ec = ec; +- acpi_handle_info(first_ec->handle, "Used as first EC\n"); ++ ++ pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr, ++ ec->data_addr); ++ ++ if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { ++ if (ec->gpe >= 0) ++ pr_info("GPE=0x%x\n", ec->gpe); ++ else ++ pr_info("IRQ=%d\n", ec->irq); + } + +- acpi_handle_info(ec->handle, +- "GPE=0x%x, IRQ=%d, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", +- ec->gpe, ec->irq, ec->command_addr, ec->data_addr); + return ret; + } + +@@ -1649,7 +1654,6 @@ static int acpi_ec_add(struct acpi_device *device) + + if (boot_ec && ec->command_addr == boot_ec->command_addr && + ec->data_addr == boot_ec->data_addr) { +- boot_ec_is_ecdt = false; + /* + * Trust PNP0C09 namespace location rather than + * ECDT ID. But trust ECDT GPE rather than _GPE +@@ -1669,9 +1673,12 @@ static int acpi_ec_add(struct acpi_device *device) + + if (ec == boot_ec) + acpi_handle_info(boot_ec->handle, +- "Boot %s EC used to handle transactions and events\n", ++ "Boot %s EC initialization complete\n", + boot_ec_is_ecdt ? "ECDT" : "DSDT"); + ++ acpi_handle_info(ec->handle, ++ "EC: Used to handle transactions and events\n"); ++ + device->driver_data = ec; + + ret = !!request_region(ec->data_addr, 1, "EC data"); +@@ -2045,6 +2052,11 @@ void acpi_ec_set_gpe_wake_mask(u8 action) + acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action); + } + ++bool acpi_ec_other_gpes_active(void) ++{ ++ return acpi_any_gpe_status_set(first_ec ? 
first_ec->gpe : U32_MAX); ++} ++ + bool acpi_ec_dispatch_gpe(void) + { + u32 ret; +diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h +index 3616daec650b..d44c591c4ee4 100644 +--- a/drivers/acpi/internal.h ++++ b/drivers/acpi/internal.h +@@ -202,6 +202,7 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit); + + #ifdef CONFIG_PM_SLEEP + void acpi_ec_flush_work(void); ++bool acpi_ec_other_gpes_active(void); + bool acpi_ec_dispatch_gpe(void); + #endif + +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c +index abd39cc5ff88..ce1d9048a36d 100644 +--- a/drivers/acpi/sleep.c ++++ b/drivers/acpi/sleep.c +@@ -1014,18 +1014,19 @@ static bool acpi_s2idle_wake(void) + return true; + + /* +- * If there are no EC events to process and at least one of the +- * other enabled GPEs is active, the wakeup is regarded as a +- * genuine one. +- * +- * Note that the checks below must be carried out in this order +- * to avoid returning prematurely due to a change of the EC GPE +- * status bit from unset to set between the checks with the +- * status bits of all the other GPEs unset. ++ * If the status bit is set for any enabled GPE other than the ++ * EC one, the wakeup is regarded as a genuine one. + */ +- if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe()) ++ if (acpi_ec_other_gpes_active()) + return true; + ++ /* ++ * If the EC GPE status bit has not been set, the wakeup is ++ * regarded as a spurious one. ++ */ ++ if (!acpi_ec_dispatch_gpe()) ++ return false; ++ + /* + * Cancel the wakeup and process all pending events in case + * there are any wakeup ones in there. +diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c +index 3ff14071617c..79f2aeeb482a 100644 +--- a/drivers/ata/libata-pmp.c ++++ b/drivers/ata/libata-pmp.c +@@ -763,6 +763,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, + + if (dev->flags & ATA_DFLAG_DETACH) { + detach = 1; ++ rc = -ENODEV; + goto fail; + } + +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 58e09ffe8b9c..5af34a3201ed 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -4553,22 +4553,19 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) + */ + shost->max_host_blocked = 1; + +- rc = scsi_add_host_with_dma(ap->scsi_host, +- &ap->tdev, ap->host->dev); ++ rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev); + if (rc) +- goto err_add; ++ goto err_alloc; + } + + return 0; + +- err_add: +- scsi_host_put(host->ports[i]->scsi_host); + err_alloc: + while (--i >= 0) { + struct Scsi_Host *shost = host->ports[i]->scsi_host; + ++ /* scsi_host_put() is in ata_devres_release() */ + scsi_remove_host(shost); +- scsi_host_put(shost); + } + return rc; + } +diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c +index 62ee90b4db56..70efbb22dfc3 100644 +--- a/drivers/base/firmware_loader/fallback.c ++++ b/drivers/base/firmware_loader/fallback.c +@@ -525,7 +525,7 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, + } + + retval = fw_sysfs_wait_timeout(fw_priv, timeout); +- if (retval < 0) { ++ if (retval < 0 && retval != -ENOENT) { + mutex_lock(&fw_lock); + fw_load_abort(fw_sysfs); + mutex_unlock(&fw_lock); +diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c +index 8e5725b11ee8..b3c91f2a1e2f 100644 +--- a/drivers/base/power/domain.c ++++ b/drivers/base/power/domain.c +@@ -2615,7 +2615,7 @@ static int genpd_iterate_idle_states(struct device_node *dn, + + ret = 
of_count_phandle_with_args(dn, "domain-idle-states", NULL); + if (ret <= 0) +- return ret; ++ return ret == -ENOENT ? 0 : ret; + + /* Loop over the phandles until all the requested entry is found */ + of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) { +diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c +index 70a9edb5f525..12389457723f 100644 +--- a/drivers/base/power/wakeup.c ++++ b/drivers/base/power/wakeup.c +@@ -241,7 +241,9 @@ void wakeup_source_unregister(struct wakeup_source *ws) + { + if (ws) { + wakeup_source_remove(ws); +- wakeup_source_sysfs_remove(ws); ++ if (ws->dev) ++ wakeup_source_sysfs_remove(ws); ++ + wakeup_source_destroy(ws); + } + } +diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c +index ae8d4bc532b0..0cafad09c9b2 100644 +--- a/drivers/block/null_blk_main.c ++++ b/drivers/block/null_blk_main.c +@@ -605,6 +605,7 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) + if (tag != -1U) { + cmd = &nq->cmds[tag]; + cmd->tag = tag; ++ cmd->error = BLK_STS_OK; + cmd->nq = nq; + if (nq->dev->irqmode == NULL_IRQ_TIMER) { + hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, +@@ -1385,6 +1386,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, + cmd->timer.function = null_cmd_timer_expired; + } + cmd->rq = bd->rq; ++ cmd->error = BLK_STS_OK; + cmd->nq = nq; + + blk_mq_start_request(bd->rq); +@@ -1432,7 +1434,12 @@ static void cleanup_queues(struct nullb *nullb) + + static void null_del_dev(struct nullb *nullb) + { +- struct nullb_device *dev = nullb->dev; ++ struct nullb_device *dev; ++ ++ if (!nullb) ++ return; ++ ++ dev = nullb->dev; + + ida_simple_remove(&nullb_indexes, nullb->index); + +@@ -1790,6 +1797,7 @@ out_cleanup_queues: + cleanup_queues(nullb); + out_free_nullb: + kfree(nullb); ++ dev->nullb = NULL; + out: + return rv; + } +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c +index c02be06c5299..ab5482202cfb 100644 +--- a/drivers/block/xen-blkfront.c ++++ b/drivers/block/xen-blkfront.c +@@ -47,6 +47,7 @@ + #include <linux/bitmap.h> + #include <linux/list.h> + #include <linux/workqueue.h> ++#include <linux/sched/mm.h> + + #include <xen/xen.h> + #include <xen/xenbus.h> +@@ -2188,10 +2189,12 @@ static void blkfront_setup_discard(struct blkfront_info *info) + + static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) + { +- unsigned int psegs, grants; ++ unsigned int psegs, grants, memflags; + int err, i; + struct blkfront_info *info = rinfo->dev_info; + ++ memflags = memalloc_noio_save(); ++ + if (info->max_indirect_segments == 0) { + if (!HAS_EXTRA_REQ) + grants = BLKIF_MAX_SEGMENTS_PER_REQUEST; +@@ -2223,7 +2226,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) + + BUG_ON(!list_empty(&rinfo->indirect_pages)); + for (i = 0; i < num; i++) { +- struct page *indirect_page = alloc_page(GFP_NOIO); ++ struct page *indirect_page = alloc_page(GFP_KERNEL); + if (!indirect_page) + goto out_of_memory; + list_add(&indirect_page->lru, &rinfo->indirect_pages); +@@ -2234,15 +2237,15 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) + rinfo->shadow[i].grants_used = + kvcalloc(grants, + sizeof(rinfo->shadow[i].grants_used[0]), +- GFP_NOIO); ++ GFP_KERNEL); + rinfo->shadow[i].sg = kvcalloc(psegs, + sizeof(rinfo->shadow[i].sg[0]), +- GFP_NOIO); ++ GFP_KERNEL); + if (info->max_indirect_segments) + rinfo->shadow[i].indirect_grants = + kvcalloc(INDIRECT_GREFS(grants), + sizeof(rinfo->shadow[i].indirect_grants[0]), +- GFP_NOIO); ++ 
GFP_KERNEL); + if ((rinfo->shadow[i].grants_used == NULL) || + (rinfo->shadow[i].sg == NULL) || + (info->max_indirect_segments && +@@ -2251,6 +2254,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) + sg_init_table(rinfo->shadow[i].sg, psegs); + } + ++ memalloc_noio_restore(memflags); + + return 0; + +@@ -2270,6 +2274,9 @@ out_of_memory: + __free_page(indirect_page); + } + } ++ ++ memalloc_noio_restore(memflags); ++ + return -ENOMEM; + } + +diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c +index be79d6c6a4e4..1bb00a959c67 100644 +--- a/drivers/bus/sunxi-rsb.c ++++ b/drivers/bus/sunxi-rsb.c +@@ -345,7 +345,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr, + if (ret) + goto unlock; + +- *buf = readl(rsb->regs + RSB_DATA); ++ *buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0); + + unlock: + mutex_unlock(&rsb->lock); +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c +index cad9563f8f48..4c51f794d04c 100644 +--- a/drivers/char/ipmi/ipmi_msghandler.c ++++ b/drivers/char/ipmi/ipmi_msghandler.c +@@ -3188,8 +3188,8 @@ static void __get_guid(struct ipmi_smi *intf) + if (rv) + /* Send failed, no GUID available. */ + bmc->dyn_guid_set = 0; +- +- wait_event(intf->waitq, bmc->dyn_guid_set != 2); ++ else ++ wait_event(intf->waitq, bmc->dyn_guid_set != 2); + + /* dyn_guid_set makes the guid data available. */ + smp_rmb(); +diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c +index 7a0fca659b6a..7460f230bae4 100644 +--- a/drivers/char/tpm/eventlog/common.c ++++ b/drivers/char/tpm/eventlog/common.c +@@ -99,11 +99,8 @@ static int tpm_read_log(struct tpm_chip *chip) + * + * If an event log is found then the securityfs files are setup to + * export it to userspace, otherwise nothing is done. +- * +- * Returns -ENODEV if the firmware has no event log or securityfs is not +- * supported. 
+ */ +-int tpm_bios_log_setup(struct tpm_chip *chip) ++void tpm_bios_log_setup(struct tpm_chip *chip) + { + const char *name = dev_name(&chip->dev); + unsigned int cnt; +@@ -112,7 +109,7 @@ int tpm_bios_log_setup(struct tpm_chip *chip) + + rc = tpm_read_log(chip); + if (rc < 0) +- return rc; ++ return; + log_version = rc; + + cnt = 0; +@@ -158,13 +155,12 @@ int tpm_bios_log_setup(struct tpm_chip *chip) + cnt++; + } + +- return 0; ++ return; + + err: +- rc = PTR_ERR(chip->bios_dir[cnt]); + chip->bios_dir[cnt] = NULL; + tpm_bios_log_teardown(chip); +- return rc; ++ return; + } + + void tpm_bios_log_teardown(struct tpm_chip *chip) +diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c +index 739b1d9d16b6..2c96977ad080 100644 +--- a/drivers/char/tpm/eventlog/tpm1.c ++++ b/drivers/char/tpm/eventlog/tpm1.c +@@ -115,6 +115,7 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v, + u32 converted_event_size; + u32 converted_event_type; + ++ (*pos)++; + converted_event_size = do_endian_conversion(event->event_size); + + v += sizeof(struct tcpa_event) + converted_event_size; +@@ -132,7 +133,6 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v, + ((v + sizeof(struct tcpa_event) + converted_event_size) > limit)) + return NULL; + +- (*pos)++; + return v; + } + +diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c +index b9aeda1cbcd7..e741b1157525 100644 +--- a/drivers/char/tpm/eventlog/tpm2.c ++++ b/drivers/char/tpm/eventlog/tpm2.c +@@ -94,6 +94,7 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v, + size_t event_size; + void *marker; + ++ (*pos)++; + event_header = log->bios_event_log; + + if (v == SEQ_START_TOKEN) { +@@ -118,7 +119,6 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v, + if (((v + event_size) >= limit) || (event_size == 0)) + return NULL; + +- (*pos)++; + return v; + } + +diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c +index 3d6d394a8661..58073836b555 100644 +--- a/drivers/char/tpm/tpm-chip.c ++++ b/drivers/char/tpm/tpm-chip.c +@@ -596,9 +596,7 @@ int tpm_chip_register(struct tpm_chip *chip) + + tpm_sysfs_add_device(chip); + +- rc = tpm_bios_log_setup(chip); +- if (rc != 0 && rc != -ENODEV) +- return rc; ++ tpm_bios_log_setup(chip); + + tpm_add_ppi(chip); + +diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h +index 5620747da0cf..2b2c225e1190 100644 +--- a/drivers/char/tpm/tpm.h ++++ b/drivers/char/tpm/tpm.h +@@ -235,7 +235,7 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, + int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf, + size_t *bufsiz); + +-int tpm_bios_log_setup(struct tpm_chip *chip); ++void tpm_bios_log_setup(struct tpm_chip *chip); + void tpm_bios_log_teardown(struct tpm_chip *chip); + int tpm_dev_common_init(void); + void tpm_dev_common_exit(void); +diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c +index 956dd653a43d..c051ecba5cf8 100644 +--- a/drivers/clk/ingenic/jz4770-cgu.c ++++ b/drivers/clk/ingenic/jz4770-cgu.c +@@ -432,8 +432,10 @@ static void __init jz4770_cgu_init(struct device_node *np) + + cgu = ingenic_cgu_new(jz4770_cgu_clocks, + ARRAY_SIZE(jz4770_cgu_clocks), np); +- if (!cgu) ++ if (!cgu) { + pr_err("%s: failed to initialise CGU\n", __func__); ++ return; ++ } + + retval = ingenic_cgu_register_clocks(cgu); + if (retval) +diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c +index 
ad7daa494fd4..cd537c3db782 100644 +--- a/drivers/clk/ingenic/tcu.c ++++ b/drivers/clk/ingenic/tcu.c +@@ -189,7 +189,7 @@ static long ingenic_tcu_round_rate(struct clk_hw *hw, unsigned long req_rate, + u8 prescale; + + if (req_rate > rate) +- return -EINVAL; ++ return rate; + + prescale = ingenic_tcu_get_prescale(rate, req_rate); + +diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c +index 648a09a1778a..edef3399c979 100644 +--- a/drivers/cpufreq/imx6q-cpufreq.c ++++ b/drivers/cpufreq/imx6q-cpufreq.c +@@ -280,6 +280,9 @@ static int imx6ul_opp_check_speed_grading(struct device *dev) + void __iomem *base; + + np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp"); ++ if (!np) ++ np = of_find_compatible_node(NULL, NULL, ++ "fsl,imx6ull-ocotp"); + if (!np) + return -ENOENT; + +@@ -378,23 +381,24 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) + goto put_reg; + } + ++ /* Because we have added the OPPs here, we must free them */ ++ free_opp = true; ++ + if (of_machine_is_compatible("fsl,imx6ul") || + of_machine_is_compatible("fsl,imx6ull")) { + ret = imx6ul_opp_check_speed_grading(cpu_dev); + if (ret) { + if (ret == -EPROBE_DEFER) +- goto put_node; ++ goto out_free_opp; + + dev_err(cpu_dev, "failed to read ocotp: %d\n", + ret); +- goto put_node; ++ goto out_free_opp; + } + } else { + imx6q_opp_check_speed_grading(cpu_dev); + } + +- /* Because we have added the OPPs here, we must free them */ +- free_opp = true; + num = dev_pm_opp_get_opp_count(cpu_dev); + if (num < 0) { + ret = num; +diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c +index 56f4bc0d209e..1806b1da4366 100644 +--- a/drivers/cpufreq/powernv-cpufreq.c ++++ b/drivers/cpufreq/powernv-cpufreq.c +@@ -1080,6 +1080,12 @@ free_and_return: + + static inline void clean_chip_info(void) + { ++ int i; ++ ++ /* flush any pending work items */ ++ if (chips) ++ for (i = 0; i < nr_chips; i++) ++ cancel_work_sync(&chips[i].throttle); + kfree(chips); + } + +diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c +index aa9ccca67045..d6c58184bb57 100644 +--- a/drivers/crypto/caam/caamalg_desc.c ++++ b/drivers/crypto/caam/caamalg_desc.c +@@ -1379,6 +1379,9 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata, + const u32 ctx1_iv_off) + { + u32 *key_jump_cmd; ++ u32 options = cdata->algtype | OP_ALG_AS_INIT | OP_ALG_ENCRYPT; ++ bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) == ++ OP_ALG_ALGSEL_CHACHA20); + + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + /* Skip if already shared */ +@@ -1417,14 +1420,15 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata, + LDST_OFFSET_SHIFT)); + + /* Load operation */ +- append_operation(desc, cdata->algtype | OP_ALG_AS_INIT | +- OP_ALG_ENCRYPT); ++ if (is_chacha20) ++ options |= OP_ALG_AS_FINALIZE; ++ append_operation(desc, options); + + /* Perform operation */ + skcipher_append_src_dst(desc); + + /* Store IV */ +- if (ivsize) ++ if (!is_chacha20 && ivsize) + append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB | (ctx1_iv_off << + LDST_OFFSET_SHIFT)); +@@ -1451,6 +1455,8 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata, + const u32 ctx1_iv_off) + { + u32 *key_jump_cmd; ++ bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) == ++ OP_ALG_ALGSEL_CHACHA20); + + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + /* Skip if already shared */ +@@ -1499,7 +1505,7 @@ void 
cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata, + skcipher_append_src_dst(desc); + + /* Store IV */ +- if (ivsize) ++ if (!is_chacha20 && ivsize) + append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB | (ctx1_iv_off << + LDST_OFFSET_SHIFT)); +@@ -1518,7 +1524,13 @@ EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap); + */ + void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata) + { +- __be64 sector_size = cpu_to_be64(512); ++ /* ++ * Set sector size to a big value, practically disabling ++ * sector size segmentation in xts implementation. We cannot ++ * take full advantage of this HW feature with existing ++ * crypto API / dm-crypt SW architecture. ++ */ ++ __be64 sector_size = cpu_to_be64(BIT(15)); + u32 *key_jump_cmd; + + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); +@@ -1571,7 +1583,13 @@ EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap); + */ + void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata) + { +- __be64 sector_size = cpu_to_be64(512); ++ /* ++ * Set sector size to a big value, practically disabling ++ * sector size segmentation in xts implementation. We cannot ++ * take full advantage of this HW feature with existing ++ * crypto API / dm-crypt SW architecture. ++ */ ++ __be64 sector_size = cpu_to_be64(BIT(15)); + u32 *key_jump_cmd; + + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); +diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c +index a72586eccd81..954f14bddf1d 100644 +--- a/drivers/crypto/ccree/cc_buffer_mgr.c ++++ b/drivers/crypto/ccree/cc_buffer_mgr.c +@@ -87,6 +87,8 @@ static unsigned int cc_get_sgl_nents(struct device *dev, + { + unsigned int nents = 0; + ++ *lbytes = 0; ++ + while (nbytes && sg_list) { + nents++; + /* get the number of bytes in the last entry */ +@@ -95,6 +97,7 @@ static unsigned int cc_get_sgl_nents(struct device *dev, + nbytes : sg_list->length; + sg_list = sg_next(sg_list); + } ++ + dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes); + return nents; + } +@@ -290,37 +293,25 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg, + unsigned int nbytes, int direction, u32 *nents, + u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents) + { +- if (sg_is_last(sg)) { +- /* One entry only case -set to DLLI */ +- if (dma_map_sg(dev, sg, 1, direction) != 1) { +- dev_err(dev, "dma_map_sg() single buffer failed\n"); +- return -ENOMEM; +- } +- dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n", +- &sg_dma_address(sg), sg_page(sg), sg_virt(sg), +- sg->offset, sg->length); +- *lbytes = nbytes; +- *nents = 1; +- *mapped_nents = 1; +- } else { /*sg_is_last*/ +- *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); +- if (*nents > max_sg_nents) { +- *nents = 0; +- dev_err(dev, "Too many fragments. current %d max %d\n", +- *nents, max_sg_nents); +- return -ENOMEM; +- } +- /* In case of mmu the number of mapped nents might +- * be changed from the original sgl nents +- */ +- *mapped_nents = dma_map_sg(dev, sg, *nents, direction); +- if (*mapped_nents == 0) { +- *nents = 0; +- dev_err(dev, "dma_map_sg() sg buffer failed\n"); +- return -ENOMEM; +- } ++ int ret = 0; ++ ++ *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); ++ if (*nents > max_sg_nents) { ++ *nents = 0; ++ dev_err(dev, "Too many fragments. 
current %d max %d\n", ++ *nents, max_sg_nents); ++ return -ENOMEM; ++ } ++ ++ ret = dma_map_sg(dev, sg, *nents, direction); ++ if (dma_mapping_error(dev, ret)) { ++ *nents = 0; ++ dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret); ++ return -ENOMEM; + } + ++ *mapped_nents = ret; ++ + return 0; + } + +@@ -555,11 +546,12 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) + sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, + areq_ctx->assoclen, req->cryptlen); + +- dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL); ++ dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, ++ DMA_BIDIRECTIONAL); + if (req->src != req->dst) { + dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n", + sg_virt(req->dst)); +- dma_unmap_sg(dev, req->dst, sg_nents(req->dst), ++ dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, + DMA_BIDIRECTIONAL); + } + if (drvdata->coherent && +@@ -881,7 +873,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, + &src_last_bytes); + sg_index = areq_ctx->src_sgl->length; + //check where the data starts +- while (sg_index <= size_to_skip) { ++ while (src_mapped_nents && (sg_index <= size_to_skip)) { + src_mapped_nents--; + offset -= areq_ctx->src_sgl->length; + sgl = sg_next(areq_ctx->src_sgl); +@@ -902,13 +894,17 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, + + if (req->src != req->dst) { + size_for_map = areq_ctx->assoclen + req->cryptlen; +- size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? +- authsize : 0; ++ ++ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ++ size_for_map += authsize; ++ else ++ size_for_map -= authsize; ++ + if (is_gcm4543) + size_for_map += crypto_aead_ivsize(tfm); + + rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL, +- &areq_ctx->dst.nents, ++ &areq_ctx->dst.mapped_nents, + LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes, + &dst_mapped_nents); + if (rc) +@@ -921,7 +917,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, + offset = size_to_skip; + + //check where the data starts +- while (sg_index <= size_to_skip) { ++ while (dst_mapped_nents && sg_index <= size_to_skip) { + dst_mapped_nents--; + offset -= areq_ctx->dst_sgl->length; + sgl = sg_next(areq_ctx->dst_sgl); +@@ -1117,13 +1113,15 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) + } + + size_to_map = req->cryptlen + areq_ctx->assoclen; +- if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) ++ /* If we do in-place encryption, we also need the auth tag */ ++ if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) && ++ (req->src == req->dst)) { + size_to_map += authsize; +- ++ } + if (is_gcm4543) + size_to_map += crypto_aead_ivsize(tfm); + rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL, +- &areq_ctx->src.nents, ++ &areq_ctx->src.mapped_nents, + (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + + LLI_MAX_NUM_OF_DATA_ENTRIES), + &dummy, &mapped_nents); +diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h +index af434872c6ff..827b6cb1236e 100644 +--- a/drivers/crypto/ccree/cc_buffer_mgr.h ++++ b/drivers/crypto/ccree/cc_buffer_mgr.h +@@ -25,6 +25,7 @@ enum cc_sg_cpy_direct { + + struct cc_mlli { + cc_sram_addr_t sram_addr; ++ unsigned int mapped_nents; + unsigned int nents; //sg nents + unsigned int mlli_nents; //mlli nents might be different than the above + }; +diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c +index f438b425c655..9861624aa077 100644 +--- a/drivers/crypto/mxs-dcp.c ++++ 
b/drivers/crypto/mxs-dcp.c +@@ -20,6 +20,7 @@ + #include <crypto/sha.h> + #include <crypto/internal/hash.h> + #include <crypto/internal/skcipher.h> ++#include <crypto/scatterwalk.h> + + #define DCP_MAX_CHANS 4 + #define DCP_BUF_SZ PAGE_SIZE +@@ -621,49 +622,46 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq) + struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); + struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); + struct hash_alg_common *halg = crypto_hash_alg_common(tfm); +- const int nents = sg_nents(req->src); + + uint8_t *in_buf = sdcp->coh->sha_in_buf; + uint8_t *out_buf = sdcp->coh->sha_out_buf; + +- uint8_t *src_buf; +- + struct scatterlist *src; + +- unsigned int i, len, clen; ++ unsigned int i, len, clen, oft = 0; + int ret; + + int fin = rctx->fini; + if (fin) + rctx->fini = 0; + +- for_each_sg(req->src, src, nents, i) { +- src_buf = sg_virt(src); +- len = sg_dma_len(src); +- +- do { +- if (actx->fill + len > DCP_BUF_SZ) +- clen = DCP_BUF_SZ - actx->fill; +- else +- clen = len; +- +- memcpy(in_buf + actx->fill, src_buf, clen); +- len -= clen; +- src_buf += clen; +- actx->fill += clen; ++ src = req->src; ++ len = req->nbytes; + +- /* +- * If we filled the buffer and still have some +- * more data, submit the buffer. +- */ +- if (len && actx->fill == DCP_BUF_SZ) { +- ret = mxs_dcp_run_sha(req); +- if (ret) +- return ret; +- actx->fill = 0; +- rctx->init = 0; +- } +- } while (len); ++ while (len) { ++ if (actx->fill + len > DCP_BUF_SZ) ++ clen = DCP_BUF_SZ - actx->fill; ++ else ++ clen = len; ++ ++ scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen, ++ 0); ++ ++ len -= clen; ++ oft += clen; ++ actx->fill += clen; ++ ++ /* ++ * If we filled the buffer and still have some ++ * more data, submit the buffer. ++ */ ++ if (len && actx->fill == DCP_BUF_SZ) { ++ ret = mxs_dcp_run_sha(req); ++ if (ret) ++ return ret; ++ actx->fill = 0; ++ rctx->init = 0; ++ } + } + + if (fin) { +diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c +index 69e0d90460e6..2349f2ad946b 100644 +--- a/drivers/edac/edac_mc.c ++++ b/drivers/edac/edac_mc.c +@@ -1180,20 +1180,21 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type, + * channel/memory controller/... may be affected. + * Also, don't show errors for empty DIMM slots. 
+ */ +- if (!e->enable_per_layer_report || !dimm->nr_pages) ++ if (!dimm->nr_pages) + continue; + +- if (n_labels >= EDAC_MAX_LABELS) { +- e->enable_per_layer_report = false; +- break; +- } + n_labels++; +- if (p != e->label) { +- strcpy(p, OTHER_LABEL); +- p += strlen(OTHER_LABEL); ++ if (n_labels > EDAC_MAX_LABELS) { ++ p = e->label; ++ *p = '\0'; ++ } else { ++ if (p != e->label) { ++ strcpy(p, OTHER_LABEL); ++ p += strlen(OTHER_LABEL); ++ } ++ strcpy(p, dimm->label); ++ p += strlen(p); + } +- strcpy(p, dimm->label); +- p += strlen(p); + + /* + * get csrow/channel of the DIMM, in order to allow +diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c +index a479023fa036..77eaa9a2fd15 100644 +--- a/drivers/firmware/arm_sdei.c ++++ b/drivers/firmware/arm_sdei.c +@@ -491,11 +491,6 @@ static int _sdei_event_unregister(struct sdei_event *event) + { + lockdep_assert_held(&sdei_events_lock); + +- spin_lock(&sdei_list_lock); +- event->reregister = false; +- event->reenable = false; +- spin_unlock(&sdei_list_lock); +- + if (event->type == SDEI_EVENT_TYPE_SHARED) + return sdei_api_event_unregister(event->event_num); + +@@ -518,6 +513,11 @@ int sdei_event_unregister(u32 event_num) + break; + } + ++ spin_lock(&sdei_list_lock); ++ event->reregister = false; ++ event->reenable = false; ++ spin_unlock(&sdei_list_lock); ++ + err = _sdei_event_unregister(event); + if (err) + break; +@@ -585,26 +585,15 @@ static int _sdei_event_register(struct sdei_event *event) + + lockdep_assert_held(&sdei_events_lock); + +- spin_lock(&sdei_list_lock); +- event->reregister = true; +- spin_unlock(&sdei_list_lock); +- + if (event->type == SDEI_EVENT_TYPE_SHARED) + return sdei_api_event_register(event->event_num, + sdei_entry_point, + event->registered, + SDEI_EVENT_REGISTER_RM_ANY, 0); + +- + err = sdei_do_cross_call(_local_event_register, event); +- if (err) { +- spin_lock(&sdei_list_lock); +- event->reregister = false; +- event->reenable = false; +- spin_unlock(&sdei_list_lock); +- ++ if (err) + sdei_do_cross_call(_local_event_unregister, event); +- } + + return err; + } +@@ -632,8 +621,17 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg) + break; + } + ++ spin_lock(&sdei_list_lock); ++ event->reregister = true; ++ spin_unlock(&sdei_list_lock); ++ + err = _sdei_event_register(event); + if (err) { ++ spin_lock(&sdei_list_lock); ++ event->reregister = false; ++ event->reenable = false; ++ spin_unlock(&sdei_list_lock); ++ + sdei_event_destroy(event); + pr_warn("Failed to register event %u: %d\n", event_num, + err); +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c +index a9778591341b..8c32054a266c 100644 +--- a/drivers/firmware/efi/efi.c ++++ b/drivers/firmware/efi/efi.c +@@ -570,7 +570,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, + } + } + +- if (efi_enabled(EFI_MEMMAP)) ++ if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP)) + efi_memattr_init(); + + efi_tpm_eventlog_init(); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +index 9a8a1c6ca321..7d340c9ec303 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +@@ -2259,8 +2259,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) + { + int i, r; + +- amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); +- amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); + + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { + if 
(!adev->ip_blocks[i].status.valid) +@@ -3242,6 +3240,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) + } + } + ++ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); ++ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); ++ + amdgpu_amdkfd_suspend(adev); + + amdgpu_ras_suspend(adev); +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +index 67f30fec94df..2bafd1a1ed89 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +@@ -1040,6 +1040,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) + adev->gfx.mec_fw_write_wait = true; + break; + default: ++ adev->gfx.me_fw_write_wait = true; ++ adev->gfx.mec_fw_write_wait = true; + break; + } + } +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +index 5f683d118d2a..503c0e3cc4a7 100644 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +@@ -641,7 +641,7 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params + /* Find lowest DPM, FCLK is filled in reverse order*/ + + for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) { +- if (clock_table->FClocks[i].Freq != 0) { ++ if (clock_table->FClocks[i].Freq != 0 && clock_table->FClocks[i].Vol != 0) { + j = i; + break; + } +diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +index 977bdd962e98..df47acab2ec0 100644 +--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c ++++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +@@ -181,6 +181,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, + uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; + DpmClocks_t *clk_table = smu->smu_table.clocks_table; + SmuMetrics_t metrics; ++ bool cur_value_match_level = false; + + if (!clk_table || clk_type >= SMU_CLK_COUNT) + return -EINVAL; +@@ -240,8 +241,13 @@ static int renoir_print_clk_levels(struct smu_context *smu, + GET_DPM_CUR_FREQ(clk_table, clk_type, i, value); + size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, + cur_value == value ? 
"*" : ""); ++ if (cur_value == value) ++ cur_value_match_level = true; + } + ++ if (!cur_value_match_level) ++ size += sprintf(buf + size, " %uMhz *\n", cur_value); ++ + return size; + } + +diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h +index 2a390ddd37dd..89cd6da118a3 100644 +--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h ++++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h +@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu); + freq = table->SocClocks[dpm_level].Freq; \ + break; \ + case SMU_MCLK: \ +- freq = table->MemClocks[dpm_level].Freq; \ ++ freq = table->FClocks[dpm_level].Freq; \ + break; \ + case SMU_DCEFCLK: \ + freq = table->DcfClocks[dpm_level].Freq; \ +diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c +index 274989f96a91..914263a1afab 100644 +--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c ++++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c +@@ -866,10 +866,9 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx) + if (err) + return err; + +- dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd); +- dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]); + err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], +- SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]); ++ SP_DP_MAIN_LINK_BW_SET_REG, ++ anx78xx->dpcd[DP_MAX_LINK_RATE]); + if (err) + return err; + +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c +index 4a65ef8d8bff..c9dd41175853 100644 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c +@@ -3437,9 +3437,9 @@ static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count) + int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state) + { + int ret = 0; +- int i = 0; + struct drm_dp_mst_branch *mstb = NULL; + ++ mutex_lock(&mgr->payload_lock); + mutex_lock(&mgr->lock); + if (mst_state == mgr->mst_state) + goto out_unlock; +@@ -3498,25 +3498,18 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms + /* this can fail if the device is gone */ + drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); + ret = 0; +- mutex_lock(&mgr->payload_lock); +- memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); ++ memset(mgr->payloads, 0, ++ mgr->max_payloads * sizeof(mgr->payloads[0])); ++ memset(mgr->proposed_vcpis, 0, ++ mgr->max_payloads * sizeof(mgr->proposed_vcpis[0])); + mgr->payload_mask = 0; + set_bit(0, &mgr->payload_mask); +- for (i = 0; i < mgr->max_payloads; i++) { +- struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; +- +- if (vcpi) { +- vcpi->vcpi = 0; +- vcpi->num_slots = 0; +- } +- mgr->proposed_vcpis[i] = NULL; +- } + mgr->vcpi_mask = 0; +- mutex_unlock(&mgr->payload_lock); + } + + out_unlock: + mutex_unlock(&mgr->lock); ++ mutex_unlock(&mgr->payload_lock); + if (mstb) + drm_dp_mst_topology_put_mstb(mstb); + return ret; +diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c +index a86a3ab2771c..235729f4aadb 100644 +--- a/drivers/gpu/drm/drm_pci.c ++++ b/drivers/gpu/drm/drm_pci.c +@@ -51,8 +51,6 @@ + drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align) + { + drm_dma_handle_t *dmah; +- unsigned long addr; +- size_t sz; + + /* pci_alloc_consistent only guarantees alignment to the smallest + * PAGE_SIZE order which is greater than or equal to the requested size. 
+@@ -68,20 +66,13 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali + dmah->size = size; + dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, + &dmah->busaddr, +- GFP_KERNEL | __GFP_COMP); ++ GFP_KERNEL); + + if (dmah->vaddr == NULL) { + kfree(dmah); + return NULL; + } + +- /* XXX - Is virt_to_page() legal for consistent mem? */ +- /* Reserve */ +- for (addr = (unsigned long)dmah->vaddr, sz = size; +- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { +- SetPageReserved(virt_to_page((void *)addr)); +- } +- + return dmah; + } + +@@ -94,19 +85,9 @@ EXPORT_SYMBOL(drm_pci_alloc); + */ + void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) + { +- unsigned long addr; +- size_t sz; +- +- if (dmah->vaddr) { +- /* XXX - Is virt_to_page() legal for consistent mem? */ +- /* Unreserve */ +- for (addr = (unsigned long)dmah->vaddr, sz = dmah->size; +- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { +- ClearPageReserved(virt_to_page((void *)addr)); +- } ++ if (dmah->vaddr) + dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr, + dmah->busaddr); +- } + } + + /** +diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c +index 698f5e81591f..18d46b172f06 100644 +--- a/drivers/gpu/drm/drm_prime.c ++++ b/drivers/gpu/drm/drm_prime.c +@@ -959,27 +959,40 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, + unsigned count; + struct scatterlist *sg; + struct page *page; +- u32 len, index; ++ u32 page_len, page_index; + dma_addr_t addr; ++ u32 dma_len, dma_index; + +- index = 0; ++ /* ++ * Scatterlist elements contains both pages and DMA addresses, but ++ * one shoud not assume 1:1 relation between them. The sg->length is ++ * the size of the physical memory chunk described by the sg->page, ++ * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk ++ * described by the sg_dma_address(sg). 
++ */ ++ page_index = 0; ++ dma_index = 0; + for_each_sg(sgt->sgl, sg, sgt->nents, count) { +- len = sg_dma_len(sg); ++ page_len = sg->length; + page = sg_page(sg); ++ dma_len = sg_dma_len(sg); + addr = sg_dma_address(sg); + +- while (len > 0) { +- if (WARN_ON(index >= max_entries)) ++ while (pages && page_len > 0) { ++ if (WARN_ON(page_index >= max_entries)) + return -1; +- if (pages) +- pages[index] = page; +- if (addrs) +- addrs[index] = addr; +- ++ pages[page_index] = page; + page++; ++ page_len -= PAGE_SIZE; ++ page_index++; ++ } ++ while (addrs && dma_len > 0) { ++ if (WARN_ON(dma_index >= max_entries)) ++ return -1; ++ addrs[dma_index] = addr; + addr += PAGE_SIZE; +- len -= PAGE_SIZE; +- index++; ++ dma_len -= PAGE_SIZE; ++ dma_index++; + } + } + return 0; +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +index 8adbf2861bff..e6795bafcbb9 100644 +--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c ++++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +@@ -32,6 +32,7 @@ struct etnaviv_pm_domain { + }; + + struct etnaviv_pm_domain_meta { ++ unsigned int feature; + const struct etnaviv_pm_domain *domains; + u32 nr_domains; + }; +@@ -410,36 +411,78 @@ static const struct etnaviv_pm_domain doms_vg[] = { + + static const struct etnaviv_pm_domain_meta doms_meta[] = { + { ++ .feature = chipFeatures_PIPE_3D, + .nr_domains = ARRAY_SIZE(doms_3d), + .domains = &doms_3d[0] + }, + { ++ .feature = chipFeatures_PIPE_2D, + .nr_domains = ARRAY_SIZE(doms_2d), + .domains = &doms_2d[0] + }, + { ++ .feature = chipFeatures_PIPE_VG, + .nr_domains = ARRAY_SIZE(doms_vg), + .domains = &doms_vg[0] + } + }; + ++static unsigned int num_pm_domains(const struct etnaviv_gpu *gpu) ++{ ++ unsigned int num = 0, i; ++ ++ for (i = 0; i < ARRAY_SIZE(doms_meta); i++) { ++ const struct etnaviv_pm_domain_meta *meta = &doms_meta[i]; ++ ++ if (gpu->identity.features & meta->feature) ++ num += meta->nr_domains; ++ } ++ ++ return num; ++} ++ ++static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu, ++ unsigned int index) ++{ ++ const struct etnaviv_pm_domain *domain = NULL; ++ unsigned int offset = 0, i; ++ ++ for (i = 0; i < ARRAY_SIZE(doms_meta); i++) { ++ const struct etnaviv_pm_domain_meta *meta = &doms_meta[i]; ++ ++ if (!(gpu->identity.features & meta->feature)) ++ continue; ++ ++ if (meta->nr_domains < (index - offset)) { ++ offset += meta->nr_domains; ++ continue; ++ } ++ ++ domain = meta->domains + (index - offset); ++ } ++ ++ return domain; ++} ++ + int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu, + struct drm_etnaviv_pm_domain *domain) + { +- const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe]; ++ const unsigned int nr_domains = num_pm_domains(gpu); + const struct etnaviv_pm_domain *dom; + +- if (domain->iter >= meta->nr_domains) ++ if (domain->iter >= nr_domains) + return -EINVAL; + +- dom = meta->domains + domain->iter; ++ dom = pm_domain(gpu, domain->iter); ++ if (!dom) ++ return -EINVAL; + + domain->id = domain->iter; + domain->nr_signals = dom->nr_signals; + strncpy(domain->name, dom->name, sizeof(domain->name)); + + domain->iter++; +- if (domain->iter == meta->nr_domains) ++ if (domain->iter == nr_domains) + domain->iter = 0xff; + + return 0; +@@ -448,14 +491,16 @@ int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu, + int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu, + struct drm_etnaviv_pm_signal *signal) + { +- const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe]; ++ const unsigned int nr_domains = 
num_pm_domains(gpu); + const struct etnaviv_pm_domain *dom; + const struct etnaviv_pm_signal *sig; + +- if (signal->domain >= meta->nr_domains) ++ if (signal->domain >= nr_domains) + return -EINVAL; + +- dom = meta->domains + signal->domain; ++ dom = pm_domain(gpu, signal->domain); ++ if (!dom) ++ return -EINVAL; + + if (signal->iter >= dom->nr_signals) + return -EINVAL; +diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c +index 1488822398fe..4872c357eb6d 100644 +--- a/drivers/gpu/drm/i915/display/intel_ddi.c ++++ b/drivers/gpu/drm/i915/display/intel_ddi.c +@@ -2235,7 +2235,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, + return; + + dig_port = enc_to_dig_port(&encoder->base); +- intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); ++ ++ if (!intel_phy_is_tc(dev_priv, phy) || ++ dig_port->tc_mode != TC_PORT_TBT_ALT) ++ intel_display_power_get(dev_priv, ++ dig_port->ddi_io_power_domain); + + /* + * AUX power is only needed for (e)DP mode, and for HDMI mode on TC +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +index 768c4b607dd2..b9757912b09e 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +@@ -939,11 +939,13 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache) + + static void reloc_gpu_flush(struct reloc_cache *cache) + { +- GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32)); ++ struct drm_i915_gem_object *obj = cache->rq->batch->obj; ++ ++ GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32)); + cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END; + +- __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size); +- i915_gem_object_unpin_map(cache->rq->batch->obj); ++ __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1)); ++ i915_gem_object_unpin_map(obj); + + intel_gt_chipset_flush(cache->rq->engine->gt); + +diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c +index 20d6ee148afc..88b1483bac4a 100644 +--- a/drivers/gpu/drm/i915/gt/intel_rps.c ++++ b/drivers/gpu/drm/i915/gt/intel_rps.c +@@ -763,6 +763,19 @@ void intel_rps_park(struct intel_rps *rps) + intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA); + rps_set(rps, rps->idle_freq); + intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA); ++ ++ /* ++ * Since we will try and restart from the previously requested ++ * frequency on unparking, treat this idle point as a downclock ++ * interrupt and reduce the frequency for resume. If we park/unpark ++ * more frequently than the rps worker can run, we will not respond ++ * to any EI and never see a change in frequency. ++ * ++ * (Note we accommodate Cherryview's limitation of only using an ++ * even bin by applying it to all.) 
++ */ ++ rps->cur_freq = ++ max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq); + } + + void intel_rps_boost(struct i915_request *rq) +diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c +index 3c57e84222ca..5bb9feddbfd6 100644 +--- a/drivers/gpu/drm/scheduler/sched_main.c ++++ b/drivers/gpu/drm/scheduler/sched_main.c +@@ -632,7 +632,9 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) + + trace_drm_sched_process_job(s_fence); + ++ dma_fence_get(&s_fence->finished); + drm_sched_fence_finished(s_fence); ++ dma_fence_put(&s_fence->finished); + wake_up_interruptible(&sched->wake_up_worker); + } + +diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c +index 8512d970a09f..ac8f75db2ecd 100644 +--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c ++++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c +@@ -41,6 +41,10 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + if (!vbox_check_supported(VBE_DISPI_ID_HGSMI)) + return -ENODEV; + ++ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "vboxvideodrmfb"); ++ if (ret) ++ return ret; ++ + vbox = kzalloc(sizeof(*vbox), GFP_KERNEL); + if (!vbox) + return -ENOMEM; +diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c +index a7a81846d5b1..635dd697ac0b 100644 +--- a/drivers/i2c/busses/i2c-pca-platform.c ++++ b/drivers/i2c/busses/i2c-pca-platform.c +@@ -140,7 +140,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev) + int ret = 0; + int irq; + +- irq = platform_get_irq(pdev, 0); ++ irq = platform_get_irq_optional(pdev, 0); + /* If irq is 0, we do polling. */ + if (irq < 0) + irq = 0; +diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c +index 54e1fc8a495e..f7f7b5b64720 100644 +--- a/drivers/i2c/busses/i2c-st.c ++++ b/drivers/i2c/busses/i2c-st.c +@@ -434,6 +434,7 @@ static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev) + /** + * st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode + * @i2c_dev: Controller's private data ++ * @max: Maximum amount of data to fill into the Tx FIFO + * + * This functions fills the Tx FIFO with fixed pattern when + * in read mode to trigger clock. 
+diff --git a/drivers/input/keyboard/tm2-touchkey.c b/drivers/input/keyboard/tm2-touchkey.c +index 14b55bacdd0f..fb078e049413 100644 +--- a/drivers/input/keyboard/tm2-touchkey.c ++++ b/drivers/input/keyboard/tm2-touchkey.c +@@ -75,6 +75,14 @@ static struct touchkey_variant aries_touchkey_variant = { + .cmd_led_off = ARIES_TOUCHKEY_CMD_LED_OFF, + }; + ++static const struct touchkey_variant tc360_touchkey_variant = { ++ .keycode_reg = 0x00, ++ .base_reg = 0x00, ++ .fixed_regulator = true, ++ .cmd_led_on = TM2_TOUCHKEY_CMD_LED_ON, ++ .cmd_led_off = TM2_TOUCHKEY_CMD_LED_OFF, ++}; ++ + static int tm2_touchkey_led_brightness_set(struct led_classdev *led_dev, + enum led_brightness brightness) + { +@@ -327,6 +335,9 @@ static const struct of_device_id tm2_touchkey_of_match[] = { + }, { + .compatible = "cypress,aries-touchkey", + .data = &aries_touchkey_variant, ++ }, { ++ .compatible = "coreriver,tc360-touchkey", ++ .data = &tc360_touchkey_variant, + }, + { }, + }; +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index dc974c288e88..08e919dbeb5d 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -530,6 +530,17 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"), + }, + }, ++ { ++ /* ++ * Acer Aspire 5738z ++ * Touchpad stops working in mux mode when dis- + re-enabled ++ * with the touchpad enable/disable toggle hotkey ++ */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), ++ }, ++ }, + { } + }; + +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c +index 50f89056c16b..8c757a125a55 100644 +--- a/drivers/irqchip/irq-gic-v3-its.c ++++ b/drivers/irqchip/irq-gic-v3-its.c +@@ -3142,12 +3142,18 @@ static int its_vpe_set_irqchip_state(struct irq_data *d, + return 0; + } + ++static int its_vpe_retrigger(struct irq_data *d) ++{ ++ return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); ++} ++ + static struct irq_chip its_vpe_irq_chip = { + .name = "GICv4-vpe", + .irq_mask = its_vpe_mask_irq, + .irq_unmask = its_vpe_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, ++ .irq_retrigger = its_vpe_retrigger, + .irq_set_irqchip_state = its_vpe_set_irqchip_state, + .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, + }; +diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c +index 928858dada75..f1386733d3bc 100644 +--- a/drivers/irqchip/irq-versatile-fpga.c ++++ b/drivers/irqchip/irq-versatile-fpga.c +@@ -6,6 +6,7 @@ + #include <linux/irq.h> + #include <linux/io.h> + #include <linux/irqchip.h> ++#include <linux/irqchip/chained_irq.h> + #include <linux/irqchip/versatile-fpga.h> + #include <linux/irqdomain.h> + #include <linux/module.h> +@@ -68,12 +69,16 @@ static void fpga_irq_unmask(struct irq_data *d) + + static void fpga_irq_handle(struct irq_desc *desc) + { ++ struct irq_chip *chip = irq_desc_get_chip(desc); + struct fpga_irq_data *f = irq_desc_get_handler_data(desc); +- u32 status = readl(f->base + IRQ_STATUS); ++ u32 status; ++ ++ chained_irq_enter(chip, desc); + ++ status = readl(f->base + IRQ_STATUS); + if (status == 0) { + do_bad_IRQ(desc); +- return; ++ goto out; + } + + do { +@@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc) + status &= ~(1 << irq); + generic_handle_irq(irq_find_mapping(f->domain, irq)); + } while (status); ++ ++out: ++ 
chained_irq_exit(chip, desc); + } + + /* +@@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node, + if (of_property_read_u32(node, "valid-mask", &valid_mask)) + valid_mask = 0; + ++ writel(clear_mask, base + IRQ_ENABLE_CLEAR); ++ writel(clear_mask, base + FIQ_ENABLE_CLEAR); ++ + /* Some chips are cascaded from a parent IRQ */ + parent_irq = irq_of_parse_and_map(node, 0); + if (!parent_irq) { +@@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node, + + fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node); + +- writel(clear_mask, base + IRQ_ENABLE_CLEAR); +- writel(clear_mask, base + FIQ_ENABLE_CLEAR); +- + /* + * On Versatile AB/PB, some secondary interrupts have a direct + * pass-thru to the primary controller for IRQs 20 and 22-31 which need +diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c +index c05b12110456..17712456fa63 100644 +--- a/drivers/md/dm-clone-metadata.c ++++ b/drivers/md/dm-clone-metadata.c +@@ -656,7 +656,7 @@ bool dm_clone_is_range_hydrated(struct dm_clone_metadata *cmd, + return (bit >= (start + nr_regions)); + } + +-unsigned long dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd) ++unsigned int dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd) + { + return bitmap_weight(cmd->region_map, cmd->nr_regions); + } +@@ -850,6 +850,12 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re + struct dirty_map *dmap; + unsigned long word, flags; + ++ if (unlikely(region_nr >= cmd->nr_regions)) { ++ DMERR("Region %lu out of range (total number of regions %lu)", ++ region_nr, cmd->nr_regions); ++ return -ERANGE; ++ } ++ + word = region_nr / BITS_PER_LONG; + + spin_lock_irqsave(&cmd->bitmap_lock, flags); +@@ -879,6 +885,13 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start, + struct dirty_map *dmap; + unsigned long word, region_nr; + ++ if (unlikely(start >= cmd->nr_regions || (start + nr_regions) < start || ++ (start + nr_regions) > cmd->nr_regions)) { ++ DMERR("Invalid region range: start %lu, nr_regions %lu (total number of regions %lu)", ++ start, nr_regions, cmd->nr_regions); ++ return -ERANGE; ++ } ++ + spin_lock_irq(&cmd->bitmap_lock); + + if (cmd->read_only) { +diff --git a/drivers/md/dm-clone-metadata.h b/drivers/md/dm-clone-metadata.h +index 14af1ebd853f..d848b8799c07 100644 +--- a/drivers/md/dm-clone-metadata.h ++++ b/drivers/md/dm-clone-metadata.h +@@ -156,7 +156,7 @@ bool dm_clone_is_range_hydrated(struct dm_clone_metadata *cmd, + /* + * Returns the number of hydrated regions. 
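The -ERANGE guards added to the dm-clone metadata helpers above validate a [start, start + nr) window against the region count; the range variant uses an overflow-safe idiom where the (start + nr) < start comparison catches unsigned wraparound. A compact, hypothetical model of that check:

    #include <stdbool.h>
    #include <stdio.h>

    static bool range_valid(unsigned long start, unsigned long nr,
                            unsigned long total)
    {
            /* same three conditions as the guards above, inverted to "valid" */
            return start < total && (start + nr) >= start &&
                   (start + nr) <= total;
    }

    int main(void)
    {
            printf("%d\n", range_valid(10, 5, 100));     /* 1: fits */
            printf("%d\n", range_valid(10, ~0UL, 100));  /* 0: addition wraps */
            printf("%d\n", range_valid(99, 2, 100));     /* 0: runs past the end */
            return 0;
    }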
+ */ +-unsigned long dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd); ++unsigned int dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd); + + /* + * Returns the first unhydrated region with region_nr >= @start +diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c +index d1e1b5b56b1b..5ce96ddf1ce1 100644 +--- a/drivers/md/dm-clone-target.c ++++ b/drivers/md/dm-clone-target.c +@@ -282,7 +282,7 @@ static bool bio_triggers_commit(struct clone *clone, struct bio *bio) + /* Get the address of the region in sectors */ + static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr) + { +- return (region_nr << clone->region_shift); ++ return ((sector_t)region_nr << clone->region_shift); + } + + /* Get the region number of the bio */ +@@ -293,10 +293,17 @@ static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio) + + /* Get the region range covered by the bio */ + static void bio_region_range(struct clone *clone, struct bio *bio, +- unsigned long *rs, unsigned long *re) ++ unsigned long *rs, unsigned long *nr_regions) + { ++ unsigned long end; ++ + *rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size); +- *re = bio_end_sector(bio) >> clone->region_shift; ++ end = bio_end_sector(bio) >> clone->region_shift; ++ ++ if (*rs >= end) ++ *nr_regions = 0; ++ else ++ *nr_regions = end - *rs; + } + + /* Check whether a bio overwrites a region */ +@@ -454,7 +461,7 @@ static void trim_bio(struct bio *bio, sector_t sector, unsigned int len) + + static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success) + { +- unsigned long rs, re; ++ unsigned long rs, nr_regions; + + /* + * If the destination device supports discards, remap and trim the +@@ -463,9 +470,9 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ + */ + if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) { + remap_to_dest(clone, bio); +- bio_region_range(clone, bio, &rs, &re); +- trim_bio(bio, rs << clone->region_shift, +- (re - rs) << clone->region_shift); ++ bio_region_range(clone, bio, &rs, &nr_regions); ++ trim_bio(bio, region_to_sector(clone, rs), ++ nr_regions << clone->region_shift); + generic_make_request(bio); + } else + bio_endio(bio); +@@ -473,12 +480,21 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ + + static void process_discard_bio(struct clone *clone, struct bio *bio) + { +- unsigned long rs, re; ++ unsigned long rs, nr_regions; + +- bio_region_range(clone, bio, &rs, &re); +- BUG_ON(re > clone->nr_regions); ++ bio_region_range(clone, bio, &rs, &nr_regions); ++ if (!nr_regions) { ++ bio_endio(bio); ++ return; ++ } + +- if (unlikely(rs == re)) { ++ if (WARN_ON(rs >= clone->nr_regions || (rs + nr_regions) < rs || ++ (rs + nr_regions) > clone->nr_regions)) { ++ DMERR("%s: Invalid range (%lu + %lu, total regions %lu) for discard (%llu + %u)", ++ clone_device_name(clone), rs, nr_regions, ++ clone->nr_regions, ++ (unsigned long long)bio->bi_iter.bi_sector, ++ bio_sectors(bio)); + bio_endio(bio); + return; + } +@@ -487,7 +503,7 @@ static void process_discard_bio(struct clone *clone, struct bio *bio) + * The covered regions are already hydrated so we just need to pass + * down the discard. 
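The reworked bio_region_range() above reports a region count rather than an end index, rounding the start up and the end down so a bio that covers no whole region yields nr_regions == 0. A stand-alone model of that computation, assuming a power-of-two region size as the driver does:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    static void bio_region_range_model(sector_t bi_sector, sector_t nr_sectors,
                                       unsigned int region_shift,
                                       unsigned long *rs, unsigned long *nr_regions)
    {
            sector_t region_size = 1ULL << region_shift;
            sector_t end_sector = bi_sector + nr_sectors;
            unsigned long end;

            *rs = (bi_sector + region_size - 1) >> region_shift; /* round up */
            end = end_sector >> region_shift;                    /* round down */
            *nr_regions = (*rs >= end) ? 0 : end - *rs;
    }

    int main(void)
    {
            unsigned long rs, nr;

            bio_region_range_model(8, 24, 3, &rs, &nr);  /* sectors 8..31, 8-sector regions */
            printf("rs=%lu nr=%lu\n", rs, nr);           /* rs=1 nr=3 */
            bio_region_range_model(9, 4, 3, &rs, &nr);   /* strictly inside one region */
            printf("rs=%lu nr=%lu\n", rs, nr);           /* nr=0 */
            return 0;
    }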
+ */ +- if (dm_clone_is_range_hydrated(clone->cmd, rs, re - rs)) { ++ if (dm_clone_is_range_hydrated(clone->cmd, rs, nr_regions)) { + complete_discard_bio(clone, bio, true); + return; + } +@@ -788,11 +804,14 @@ static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr + struct dm_io_region from, to; + struct clone *clone = hd->clone; + ++ if (WARN_ON(!nr_regions)) ++ return; ++ + region_size = clone->region_size; + region_start = hd->region_nr; + region_end = region_start + nr_regions - 1; + +- total_size = (nr_regions - 1) << clone->region_shift; ++ total_size = region_to_sector(clone, nr_regions - 1); + + if (region_end == clone->nr_regions - 1) { + /* +@@ -1169,7 +1188,7 @@ static void process_deferred_discards(struct clone *clone) + int r = -EPERM; + struct bio *bio; + struct blk_plug plug; +- unsigned long rs, re; ++ unsigned long rs, nr_regions; + struct bio_list discards = BIO_EMPTY_LIST; + + spin_lock_irq(&clone->lock); +@@ -1185,14 +1204,13 @@ static void process_deferred_discards(struct clone *clone) + + /* Update the metadata */ + bio_list_for_each(bio, &discards) { +- bio_region_range(clone, bio, &rs, &re); ++ bio_region_range(clone, bio, &rs, &nr_regions); + /* + * A discard request might cover regions that have been already + * hydrated. There is no need to update the metadata for these + * regions. + */ +- r = dm_clone_cond_set_range(clone->cmd, rs, re - rs); +- ++ r = dm_clone_cond_set_range(clone->cmd, rs, nr_regions); + if (unlikely(r)) + break; + } +@@ -1455,7 +1473,7 @@ static void clone_status(struct dm_target *ti, status_type_t type, + goto error; + } + +- DMEMIT("%u %llu/%llu %llu %lu/%lu %u ", ++ DMEMIT("%u %llu/%llu %llu %u/%lu %u ", + DM_CLONE_METADATA_BLOCK_SIZE, + (unsigned long long)(nr_metadata_blocks - nr_free_metadata_blocks), + (unsigned long long)nr_metadata_blocks, +@@ -1775,6 +1793,7 @@ error: + static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv) + { + int r; ++ sector_t nr_regions; + struct clone *clone; + struct dm_arg_set as; + +@@ -1816,7 +1835,16 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv) + goto out_with_source_dev; + + clone->region_shift = __ffs(clone->region_size); +- clone->nr_regions = dm_sector_div_up(ti->len, clone->region_size); ++ nr_regions = dm_sector_div_up(ti->len, clone->region_size); ++ ++ /* Check for overflow */ ++ if (nr_regions != (unsigned long)nr_regions) { ++ ti->error = "Too many regions. Consider increasing the region size"; ++ r = -EOVERFLOW; ++ goto out_with_source_dev; ++ } ++ ++ clone->nr_regions = nr_regions; + + r = validate_nr_regions(clone->nr_regions, &ti->error); + if (r) +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c +index a82a9c257744..657cefe44195 100644 +--- a/drivers/md/dm-integrity.c ++++ b/drivers/md/dm-integrity.c +@@ -1519,7 +1519,7 @@ static void integrity_metadata(struct work_struct *w) + struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); + char *checksums; + unsigned extra_space = unlikely(digest_size > ic->tag_size) ? 
digest_size - ic->tag_size : 0; +- char checksums_onstack[HASH_MAX_DIGESTSIZE]; ++ char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; + unsigned sectors_to_process = dio->range.n_sectors; + sector_t sector = dio->range.logical_sector; + +@@ -1748,7 +1748,7 @@ retry_kmap: + } while (++s < ic->sectors_per_block); + #ifdef INTERNAL_VERIFY + if (ic->internal_hash) { +- char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; ++ char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; + + integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack); + if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { +diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c +index 3ceeb6b404ed..49147e634046 100644 +--- a/drivers/md/dm-verity-fec.c ++++ b/drivers/md/dm-verity-fec.c +@@ -551,6 +551,7 @@ void verity_fec_dtr(struct dm_verity *v) + mempool_exit(&f->rs_pool); + mempool_exit(&f->prealloc_pool); + mempool_exit(&f->extra_pool); ++ mempool_exit(&f->output_pool); + kmem_cache_destroy(f->cache); + + if (f->data_bufio) +diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c +index cd9e4c8a023a..af190018d10a 100644 +--- a/drivers/md/dm-writecache.c ++++ b/drivers/md/dm-writecache.c +@@ -872,6 +872,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc) + struct wc_entry *e = &wc->entries[b]; + e->index = b; + e->write_in_progress = false; ++ cond_resched(); + } + + return 0; +@@ -926,6 +927,7 @@ static void writecache_resume(struct dm_target *ti) + e->original_sector = le64_to_cpu(wme.original_sector); + e->seq_count = le64_to_cpu(wme.seq_count); + } ++ cond_resched(); + } + #endif + for (b = 0; b < wc->n_blocks; b++) { +@@ -1770,8 +1772,10 @@ static int init_memory(struct dm_writecache *wc) + pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks)); + pmem_assign(sb(wc)->seq_count, cpu_to_le64(0)); + +- for (b = 0; b < wc->n_blocks; b++) ++ for (b = 0; b < wc->n_blocks; b++) { + write_original_sector_seq_count(wc, &wc->entries[b], -1, -1); ++ cond_resched(); ++ } + + writecache_flush_all_metadata(wc); + writecache_commit_flushed(wc, false); +diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c +index 516c7b671d25..369de15c4e80 100644 +--- a/drivers/md/dm-zoned-metadata.c ++++ b/drivers/md/dm-zoned-metadata.c +@@ -1109,7 +1109,6 @@ static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data) + switch (blkz->type) { + case BLK_ZONE_TYPE_CONVENTIONAL: + set_bit(DMZ_RND, &zone->flags); +- zmd->nr_rnd_zones++; + break; + case BLK_ZONE_TYPE_SEQWRITE_REQ: + case BLK_ZONE_TYPE_SEQWRITE_PREF: +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 4e7c9f398bc6..6b69a12ca2d8 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -6040,7 +6040,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes); + static void mddev_detach(struct mddev *mddev) + { + md_bitmap_wait_behind_writes(mddev); +- if (mddev->pers && mddev->pers->quiesce) { ++ if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) { + mddev->pers->quiesce(mddev, 1); + mddev->pers->quiesce(mddev, 0); + } +diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c +index d6cd15bb699a..cc678d9d2e0d 100644 +--- a/drivers/media/i2c/ov5695.c ++++ b/drivers/media/i2c/ov5695.c +@@ -971,16 +971,9 @@ unlock_and_return: + return ret; + } + +-/* Calculate the delay in us by clock rate and clock cycles */ +-static inline u32 ov5695_cal_delay(u32 cycles) +-{ +- return DIV_ROUND_UP(cycles, 
OV5695_XVCLK_FREQ / 1000 / 1000); +-} +- + static int __ov5695_power_on(struct ov5695 *ov5695) + { +- int ret; +- u32 delay_us; ++ int i, ret; + struct device *dev = &ov5695->client->dev; + + ret = clk_prepare_enable(ov5695->xvclk); +@@ -991,21 +984,28 @@ static int __ov5695_power_on(struct ov5695 *ov5695) + + gpiod_set_value_cansleep(ov5695->reset_gpio, 1); + +- ret = regulator_bulk_enable(OV5695_NUM_SUPPLIES, ov5695->supplies); +- if (ret < 0) { +- dev_err(dev, "Failed to enable regulators\n"); +- goto disable_clk; ++ /* ++ * The hardware requires the regulators to be powered on in order, ++ * so enable them one by one. ++ */ ++ for (i = 0; i < OV5695_NUM_SUPPLIES; i++) { ++ ret = regulator_enable(ov5695->supplies[i].consumer); ++ if (ret) { ++ dev_err(dev, "Failed to enable %s: %d\n", ++ ov5695->supplies[i].supply, ret); ++ goto disable_reg_clk; ++ } + } + + gpiod_set_value_cansleep(ov5695->reset_gpio, 0); + +- /* 8192 cycles prior to first SCCB transaction */ +- delay_us = ov5695_cal_delay(8192); +- usleep_range(delay_us, delay_us * 2); ++ usleep_range(1000, 1200); + + return 0; + +-disable_clk: ++disable_reg_clk: ++ for (--i; i >= 0; i--) ++ regulator_disable(ov5695->supplies[i].consumer); + clk_disable_unprepare(ov5695->xvclk); + + return ret; +@@ -1013,9 +1013,22 @@ disable_clk: + + static void __ov5695_power_off(struct ov5695 *ov5695) + { ++ struct device *dev = &ov5695->client->dev; ++ int i, ret; ++ + clk_disable_unprepare(ov5695->xvclk); + gpiod_set_value_cansleep(ov5695->reset_gpio, 1); +- regulator_bulk_disable(OV5695_NUM_SUPPLIES, ov5695->supplies); ++ ++ /* ++ * The hardware requires the regulators to be powered off in order, ++ * so disable them one by one. ++ */ ++ for (i = OV5695_NUM_SUPPLIES - 1; i >= 0; i--) { ++ ret = regulator_disable(ov5695->supplies[i].consumer); ++ if (ret) ++ dev_err(dev, "Failed to disable %s: %d\n", ++ ov5695->supplies[i].supply, ret); ++ } + } + + static int __maybe_unused ov5695_runtime_resume(struct device *dev) +@@ -1285,7 +1298,7 @@ static int ov5695_probe(struct i2c_client *client, + if (clk_get_rate(ov5695->xvclk) != OV5695_XVCLK_FREQ) + dev_warn(dev, "xvclk mismatched, modes are based on 24MHz\n"); + +- ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); ++ ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ov5695->reset_gpio)) { + dev_err(dev, "Failed to get reset-gpios\n"); + return -EINVAL; +diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c +index 078141712c88..0b977e73ceb2 100644 +--- a/drivers/media/i2c/video-i2c.c ++++ b/drivers/media/i2c/video-i2c.c +@@ -255,7 +255,7 @@ static int amg88xx_set_power(struct video_i2c_data *data, bool on) + return amg88xx_set_power_off(data); + } + +-#if IS_ENABLED(CONFIG_HWMON) ++#if IS_REACHABLE(CONFIG_HWMON) + + static const u32 amg88xx_temp_config[] = { + HWMON_T_INPUT, +diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c +index 6720d11f50cf..b065ccd06914 100644 +--- a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c ++++ b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c +@@ -15,7 +15,7 @@ static inline struct mtk_mdp_ctx *vpu_to_ctx(struct mtk_mdp_vpu *vpu) + return container_of(vpu, struct mtk_mdp_ctx, vpu); + } + +-static void mtk_mdp_vpu_handle_init_ack(struct mdp_ipi_comm_ack *msg) ++static void mtk_mdp_vpu_handle_init_ack(const struct mdp_ipi_comm_ack *msg) + { + struct mtk_mdp_vpu *vpu = (struct mtk_mdp_vpu *) + (unsigned long)msg->ap_inst; +@@ -26,10 +26,11 @@ static void 
mtk_mdp_vpu_handle_init_ack(struct mdp_ipi_comm_ack *msg) + vpu->inst_addr = msg->vpu_inst_addr; + } + +-static void mtk_mdp_vpu_ipi_handler(void *data, unsigned int len, void *priv) ++static void mtk_mdp_vpu_ipi_handler(const void *data, unsigned int len, ++ void *priv) + { +- unsigned int msg_id = *(unsigned int *)data; +- struct mdp_ipi_comm_ack *msg = (struct mdp_ipi_comm_ack *)data; ++ const struct mdp_ipi_comm_ack *msg = data; ++ unsigned int msg_id = msg->msg_id; + struct mtk_mdp_vpu *vpu = (struct mtk_mdp_vpu *) + (unsigned long)msg->ap_inst; + struct mtk_mdp_ctx *ctx; +diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c +index 70abfd4cd4b9..948a12fd9d46 100644 +--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c ++++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c +@@ -9,7 +9,7 @@ + #include "vdec_ipi_msg.h" + #include "vdec_vpu_if.h" + +-static void handle_init_ack_msg(struct vdec_vpu_ipi_init_ack *msg) ++static void handle_init_ack_msg(const struct vdec_vpu_ipi_init_ack *msg) + { + struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *) + (unsigned long)msg->ap_inst_addr; +@@ -34,9 +34,9 @@ static void handle_init_ack_msg(struct vdec_vpu_ipi_init_ack *msg) + * This function runs in interrupt context and it means there's an IPI MSG + * from VPU. + */ +-static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv) ++static void vpu_dec_ipi_handler(const void *data, unsigned int len, void *priv) + { +- struct vdec_vpu_ipi_ack *msg = data; ++ const struct vdec_vpu_ipi_ack *msg = data; + struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *) + (unsigned long)msg->ap_inst_addr; + +diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c +index 3e931b0ed096..9540709c1905 100644 +--- a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c ++++ b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c +@@ -8,26 +8,26 @@ + #include "venc_ipi_msg.h" + #include "venc_vpu_if.h" + +-static void handle_enc_init_msg(struct venc_vpu_inst *vpu, void *data) ++static void handle_enc_init_msg(struct venc_vpu_inst *vpu, const void *data) + { +- struct venc_vpu_ipi_msg_init *msg = data; ++ const struct venc_vpu_ipi_msg_init *msg = data; + + vpu->inst_addr = msg->vpu_inst_addr; + vpu->vsi = vpu_mapping_dm_addr(vpu->dev, msg->vpu_inst_addr); + } + +-static void handle_enc_encode_msg(struct venc_vpu_inst *vpu, void *data) ++static void handle_enc_encode_msg(struct venc_vpu_inst *vpu, const void *data) + { +- struct venc_vpu_ipi_msg_enc *msg = data; ++ const struct venc_vpu_ipi_msg_enc *msg = data; + + vpu->state = msg->state; + vpu->bs_size = msg->bs_size; + vpu->is_key_frm = msg->is_key_frm; + } + +-static void vpu_enc_ipi_handler(void *data, unsigned int len, void *priv) ++static void vpu_enc_ipi_handler(const void *data, unsigned int len, void *priv) + { +- struct venc_vpu_ipi_msg_common *msg = data; ++ const struct venc_vpu_ipi_msg_common *msg = data; + struct venc_vpu_inst *vpu = + (struct venc_vpu_inst *)(unsigned long)msg->venc_inst; + +diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c +index a768707abb94..2fbccc9b247b 100644 +--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c ++++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c +@@ -203,8 +203,8 @@ struct mtk_vpu { + struct vpu_run run; + struct vpu_wdt wdt; + struct vpu_ipi_desc ipi_desc[IPI_MAX]; +- struct share_obj *recv_buf; +- struct share_obj *send_buf; ++ struct share_obj __iomem *recv_buf; ++ 
struct share_obj __iomem *send_buf; + struct device *dev; + struct clk *clk; + bool fw_loaded; +@@ -292,7 +292,7 @@ int vpu_ipi_send(struct platform_device *pdev, + unsigned int len) + { + struct mtk_vpu *vpu = platform_get_drvdata(pdev); +- struct share_obj *send_obj = vpu->send_buf; ++ struct share_obj __iomem *send_obj = vpu->send_buf; + unsigned long timeout; + int ret = 0; + +@@ -325,9 +325,9 @@ int vpu_ipi_send(struct platform_device *pdev, + } + } while (vpu_cfg_readl(vpu, HOST_TO_VPU)); + +- memcpy((void *)send_obj->share_buf, buf, len); +- send_obj->len = len; +- send_obj->id = id; ++ memcpy_toio(send_obj->share_buf, buf, len); ++ writel(len, &send_obj->len); ++ writel(id, &send_obj->id); + + vpu->ipi_id_ack[id] = false; + /* send the command to VPU */ +@@ -600,10 +600,10 @@ OUT_LOAD_FW: + } + EXPORT_SYMBOL_GPL(vpu_load_firmware); + +-static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv) ++static void vpu_init_ipi_handler(const void *data, unsigned int len, void *priv) + { +- struct mtk_vpu *vpu = (struct mtk_vpu *)priv; +- struct vpu_run *run = (struct vpu_run *)data; ++ struct mtk_vpu *vpu = priv; ++ const struct vpu_run *run = data; + + vpu->run.signaled = run->signaled; + strscpy(vpu->run.fw_ver, run->fw_ver, sizeof(vpu->run.fw_ver)); +@@ -700,19 +700,21 @@ static int vpu_alloc_ext_mem(struct mtk_vpu *vpu, u32 fw_type) + + static void vpu_ipi_handler(struct mtk_vpu *vpu) + { +- struct share_obj *rcv_obj = vpu->recv_buf; ++ struct share_obj __iomem *rcv_obj = vpu->recv_buf; + struct vpu_ipi_desc *ipi_desc = vpu->ipi_desc; +- +- if (rcv_obj->id < IPI_MAX && ipi_desc[rcv_obj->id].handler) { +- ipi_desc[rcv_obj->id].handler(rcv_obj->share_buf, +- rcv_obj->len, +- ipi_desc[rcv_obj->id].priv); +- if (rcv_obj->id > IPI_VPU_INIT) { +- vpu->ipi_id_ack[rcv_obj->id] = true; ++ unsigned char data[SHARE_BUF_SIZE]; ++ s32 id = readl(&rcv_obj->id); ++ ++ memcpy_fromio(data, rcv_obj->share_buf, sizeof(data)); ++ if (id < IPI_MAX && ipi_desc[id].handler) { ++ ipi_desc[id].handler(data, readl(&rcv_obj->len), ++ ipi_desc[id].priv); ++ if (id > IPI_VPU_INIT) { ++ vpu->ipi_id_ack[id] = true; + wake_up(&vpu->ack_wq); + } + } else { +- dev_err(vpu->dev, "No such ipi id = %d\n", rcv_obj->id); ++ dev_err(vpu->dev, "No such ipi id = %d\n", id); + } + } + +@@ -722,11 +724,10 @@ static int vpu_ipi_init(struct mtk_vpu *vpu) + vpu_cfg_writel(vpu, 0x0, VPU_TO_HOST); + + /* shared buffer initialization */ +- vpu->recv_buf = (__force struct share_obj *)(vpu->reg.tcm + +- VPU_DTCM_OFFSET); ++ vpu->recv_buf = vpu->reg.tcm + VPU_DTCM_OFFSET; + vpu->send_buf = vpu->recv_buf + 1; +- memset(vpu->recv_buf, 0, sizeof(struct share_obj)); +- memset(vpu->send_buf, 0, sizeof(struct share_obj)); ++ memset_io(vpu->recv_buf, 0, sizeof(struct share_obj)); ++ memset_io(vpu->send_buf, 0, sizeof(struct share_obj)); + + return 0; + } +diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.h b/drivers/media/platform/mtk-vpu/mtk_vpu.h +index d4453b4bcee9..ee7c552ce928 100644 +--- a/drivers/media/platform/mtk-vpu/mtk_vpu.h ++++ b/drivers/media/platform/mtk-vpu/mtk_vpu.h +@@ -15,7 +15,7 @@ + * VPU interfaces with other blocks by share memory and interrupt. 
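The mtk-vpu hunks above stop parsing IPI messages in place in device memory and instead snapshot id, length and payload into ordinary memory before dispatch (memcpy_fromio() plus readl() in the real code). A hypothetical userspace model of that pattern, with volatile standing in for __iomem and an invented struct layout:

    #include <stdio.h>

    #define SHARE_BUF_SIZE 16

    struct share_obj {
            int id;
            int len;
            unsigned char share_buf[SHARE_BUF_SIZE];
    };

    static void ipi_handler_model(volatile struct share_obj *rcv)
    {
            unsigned char data[SHARE_BUF_SIZE];
            int id = rcv->id;    /* single read, like readl() */
            int len = rcv->len;

            /* snapshot before dispatch so the VPU cannot mutate it under us */
            for (int i = 0; i < SHARE_BUF_SIZE; i++)
                    data[i] = rcv->share_buf[i];

            printf("ipi id=%d len=%d first=%d\n", id, len, (int)data[0]);
    }

    int main(void)
    {
            struct share_obj obj = { .id = 3, .len = 4, .share_buf = { 7 } };

            ipi_handler_model(&obj);
            return 0;
    }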
+ **/ + +-typedef void (*ipi_handler_t) (void *data, ++typedef void (*ipi_handler_t) (const void *data, + unsigned int len, + void *priv); + +diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h +index 11585fb3cae3..2f661af7f873 100644 +--- a/drivers/media/platform/qcom/venus/core.h ++++ b/drivers/media/platform/qcom/venus/core.h +@@ -344,6 +344,7 @@ struct venus_inst { + unsigned int subscriptions; + int buf_count; + struct venus_ts_metadata tss[VIDEO_MAX_FRAME]; ++ unsigned long payloads[VIDEO_MAX_FRAME]; + u64 fps; + struct v4l2_fract timeperframe; + const struct venus_format *fmt_out; +diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c +index d3d1748a7ef6..33f70e1def94 100644 +--- a/drivers/media/platform/qcom/venus/firmware.c ++++ b/drivers/media/platform/qcom/venus/firmware.c +@@ -44,8 +44,14 @@ static void venus_reset_cpu(struct venus_core *core) + + int venus_set_hw_state(struct venus_core *core, bool resume) + { +- if (core->use_tz) +- return qcom_scm_set_remote_state(resume, 0); ++ int ret; ++ ++ if (core->use_tz) { ++ ret = qcom_scm_set_remote_state(resume, 0); ++ if (resume && ret == -EINVAL) ++ ret = 0; ++ return ret; ++ } + + if (resume) + venus_reset_cpu(core); +diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c +index a172f1ac0b35..32f8fb8d7f33 100644 +--- a/drivers/media/platform/qcom/venus/helpers.c ++++ b/drivers/media/platform/qcom/venus/helpers.c +@@ -544,18 +544,13 @@ static int scale_clocks_v4(struct venus_inst *inst) + struct venus_core *core = inst->core; + const struct freq_tbl *table = core->res->freq_tbl; + unsigned int num_rows = core->res->freq_tbl_size; +- struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx; + struct device *dev = core->dev; + unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0; + unsigned long filled_len = 0; +- struct venus_buffer *buf, *n; +- struct vb2_buffer *vb; + int i, ret; + +- v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) { +- vb = &buf->vb.vb2_buf; +- filled_len = max(filled_len, vb2_get_plane_payload(vb, 0)); +- } ++ for (i = 0; i < inst->num_input_bufs; i++) ++ filled_len = max(filled_len, inst->payloads[i]); + + if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len) + return 0; +@@ -1289,6 +1284,15 @@ int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb) + } + EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_prepare); + ++static void cache_payload(struct venus_inst *inst, struct vb2_buffer *vb) ++{ ++ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); ++ unsigned int idx = vbuf->vb2_buf.index; ++ ++ if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ++ inst->payloads[idx] = vb2_get_plane_payload(vb, 0); ++} ++ + void venus_helper_vb2_buf_queue(struct vb2_buffer *vb) + { + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); +@@ -1300,6 +1304,8 @@ void venus_helper_vb2_buf_queue(struct vb2_buffer *vb) + + v4l2_m2m_buf_queue(m2m_ctx, vbuf); + ++ cache_payload(inst, vb); ++ + if (inst->session_type == VIDC_SESSION_TYPE_ENC && + !(inst->streamon_out && inst->streamon_cap)) + goto unlock; +diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c +index 2293d936e49c..7f515a4b9bd1 100644 +--- a/drivers/media/platform/qcom/venus/hfi_parser.c ++++ b/drivers/media/platform/qcom/venus/hfi_parser.c +@@ -181,6 +181,7 @@ static void parse_codecs(struct venus_core *core, void *data) + if (IS_V1(core)) { + core->dec_codecs &= 
~HFI_VIDEO_CODEC_HEVC; + core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK; ++ core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC; + } + } + +diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c +index 223161f9c403..955a49b8e9c0 100644 +--- a/drivers/media/platform/ti-vpe/cal.c ++++ b/drivers/media/platform/ti-vpe/cal.c +@@ -266,8 +266,6 @@ struct cal_ctx { + struct v4l2_subdev *sensor; + struct v4l2_fwnode_endpoint endpoint; + +- struct v4l2_async_subdev asd; +- + struct v4l2_fh fh; + struct cal_dev *dev; + struct cc_data *cc; +@@ -537,16 +535,16 @@ static void enable_irqs(struct cal_ctx *ctx) + + static void disable_irqs(struct cal_ctx *ctx) + { ++ u32 val; ++ + /* Disable IRQ_WDMA_END 0/1 */ +- reg_write_field(ctx->dev, +- CAL_HL_IRQENABLE_CLR(2), +- CAL_HL_IRQ_CLEAR, +- CAL_HL_IRQ_MASK(ctx->csi2_port)); ++ val = 0; ++ set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port)); ++ reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(2), val); + /* Disable IRQ_WDMA_START 0/1 */ +- reg_write_field(ctx->dev, +- CAL_HL_IRQENABLE_CLR(3), +- CAL_HL_IRQ_CLEAR, +- CAL_HL_IRQ_MASK(ctx->csi2_port)); ++ val = 0; ++ set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port)); ++ reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(3), val); + /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */ + reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0); + } +@@ -1648,7 +1646,6 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst) + + parent = pdev->dev.of_node; + +- asd = &ctx->asd; + endpoint = &ctx->endpoint; + + ep_node = NULL; +@@ -1695,8 +1692,6 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst) + ctx_dbg(3, ctx, "can't get remote parent\n"); + goto cleanup_exit; + } +- asd->match_type = V4L2_ASYNC_MATCH_FWNODE; +- asd->match.fwnode = of_fwnode_handle(sensor_node); + + v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_node), endpoint); + +@@ -1726,9 +1721,17 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst) + + v4l2_async_notifier_init(&ctx->notifier); + ++ asd = kzalloc(sizeof(*asd), GFP_KERNEL); ++ if (!asd) ++ goto cleanup_exit; ++ ++ asd->match_type = V4L2_ASYNC_MATCH_FWNODE; ++ asd->match.fwnode = of_fwnode_handle(sensor_node); ++ + ret = v4l2_async_notifier_add_subdev(&ctx->notifier, asd); + if (ret) { + ctx_err(ctx, "Error adding asd\n"); ++ kfree(asd); + goto cleanup_exit; + } + +diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c +index cd6b55433c9e..43e494df61d8 100644 +--- a/drivers/media/platform/vimc/vimc-streamer.c ++++ b/drivers/media/platform/vimc/vimc-streamer.c +@@ -207,8 +207,13 @@ int vimc_streamer_s_stream(struct vimc_stream *stream, + stream->kthread = kthread_run(vimc_streamer_thread, stream, + "vimc-streamer thread"); + +- if (IS_ERR(stream->kthread)) +- return PTR_ERR(stream->kthread); ++ if (IS_ERR(stream->kthread)) { ++ ret = PTR_ERR(stream->kthread); ++ dev_err(ved->dev, "kthread_run failed with %d\n", ret); ++ vimc_streamer_pipeline_terminate(stream); ++ stream->kthread = NULL; ++ return ret; ++ } + + } else { + if (!stream->kthread) +diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile +index 63261ef6380a..aaa1bf81d00d 100644 +--- a/drivers/media/rc/keymaps/Makefile ++++ b/drivers/media/rc/keymaps/Makefile +@@ -119,6 +119,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \ + rc-videomate-m1f.o \ + rc-videomate-s350.o \ + rc-videomate-tv-pvr.o \ ++ rc-videostrong-kii-pro.o \ + rc-wetek-hub.o \ + rc-wetek-play2.o \ + rc-winfast.o \ 
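Further up, the vimc hunk tightens the kthread_run() failure path: log the error, terminate the half-built pipeline, and reset the handle so a later s_stream call sees consistent state. A sketch of that shape with invented stand-ins (NULL models ERR_PTR here):

    #include <stdio.h>
    #include <stdlib.h>

    struct stream { void *kthread; };

    static void *thread_run_model(int fail) { return fail ? NULL : malloc(1); }
    static void pipeline_terminate_model(struct stream *s) { (void)s; }

    static int s_stream_model(struct stream *s, int fail)
    {
            s->kthread = thread_run_model(fail);
            if (!s->kthread) {                 /* stands in for IS_ERR() */
                    fprintf(stderr, "kthread_run failed\n");
                    pipeline_terminate_model(s);
                    s->kthread = NULL;         /* keep state consistent */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct stream s = { 0 };

            printf("%d\n", s_stream_model(&s, 1));  /* -1: error path taken */
            printf("%d\n", s_stream_model(&s, 0));  /* 0: thread running */
            free(s.kthread);
            return 0;
    }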
+diff --git a/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c b/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c +new file mode 100644 +index 000000000000..414d4d231e7e +--- /dev/null ++++ b/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c +@@ -0,0 +1,83 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++// ++// Copyright (C) 2019 Mohammad Rasim <mohammad.rasim96@gmail.com> ++ ++#include <media/rc-map.h> ++#include <linux/module.h> ++ ++// ++// Keytable for the Videostrong KII Pro STB remote control ++// ++ ++static struct rc_map_table kii_pro[] = { ++ { 0x59, KEY_POWER }, ++ { 0x19, KEY_MUTE }, ++ { 0x42, KEY_RED }, ++ { 0x40, KEY_GREEN }, ++ { 0x00, KEY_YELLOW }, ++ { 0x03, KEY_BLUE }, ++ { 0x4a, KEY_BACK }, ++ { 0x48, KEY_FORWARD }, ++ { 0x08, KEY_PREVIOUSSONG}, ++ { 0x0b, KEY_NEXTSONG}, ++ { 0x46, KEY_PLAYPAUSE }, ++ { 0x44, KEY_STOP }, ++ { 0x1f, KEY_FAVORITES}, //KEY_F5? ++ { 0x04, KEY_PVR }, ++ { 0x4d, KEY_EPG }, ++ { 0x02, KEY_INFO }, ++ { 0x09, KEY_SUBTITLE }, ++ { 0x01, KEY_AUDIO }, ++ { 0x0d, KEY_HOMEPAGE }, ++ { 0x11, KEY_TV }, // DTV ? ++ { 0x06, KEY_UP }, ++ { 0x5a, KEY_LEFT }, ++ { 0x1a, KEY_ENTER }, // KEY_OK ? ++ { 0x1b, KEY_RIGHT }, ++ { 0x16, KEY_DOWN }, ++ { 0x45, KEY_MENU }, ++ { 0x05, KEY_ESC }, ++ { 0x13, KEY_VOLUMEUP }, ++ { 0x17, KEY_VOLUMEDOWN }, ++ { 0x58, KEY_APPSELECT }, ++ { 0x12, KEY_VENDOR }, // mouse ++ { 0x55, KEY_PAGEUP }, // KEY_CHANNELUP ? ++ { 0x15, KEY_PAGEDOWN }, // KEY_CHANNELDOWN ? ++ { 0x52, KEY_1 }, ++ { 0x50, KEY_2 }, ++ { 0x10, KEY_3 }, ++ { 0x56, KEY_4 }, ++ { 0x54, KEY_5 }, ++ { 0x14, KEY_6 }, ++ { 0x4e, KEY_7 }, ++ { 0x4c, KEY_8 }, ++ { 0x0c, KEY_9 }, ++ { 0x18, KEY_WWW }, // KEY_F7 ++ { 0x0f, KEY_0 }, ++ { 0x51, KEY_BACKSPACE }, ++}; ++ ++static struct rc_map_list kii_pro_map = { ++ .map = { ++ .scan = kii_pro, ++ .size = ARRAY_SIZE(kii_pro), ++ .rc_proto = RC_PROTO_NEC, ++ .name = RC_MAP_KII_PRO, ++ } ++}; ++ ++static int __init init_rc_map_kii_pro(void) ++{ ++ return rc_map_register(&kii_pro_map); ++} ++ ++static void __exit exit_rc_map_kii_pro(void) ++{ ++ rc_map_unregister(&kii_pro_map); ++} ++ ++module_init(init_rc_map_kii_pro) ++module_exit(exit_rc_map_kii_pro) ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Mohammad Rasim <mohammad.rasim96@gmail.com>"); +diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c +index 7841c11411d0..4faa8d2e5d04 100644 +--- a/drivers/mfd/dln2.c ++++ b/drivers/mfd/dln2.c +@@ -90,6 +90,11 @@ struct dln2_mod_rx_slots { + spinlock_t lock; + }; + ++enum dln2_endpoint { ++ DLN2_EP_OUT = 0, ++ DLN2_EP_IN = 1, ++}; ++ + struct dln2_dev { + struct usb_device *usb_dev; + struct usb_interface *interface; +@@ -733,10 +738,10 @@ static int dln2_probe(struct usb_interface *interface, + hostif->desc.bNumEndpoints < 2) + return -ENODEV; + +- epin = &hostif->endpoint[0].desc; +- epout = &hostif->endpoint[1].desc; ++ epout = &hostif->endpoint[DLN2_EP_OUT].desc; + if (!usb_endpoint_is_bulk_out(epout)) + return -ENODEV; ++ epin = &hostif->endpoint[DLN2_EP_IN].desc; + if (!usb_endpoint_is_bulk_in(epin)) + return -ENODEV; + +diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c +index a4f7e8e689d3..01f222758910 100644 +--- a/drivers/mmc/host/mmci_stm32_sdmmc.c ++++ b/drivers/mmc/host/mmci_stm32_sdmmc.c +@@ -315,11 +315,11 @@ complete: + if (host->busy_status) { + writel_relaxed(mask & ~host->variant->busy_detect_mask, + base + MMCIMASK0); +- writel_relaxed(host->variant->busy_detect_mask, +- base + MMCICLEAR); + host->busy_status = 0; + } + ++ writel_relaxed(host->variant->busy_detect_mask, base + 
MMCICLEAR); ++ + return true; + } + +diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c +index 500f70a6ee42..1e52a100b620 100644 +--- a/drivers/mmc/host/sdhci-of-esdhc.c ++++ b/drivers/mmc/host/sdhci-of-esdhc.c +@@ -758,23 +758,58 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask) + { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); +- u32 val; ++ u32 val, bus_width = 0; + ++ /* ++ * Add delay to make sure all the DMA transfers are finished ++ * for quirk. ++ */ + if (esdhc->quirk_delay_before_data_reset && + (mask & SDHCI_RESET_DATA) && + (host->flags & SDHCI_REQ_USE_DMA)) + mdelay(5); + ++ /* ++ * Save bus-width for eSDHC whose vendor version is 2.2 ++ * or lower for data reset. ++ */ ++ if ((mask & SDHCI_RESET_DATA) && ++ (esdhc->vendor_ver <= VENDOR_V_22)) { ++ val = sdhci_readl(host, ESDHC_PROCTL); ++ bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK; ++ } ++ + sdhci_reset(host, mask); + +- sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); +- sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); ++ /* ++ * Restore bus-width setting and interrupt registers for eSDHC ++ * whose vendor version is 2.2 or lower for data reset. ++ */ ++ if ((mask & SDHCI_RESET_DATA) && ++ (esdhc->vendor_ver <= VENDOR_V_22)) { ++ val = sdhci_readl(host, ESDHC_PROCTL); ++ val &= ~ESDHC_CTRL_BUSWIDTH_MASK; ++ val |= bus_width; ++ sdhci_writel(host, val, ESDHC_PROCTL); ++ ++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); ++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); ++ } + +- if (mask & SDHCI_RESET_ALL) { ++ /* ++ * Some bits have to be cleaned manually for eSDHC whose spec ++ * version is higher than 3.0 for all reset. ++ */ ++ if ((mask & SDHCI_RESET_ALL) && ++ (esdhc->spec_ver >= SDHCI_SPEC_300)) { + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~ESDHC_TB_EN; + sdhci_writel(host, val, ESDHC_TBCTL); + ++ /* ++ * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to ++ * 0 for quirk. 
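The esdhc reset hunk above wraps a destructive controller reset in a save/restore of the bus-width field for old controller revisions. The idiom, with the register modeled as a plain variable and a made-up mask:

    #include <stdio.h>

    #define BUSWIDTH_MASK 0x06          /* invented for the sketch */

    static unsigned int proctl = 0x34;  /* pretend register */

    static void reset_model(void) { proctl = 0; } /* reset clobbers it */

    int main(void)
    {
            unsigned int saved = proctl & BUSWIDTH_MASK;    /* save */

            reset_model();

            proctl = (proctl & ~BUSWIDTH_MASK) | saved;     /* restore */
            printf("proctl=0x%02x\n", proctl);              /* 0x04 */
            return 0;
    }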
++ */ + if (esdhc->quirk_unreliable_pulse_detection) { + val = sdhci_readl(host, ESDHC_DLLCFG1); + val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL; +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c +index 659a9459ace3..1c9ca6864be3 100644 +--- a/drivers/mmc/host/sdhci.c ++++ b/drivers/mmc/host/sdhci.c +@@ -992,7 +992,7 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host) + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + } + +-static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) ++void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) + { + if (enable) + host->ier |= SDHCI_INT_DATA_TIMEOUT; +@@ -1001,28 +1001,31 @@ static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + } ++EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq); + +-static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) ++void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) + { +- u8 count; +- +- if (host->ops->set_timeout) { +- host->ops->set_timeout(host, cmd); +- } else { +- bool too_big = false; +- +- count = sdhci_calc_timeout(host, cmd, &too_big); ++ bool too_big = false; ++ u8 count = sdhci_calc_timeout(host, cmd, &too_big); ++ ++ if (too_big && ++ host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { ++ sdhci_calc_sw_timeout(host, cmd); ++ sdhci_set_data_timeout_irq(host, false); ++ } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { ++ sdhci_set_data_timeout_irq(host, true); ++ } + +- if (too_big && +- host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { +- sdhci_calc_sw_timeout(host, cmd); +- sdhci_set_data_timeout_irq(host, false); +- } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { +- sdhci_set_data_timeout_irq(host, true); +- } ++ sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); ++} ++EXPORT_SYMBOL_GPL(__sdhci_set_timeout); + +- sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); +- } ++static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) ++{ ++ if (host->ops->set_timeout) ++ host->ops->set_timeout(host, cmd); ++ else ++ __sdhci_set_timeout(host, cmd); + } + + static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h +index fe83ece6965b..76e69288632d 100644 +--- a/drivers/mmc/host/sdhci.h ++++ b/drivers/mmc/host/sdhci.h +@@ -795,5 +795,7 @@ void sdhci_end_tuning(struct sdhci_host *host); + void sdhci_reset_tuning(struct sdhci_host *host); + void sdhci_send_tuning(struct sdhci_host *host, u32 opcode); + void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode); ++void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable); ++void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd); + + #endif /* __SDHCI_HW_H */ +diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c +index f6c7102a1e32..664a8db1ecd7 100644 +--- a/drivers/mtd/nand/raw/cadence-nand-controller.c ++++ b/drivers/mtd/nand/raw/cadence-nand-controller.c +@@ -997,6 +997,7 @@ static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl, + return status; + + cadence_nand_reset_irq(cdns_ctrl); ++ reinit_completion(&cdns_ctrl->complete); + + writel_relaxed((u32)cdns_ctrl->dma_cdma_desc, + cdns_ctrl->reg + CMD_REG2); +@@ -2585,7 +2586,7 @@ int cadence_nand_attach_chip(struct nand_chip *chip) + { + struct cdns_nand_ctrl *cdns_ctrl = 
to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); +- u32 ecc_size = cdns_chip->sector_count * chip->ecc.bytes; ++ u32 ecc_size; + struct mtd_info *mtd = nand_to_mtd(chip); + u32 max_oob_data_size; + int ret; +@@ -2603,12 +2604,9 @@ int cadence_nand_attach_chip(struct nand_chip *chip) + chip->options |= NAND_NO_SUBPAGE_WRITE; + + cdns_chip->bbm_offs = chip->badblockpos; +- if (chip->options & NAND_BUSWIDTH_16) { +- cdns_chip->bbm_offs &= ~0x01; +- cdns_chip->bbm_len = 2; +- } else { +- cdns_chip->bbm_len = 1; +- } ++ cdns_chip->bbm_offs &= ~0x01; ++ /* this value should be even number */ ++ cdns_chip->bbm_len = 2; + + ret = nand_ecc_choose_conf(chip, + &cdns_ctrl->ecc_caps, +@@ -2625,6 +2623,7 @@ int cadence_nand_attach_chip(struct nand_chip *chip) + /* Error correction configuration. */ + cdns_chip->sector_size = chip->ecc.size; + cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size; ++ ecc_size = cdns_chip->sector_count * chip->ecc.bytes; + + cdns_chip->avail_oob_size = mtd->oobsize - ecc_size; + +diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c +index 89f6beefb01c..5750c45019d8 100644 +--- a/drivers/mtd/nand/spi/core.c ++++ b/drivers/mtd/nand/spi/core.c +@@ -568,18 +568,18 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to, + static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos) + { + struct spinand_device *spinand = nand_to_spinand(nand); ++ u8 marker[2] = { }; + struct nand_page_io_req req = { + .pos = *pos, +- .ooblen = 2, ++ .ooblen = sizeof(marker), + .ooboffs = 0, +- .oobbuf.in = spinand->oobbuf, ++ .oobbuf.in = marker, + .mode = MTD_OPS_RAW, + }; + +- memset(spinand->oobbuf, 0, 2); + spinand_select_target(spinand, pos->target); + spinand_read_page(spinand, &req, false); +- if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff) ++ if (marker[0] != 0xff || marker[1] != 0xff) + return true; + + return false; +@@ -603,15 +603,15 @@ static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs) + static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos) + { + struct spinand_device *spinand = nand_to_spinand(nand); ++ u8 marker[2] = { }; + struct nand_page_io_req req = { + .pos = *pos, + .ooboffs = 0, +- .ooblen = 2, +- .oobbuf.out = spinand->oobbuf, ++ .ooblen = sizeof(marker), ++ .oobbuf.out = marker, + }; + int ret; + +- /* Erase block before marking it bad. */ + ret = spinand_select_target(spinand, pos->target); + if (ret) + return ret; +@@ -620,9 +620,6 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos) + if (ret) + return ret; + +- spinand_erase_op(spinand, pos); +- +- memset(spinand->oobbuf, 0, 2); + return spinand_write_page(spinand, &req); + } + +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c +index 58a039c3224a..af1f40cbccc8 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c +@@ -246,6 +246,9 @@ static int cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta) + FW_PTP_CMD_PORTID_V(0)); + c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); + c.u.ts.sc = FW_PTP_SC_ADJ_FTIME; ++ c.u.ts.sign = (delta < 0) ? 
1 : 0; ++ if (delta < 0) ++ delta = -delta; + c.u.ts.tm = cpu_to_be64(delta); + + err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +index eb53c15b13f3..5f2d57d1b2d3 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c ++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +@@ -389,7 +389,8 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, + + spin_unlock_bh(&cmdq->cmdq_lock); + +- if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) { ++ if (!wait_for_completion_timeout(&done, ++ msecs_to_jiffies(CMDQ_TIMEOUT))) { + spin_lock_bh(&cmdq->cmdq_lock); + + if (cmdq->errcode[curr_prod_idx] == &errcode) +@@ -623,6 +624,8 @@ static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci, + if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info))) + return -EBUSY; + ++ dma_rmb(); ++ + errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL); + + cmdq_sync_cmd_handler(cmdq, ci, errcode); +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +index 79b3d53f2fbf..c7c75b772a86 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c ++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +@@ -360,50 +360,6 @@ static int wait_for_db_state(struct hinic_hwdev *hwdev) + return -EFAULT; + } + +-static int wait_for_io_stopped(struct hinic_hwdev *hwdev) +-{ +- struct hinic_cmd_io_status cmd_io_status; +- struct hinic_hwif *hwif = hwdev->hwif; +- struct pci_dev *pdev = hwif->pdev; +- struct hinic_pfhwdev *pfhwdev; +- unsigned long end; +- u16 out_size; +- int err; +- +- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { +- dev_err(&pdev->dev, "Unsupported PCI Function type\n"); +- return -EINVAL; +- } +- +- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); +- +- cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif); +- +- end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT); +- do { +- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, +- HINIC_COMM_CMD_IO_STATUS_GET, +- &cmd_io_status, sizeof(cmd_io_status), +- &cmd_io_status, &out_size, +- HINIC_MGMT_MSG_SYNC); +- if ((err) || (out_size != sizeof(cmd_io_status))) { +- dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n", +- err); +- return err; +- } +- +- if (cmd_io_status.status == IO_STOPPED) { +- dev_info(&pdev->dev, "IO stopped\n"); +- return 0; +- } +- +- msleep(20); +- } while (time_before(jiffies, end)); +- +- dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n"); +- return -ETIMEDOUT; +-} +- + /** + * clear_io_resource - set the IO resources as not active in the NIC + * @hwdev: the NIC HW device +@@ -423,11 +379,8 @@ static int clear_io_resources(struct hinic_hwdev *hwdev) + return -EINVAL; + } + +- err = wait_for_io_stopped(hwdev); +- if (err) { +- dev_err(&pdev->dev, "IO has not stopped yet\n"); +- return err; +- } ++ /* sleep 100ms to wait for firmware stopping I/O */ ++ msleep(100); + + cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +index 79243b626ddb..c0b6bcb067cd 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c ++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +@@ -188,7 +188,7 @@ static u8 eq_cons_idx_checksum_set(u32 val) + * eq_update_ci - update the HW cons idx of event queue + * @eq: the event queue to update the cons idx for + **/ +-static 
void eq_update_ci(struct hinic_eq *eq) ++static void eq_update_ci(struct hinic_eq *eq, u32 arm_state) + { + u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq); + +@@ -202,7 +202,7 @@ static void eq_update_ci(struct hinic_eq *eq) + + val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) | + HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) | +- HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED); ++ HINIC_EQ_CI_SET(arm_state, INT_ARMED); + + val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM); + +@@ -235,6 +235,8 @@ static void aeq_irq_handler(struct hinic_eq *eq) + if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) + break; + ++ dma_rmb(); ++ + event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE); + if (event >= HINIC_MAX_AEQ_EVENTS) { + dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event); +@@ -347,7 +349,7 @@ static void eq_irq_handler(void *data) + else if (eq->type == HINIC_CEQ) + ceq_irq_handler(eq); + +- eq_update_ci(eq); ++ eq_update_ci(eq, EQ_ARMED); + } + + /** +@@ -702,7 +704,7 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, + } + + set_eq_ctrls(eq); +- eq_update_ci(eq); ++ eq_update_ci(eq, EQ_ARMED); + + err = alloc_eq_pages(eq); + if (err) { +@@ -752,18 +754,28 @@ err_req_irq: + **/ + static void remove_eq(struct hinic_eq *eq) + { +- struct msix_entry *entry = &eq->msix_entry; +- +- free_irq(entry->vector, eq); ++ hinic_set_msix_state(eq->hwif, eq->msix_entry.entry, ++ HINIC_MSIX_DISABLE); ++ free_irq(eq->msix_entry.vector, eq); + + if (eq->type == HINIC_AEQ) { + struct hinic_eq_work *aeq_work = &eq->aeq_work; + + cancel_work_sync(&aeq_work->work); ++ /* clear aeq_len to avoid hw access host memory */ ++ hinic_hwif_write_reg(eq->hwif, ++ HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0); + } else if (eq->type == HINIC_CEQ) { + tasklet_kill(&eq->ceq_tasklet); ++ /* clear ceq_len to avoid hw access host memory */ ++ hinic_hwif_write_reg(eq->hwif, ++ HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0); + } + ++ /* update cons_idx to avoid invalid interrupt */ ++ eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq)); ++ eq_update_ci(eq, EQ_NOT_ARMED); ++ + free_eq_pages(eq); + } + +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +index c1a6be6bf6a8..8995e32dd1c0 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c ++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +@@ -43,7 +43,7 @@ + + #define MSG_NOT_RESP 0xFFFF + +-#define MGMT_MSG_TIMEOUT 1000 ++#define MGMT_MSG_TIMEOUT 5000 + + #define mgmt_to_pfhwdev(pf_mgmt) \ + container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt) +@@ -267,7 +267,8 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, + goto unlock_sync_msg; + } + +- if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) { ++ if (!wait_for_completion_timeout(recv_done, ++ msecs_to_jiffies(MGMT_MSG_TIMEOUT))) { + dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id); + err = -ETIMEDOUT; + goto unlock_sync_msg; +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c +index 2695ad69fca6..815649e37cb1 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c ++++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c +@@ -350,6 +350,9 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget) + if (!rq_wqe) + break; + ++ /* make sure we read rx_done before packet length */ ++ dma_rmb(); ++ + cqe = rq->cqe[ci]; + status = be32_to_cpu(cqe->status); + hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge); +diff --git 
a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c +index 0e13d1c7e474..365016450bdb 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c ++++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c +@@ -45,7 +45,7 @@ + + #define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr)) + +-#define MIN_SKB_LEN 17 ++#define MIN_SKB_LEN 32 + + #define MAX_PAYLOAD_OFFSET 221 + #define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data)) +@@ -622,6 +622,8 @@ static int free_tx_poll(struct napi_struct *napi, int budget) + do { + hw_ci = HW_CONS_IDX(sq) & wq->mask; + ++ dma_rmb(); ++ + /* Reading a WQEBB to get real WQE size and consumer index. */ + sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci); + if ((!sq_wqe) || +diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h +index e678ba379598..628fa9b2f741 100644 +--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h ++++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h +@@ -2045,7 +2045,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); + if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \ + (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\ + if ((mask & VXGE_DEBUG_MASK) == mask) \ +- printk(fmt "\n", __VA_ARGS__); \ ++ printk(fmt "\n", ##__VA_ARGS__); \ + } while (0) + #else + #define vxge_debug_ll(level, mask, fmt, ...) +diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h +index 59a57ff5e96a..9c86f4f9cd42 100644 +--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h ++++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h +@@ -452,49 +452,49 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); + + #if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK) + #define vxge_debug_ll_config(level, fmt, ...) \ +- vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__) ++ vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__) + #else + #define vxge_debug_ll_config(level, fmt, ...) + #endif + + #if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) + #define vxge_debug_init(level, fmt, ...) \ +- vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__) ++ vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__) + #else + #define vxge_debug_init(level, fmt, ...) + #endif + + #if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK) + #define vxge_debug_tx(level, fmt, ...) \ +- vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__) ++ vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__) + #else + #define vxge_debug_tx(level, fmt, ...) + #endif + + #if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK) + #define vxge_debug_rx(level, fmt, ...) \ +- vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__) ++ vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__) + #else + #define vxge_debug_rx(level, fmt, ...) + #endif + + #if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK) + #define vxge_debug_mem(level, fmt, ...) \ +- vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__) ++ vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__) + #else + #define vxge_debug_mem(level, fmt, ...) + #endif + + #if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK) + #define vxge_debug_entryexit(level, fmt, ...) \ +- vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__) ++ vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__) + #else + #define vxge_debug_entryexit(level, fmt, ...) 
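Every vxge debug macro above gains ## in front of __VA_ARGS__. That is the GNU comma-swallowing extension: without it, invoking the macro with no variadic arguments expands to printk(fmt "\n", ); with a dangling comma and breaks the build. A small demonstration (compiles with gcc or clang):

    #include <stdio.h>

    #define dbg(fmt, ...) printf(fmt "\n", ##__VA_ARGS__)

    int main(void)
    {
            dbg("no arguments here");    /* would not compile without ## */
            dbg("value = %d", 42);
            return 0;
    }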
+ #endif + + #if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK) + #define vxge_debug_intr(level, fmt, ...) \ +- vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__) ++ vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__) + #else + #define vxge_debug_intr(level, fmt, ...) + #endif +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +index 07f9067affc6..cda5b0a9e948 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +@@ -1720,7 +1720,7 @@ static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_d + + ahw->reset.seq_error = 0; + ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL); +- if (p_dev->ahw->reset.buff == NULL) ++ if (ahw->reset.buff == NULL) + return -ENOMEM; + + p_buff = p_dev->ahw->reset.buff; +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +index fbf4cbcf1a65..02cdbb22d335 100644 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +@@ -279,7 +279,6 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], + { + struct rmnet_priv *priv = netdev_priv(dev); + struct net_device *real_dev; +- struct rmnet_endpoint *ep; + struct rmnet_port *port; + u16 mux_id; + +@@ -294,19 +293,27 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], + + if (data[IFLA_RMNET_MUX_ID]) { + mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); +- if (rmnet_get_endpoint(port, mux_id)) { +- NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists"); +- return -EINVAL; +- } +- ep = rmnet_get_endpoint(port, priv->mux_id); +- if (!ep) +- return -ENODEV; + +- hlist_del_init_rcu(&ep->hlnode); +- hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); ++ if (mux_id != priv->mux_id) { ++ struct rmnet_endpoint *ep; ++ ++ ep = rmnet_get_endpoint(port, priv->mux_id); ++ if (!ep) ++ return -ENODEV; + +- ep->mux_id = mux_id; +- priv->mux_id = mux_id; ++ if (rmnet_get_endpoint(port, mux_id)) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "MUX ID already exists"); ++ return -EINVAL; ++ } ++ ++ hlist_del_init_rcu(&ep->hlnode); ++ hlist_add_head_rcu(&ep->hlnode, ++ &port->muxed_ep[mux_id]); ++ ++ ep->mux_id = mux_id; ++ priv->mux_id = mux_id; ++ } + } + + if (data[IFLA_RMNET_FLAGS]) { +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +index d10ac54bf385..13fafd905db8 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +@@ -663,16 +663,22 @@ int stmmac_get_platform_resources(struct platform_device *pdev, + * In case the wake up interrupt is not passed from the platform + * so the driver will continue to use the mac irq (ndev->irq) + */ +- stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); ++ stmmac_res->wol_irq = ++ platform_get_irq_byname_optional(pdev, "eth_wake_irq"); + if (stmmac_res->wol_irq < 0) { + if (stmmac_res->wol_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; ++ dev_info(&pdev->dev, "IRQ eth_wake_irq not found\n"); + stmmac_res->wol_irq = stmmac_res->irq; + } + +- stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); +- if (stmmac_res->lpi_irq == -EPROBE_DEFER) +- return -EPROBE_DEFER; ++ stmmac_res->lpi_irq = ++ platform_get_irq_byname_optional(pdev, "eth_lpi"); ++ if (stmmac_res->lpi_irq < 0) { ++ if 
(stmmac_res->lpi_irq == -EPROBE_DEFER) ++ return -EPROBE_DEFER; ++ dev_info(&pdev->dev, "IRQ eth_lpi not found\n"); ++ } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res); +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c +index 0548aa3702e3..ef2b856670e1 100644 +--- a/drivers/net/wireless/ath/ath9k/main.c ++++ b/drivers/net/wireless/ath/ath9k/main.c +@@ -1457,6 +1457,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) + ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef); + } + ++ if (changed & IEEE80211_CONF_CHANGE_POWER) ++ ath9k_set_txpower(sc, NULL); ++ + mutex_unlock(&sc->mutex); + ath9k_ps_restore(sc); + +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +index 80ef238a8488..ca99a9c4f70e 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +@@ -6,7 +6,7 @@ + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH +- * Copyright(c) 2018 - 2019 Intel Corporation ++ * Copyright(c) 2018 - 2020 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -27,7 +27,7 @@ + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH +- * Copyright(c) 2018 - 2019 Intel Corporation ++ * Copyright(c) 2018 - 2020 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without +@@ -195,11 +195,13 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, + { + u16 supp; + int i, highest_mcs; ++ u8 nss = sta->rx_nss; + +- for (i = 0; i < sta->rx_nss; i++) { +- if (i == IWL_TLC_NSS_MAX) +- break; ++ /* the station support only a single receive chain */ ++ if (sta->smps_mode == IEEE80211_SMPS_STATIC) ++ nss = 1; + ++ for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) { + highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1); + if (!highest_mcs) + continue; +@@ -245,8 +247,13 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, + u16 tx_mcs_160 = + le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160); + int i; ++ u8 nss = sta->rx_nss; + +- for (i = 0; i < sta->rx_nss && i < IWL_TLC_NSS_MAX; i++) { ++ /* the station support only a single receive chain */ ++ if (sta->smps_mode == IEEE80211_SMPS_STATIC) ++ nss = 1; ++ ++ for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) { + u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3; + u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3; + u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3; +@@ -307,8 +314,14 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, + cmd->mode = IWL_TLC_MNG_MODE_HT; + cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_HT_BW_NONE_160] = + cpu_to_le16(ht_cap->mcs.rx_mask[0]); +- cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = +- cpu_to_le16(ht_cap->mcs.rx_mask[1]); ++ ++ /* the station support only a single receive chain */ ++ if (sta->smps_mode == IEEE80211_SMPS_STATIC) ++ cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = ++ 0; ++ else ++ cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = ++ cpu_to_le16(ht_cap->mcs.rx_mask[1]); + } + } + +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +index c0b420fe5e48..1babc4bb5194 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c ++++ 
b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +@@ -785,7 +785,9 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, + if (!le32_to_cpu(notif->status)) { + iwl_mvm_te_check_disconnect(mvm, vif, + "Session protection failure"); ++ spin_lock_bh(&mvm->time_event_lock); + iwl_mvm_te_clear_data(mvm, te_data); ++ spin_unlock_bh(&mvm->time_event_lock); + } + + if (le32_to_cpu(notif->start)) { +@@ -801,7 +803,9 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, + */ + iwl_mvm_te_check_disconnect(mvm, vif, + "No beacon heard and the session protection is over already..."); ++ spin_lock_bh(&mvm->time_event_lock); + iwl_mvm_te_clear_data(mvm, te_data); ++ spin_unlock_bh(&mvm->time_event_lock); + } + + goto out_unlock; +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c +index 5a70ac395d53..c0c4b1587ba0 100644 +--- a/drivers/nvme/host/fc.c ++++ b/drivers/nvme/host/fc.c +@@ -342,8 +342,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, + !template->ls_req || !template->fcp_io || + !template->ls_abort || !template->fcp_abort || + !template->max_hw_queues || !template->max_sgl_segments || +- !template->max_dif_sgl_segments || !template->dma_boundary || +- !template->module) { ++ !template->max_dif_sgl_segments || !template->dma_boundary) { + ret = -EINVAL; + goto out_reghost_failed; + } +@@ -2016,7 +2015,6 @@ nvme_fc_ctrl_free(struct kref *ref) + { + struct nvme_fc_ctrl *ctrl = + container_of(ref, struct nvme_fc_ctrl, ref); +- struct nvme_fc_lport *lport = ctrl->lport; + unsigned long flags; + + if (ctrl->ctrl.tagset) { +@@ -2043,7 +2041,6 @@ nvme_fc_ctrl_free(struct kref *ref) + if (ctrl->ctrl.opts) + nvmf_free_options(ctrl->ctrl.opts); + kfree(ctrl); +- module_put(lport->ops->module); + } + + static void +@@ -3074,15 +3071,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, + goto out_fail; + } + +- if (!try_module_get(lport->ops->module)) { +- ret = -EUNATCH; +- goto out_free_ctrl; +- } +- + idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); + if (idx < 0) { + ret = -ENOSPC; +- goto out_mod_put; ++ goto out_free_ctrl; + } + + ctrl->ctrl.opts = opts; +@@ -3235,8 +3227,6 @@ out_free_queues: + out_free_ida: + put_device(ctrl->dev); + ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); +-out_mod_put: +- module_put(lport->ops->module); + out_free_ctrl: + kfree(ctrl); + out_fail: +diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c +index 1c50af6219f3..b50b53db3746 100644 +--- a/drivers/nvme/target/fcloop.c ++++ b/drivers/nvme/target/fcloop.c +@@ -850,7 +850,6 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) + #define FCLOOP_DMABOUND_4G 0xFFFFFFFF + + static struct nvme_fc_port_template fctemplate = { +- .module = THIS_MODULE, + .localport_delete = fcloop_localport_delete, + .remoteport_delete = fcloop_remoteport_delete, + .create_queue = fcloop_create_queue, +diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c +index 5bb5342b8d0c..5b535f2e7161 100644 +--- a/drivers/nvme/target/tcp.c ++++ b/drivers/nvme/target/tcp.c +@@ -794,7 +794,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) + icresp->hdr.pdo = 0; + icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen); + icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); +- icresp->maxdata = cpu_to_le32(0xffff); /* FIXME: support r2t */ ++ icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */ + icresp->cpda = 0; + if (queue->hdr_digest) + icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE; +diff --git 
a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c +index 7e581748ee9f..70ded8900e28 100644 +--- a/drivers/pci/controller/dwc/pcie-qcom.c ++++ b/drivers/pci/controller/dwc/pcie-qcom.c +@@ -1289,7 +1289,13 @@ static void qcom_fixup_class(struct pci_dev *dev) + { + dev->class = PCI_CLASS_BRIDGE_PCI << 8; + } +-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class); + + static struct platform_driver qcom_pcie_driver = { + .probe = qcom_pcie_probe, +diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c +index d2b174ce15de..abfac1109a13 100644 +--- a/drivers/pci/endpoint/pci-epc-mem.c ++++ b/drivers/pci/endpoint/pci-epc-mem.c +@@ -79,6 +79,7 @@ int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size, + mem->page_size = page_size; + mem->pages = pages; + mem->size = size; ++ mutex_init(&mem->lock); + + epc->mem = mem; + +@@ -122,7 +123,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, + phys_addr_t *phys_addr, size_t size) + { + int pageno; +- void __iomem *virt_addr; ++ void __iomem *virt_addr = NULL; + struct pci_epc_mem *mem = epc->mem; + unsigned int page_shift = ilog2(mem->page_size); + int order; +@@ -130,15 +131,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, + size = ALIGN(size, mem->page_size); + order = pci_epc_mem_get_order(mem, size); + ++ mutex_lock(&mem->lock); + pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order); + if (pageno < 0) +- return NULL; ++ goto ret; + + *phys_addr = mem->phys_base + ((phys_addr_t)pageno << page_shift); + virt_addr = ioremap(*phys_addr, size); + if (!virt_addr) + bitmap_release_region(mem->bitmap, pageno, order); + ++ret: ++ mutex_unlock(&mem->lock); + return virt_addr; + } + EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr); +@@ -164,7 +168,9 @@ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr, + pageno = (phys_addr - mem->phys_base) >> page_shift; + size = ALIGN(size, mem->page_size); + order = pci_epc_mem_get_order(mem, size); ++ mutex_lock(&mem->lock); + bitmap_release_region(mem->bitmap, pageno, order); ++ mutex_unlock(&mem->lock); + } + EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr); + +diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c +index 8a2cb1764386..14e6dccae8f1 100644 +--- a/drivers/pci/hotplug/pciehp_hpc.c ++++ b/drivers/pci/hotplug/pciehp_hpc.c +@@ -625,17 +625,15 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) + if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) { + ret = pciehp_isr(irq, dev_id); + enable_irq(irq); +- if (ret != IRQ_WAKE_THREAD) { +- pci_config_pm_runtime_put(pdev); +- return ret; +- } ++ if (ret != IRQ_WAKE_THREAD) ++ goto out; + } + + synchronize_hardirq(irq); + events = atomic_xchg(&ctrl->pending_events, 0); + if (!events) { +- pci_config_pm_runtime_put(pdev); +- return IRQ_NONE; ++ ret = IRQ_NONE; ++ goto out; + } + + /* Check Attention Button Pressed */ +@@ -664,10 +662,12 @@ static irqreturn_t pciehp_ist(int 
irq, void *dev_id) + pciehp_handle_presence_or_link_change(ctrl, events); + up_read(&ctrl->reset_lock); + ++ ret = IRQ_HANDLED; ++out: + pci_config_pm_runtime_put(pdev); + ctrl->ist_running = false; + wake_up(&ctrl->requester); +- return IRQ_HANDLED; ++ return ret; + } + + static int pciehp_poll(void *data) +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c +index 0dcd44308228..c2596e79ec63 100644 +--- a/drivers/pci/pcie/aspm.c ++++ b/drivers/pci/pcie/aspm.c +@@ -747,9 +747,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) + + /* Enable what we need to enable */ + pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1, +- PCI_L1SS_CAP_L1_PM_SS, val); ++ PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1, +- PCI_L1SS_CAP_L1_PM_SS, val); ++ PCI_L1SS_CTL1_L1SS_MASK, val); + } + + static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 83953752337c..54903ecfa696 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -1970,26 +1970,92 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk + /* + * IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no + * 300641-004US, section 5.7.3. ++ * ++ * Core IO on Xeon E5 1600/2600/4600, see Intel order no 326509-003. ++ * Core IO on Xeon E5 v2, see Intel order no 329188-003. ++ * Core IO on Xeon E7 v2, see Intel order no 329595-002. ++ * Core IO on Xeon E5 v3, see Intel order no 330784-003. ++ * Core IO on Xeon E7 v3, see Intel order no 332315-001US. ++ * Core IO on Xeon E5 v4, see Intel order no 333810-002US. ++ * Core IO on Xeon E7 v4, see Intel order no 332315-001US. ++ * Core IO on Xeon D-1500, see Intel order no 332051-001. ++ * Core IO on Xeon Scalable, see Intel order no 610950. 
+ */ +-#define INTEL_6300_IOAPIC_ABAR 0x40 ++#define INTEL_6300_IOAPIC_ABAR 0x40 /* Bus 0, Dev 29, Func 5 */ + #define INTEL_6300_DISABLE_BOOT_IRQ (1<<14) + ++#define INTEL_CIPINTRC_CFG_OFFSET 0x14C /* Bus 0, Dev 5, Func 0 */ ++#define INTEL_CIPINTRC_DIS_INTX_ICH (1<<25) ++ + static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev) + { + u16 pci_config_word; ++ u32 pci_config_dword; + + if (noioapicquirk) + return; + +- pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word); +- pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; +- pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); +- ++ switch (dev->device) { ++ case PCI_DEVICE_ID_INTEL_ESB_10: ++ pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, ++ &pci_config_word); ++ pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; ++ pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, ++ pci_config_word); ++ break; ++ case 0x3c28: /* Xeon E5 1600/2600/4600 */ ++ case 0x0e28: /* Xeon E5/E7 V2 */ ++ case 0x2f28: /* Xeon E5/E7 V3,V4 */ ++ case 0x6f28: /* Xeon D-1500 */ ++ case 0x2034: /* Xeon Scalable Family */ ++ pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET, ++ &pci_config_dword); ++ pci_config_dword |= INTEL_CIPINTRC_DIS_INTX_ICH; ++ pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET, ++ pci_config_dword); ++ break; ++ default: ++ return; ++ } + pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n", + dev->vendor, dev->device); + } +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); +-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); ++/* ++ * Device 29 Func 5 Device IDs of IO-APIC ++ * containing ABAR—APIC1 Alternate Base Address Register ++ */ ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, ++ quirk_disable_intel_boot_interrupt); ++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, ++ quirk_disable_intel_boot_interrupt); ++ ++/* ++ * Device 5 Func 0 Device IDs of Core IO modules/hubs ++ * containing Coherent Interface Protocol Interrupt Control ++ * ++ * Device IDs obtained from volume 2 datasheets of commented ++ * families above. 
++ */ ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x3c28, ++ quirk_disable_intel_boot_interrupt); ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0e28, ++ quirk_disable_intel_boot_interrupt); ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2f28, ++ quirk_disable_intel_boot_interrupt); ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x6f28, ++ quirk_disable_intel_boot_interrupt); ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2034, ++ quirk_disable_intel_boot_interrupt); ++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x3c28, ++ quirk_disable_intel_boot_interrupt); ++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x0e28, ++ quirk_disable_intel_boot_interrupt); ++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2f28, ++ quirk_disable_intel_boot_interrupt); ++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x6f28, ++ quirk_disable_intel_boot_interrupt); ++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2034, ++ quirk_disable_intel_boot_interrupt); + + /* Disable boot interrupts on HT-1000 */ + #define BC_HT1000_FEATURE_REG 0x64 +diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c +index 9c3ad09d3022..fb4602d44eb1 100644 +--- a/drivers/pci/switch/switchtec.c ++++ b/drivers/pci/switch/switchtec.c +@@ -175,7 +175,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser) + kref_get(&stuser->kref); + stuser->read_len = sizeof(stuser->data); + stuser_set_state(stuser, MRPC_QUEUED); +- init_completion(&stuser->comp); ++ reinit_completion(&stuser->comp); + list_add_tail(&stuser->list, &stdev->mrpc_queue); + + mrpc_cmd_submit(stdev); +diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c +index 982f0cc8270c..41e28552b2ce 100644 +--- a/drivers/platform/x86/asus-wmi.c ++++ b/drivers/platform/x86/asus-wmi.c +@@ -418,8 +418,11 @@ static int asus_wmi_battery_add(struct power_supply *battery) + { + /* The WMI method does not provide a way to specific a battery, so we + * just assume it is the first battery. ++ * Note: On some newer ASUS laptops (Zenbook UM431DA), the primary/first ++ * battery is named BATT. 
+ */ +- if (strcmp(battery->desc->name, "BAT0") != 0) ++ if (strcmp(battery->desc->name, "BAT0") != 0 && ++ strcmp(battery->desc->name, "BATT") != 0) + return -ENODEV; + + if (device_create_file(&battery->dev, +diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c +index 164fc2a53ef1..5961177a9ce9 100644 +--- a/drivers/remoteproc/qcom_q6v5_mss.c ++++ b/drivers/remoteproc/qcom_q6v5_mss.c +@@ -887,11 +887,6 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc) + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + } + +- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, +- false, qproc->mpss_phys, +- qproc->mpss_size); +- WARN_ON(ret); +- + q6v5_reset_assert(qproc); + + q6v5_clk_disable(qproc->dev, qproc->reset_clks, +@@ -921,6 +916,23 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc) + } + } + ++static int q6v5_reload_mba(struct rproc *rproc) ++{ ++ struct q6v5 *qproc = rproc->priv; ++ const struct firmware *fw; ++ int ret; ++ ++ ret = request_firmware(&fw, rproc->firmware, qproc->dev); ++ if (ret < 0) ++ return ret; ++ ++ q6v5_load(rproc, fw); ++ ret = q6v5_mba_load(qproc); ++ release_firmware(fw); ++ ++ return ret; ++} ++ + static int q6v5_mpss_load(struct q6v5 *qproc) + { + const struct elf32_phdr *phdrs; +@@ -981,6 +993,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc) + max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); + } + ++ /** ++ * In case of a modem subsystem restart on secure devices, the modem ++ * memory can be reclaimed only after MBA is loaded. For modem cold ++ * boot this will be a nop ++ */ ++ q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, ++ qproc->mpss_phys, qproc->mpss_size); ++ + mpss_reloc = relocate ? min_addr : qproc->mpss_phys; + qproc->mpss_reloc = mpss_reloc; + /* Load firmware segments */ +@@ -1070,8 +1090,16 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc, + void *ptr = rproc_da_to_va(rproc, segment->da, segment->size); + + /* Unlock mba before copying segments */ +- if (!qproc->dump_mba_loaded) +- ret = q6v5_mba_load(qproc); ++ if (!qproc->dump_mba_loaded) { ++ ret = q6v5_reload_mba(rproc); ++ if (!ret) { ++ /* Reset ownership back to Linux to copy segments */ ++ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, ++ false, ++ qproc->mpss_phys, ++ qproc->mpss_size); ++ } ++ } + + if (!ptr || ret) + memset(dest, 0xff, segment->size); +@@ -1082,8 +1110,14 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc, + + /* Reclaim mba after copying segments */ + if (qproc->dump_segment_mask == qproc->dump_complete_mask) { +- if (qproc->dump_mba_loaded) ++ if (qproc->dump_mba_loaded) { ++ /* Try to reset ownership back to Q6 */ ++ q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, ++ true, ++ qproc->mpss_phys, ++ qproc->mpss_size); + q6v5_mba_reclaim(qproc); ++ } + } + } + +@@ -1123,10 +1157,6 @@ static int q6v5_start(struct rproc *rproc) + return 0; + + reclaim_mpss: +- xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, +- false, qproc->mpss_phys, +- qproc->mpss_size); +- WARN_ON(xfermemop_ret); + q6v5_mba_reclaim(qproc); + + return ret; +diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c +index 8c07cb2ca8ba..31a62a0b470e 100644 +--- a/drivers/remoteproc/remoteproc_virtio.c ++++ b/drivers/remoteproc/remoteproc_virtio.c +@@ -334,6 +334,13 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id) + struct rproc_mem_entry *mem; + int ret; + ++ if (rproc->ops->kick == NULL) { ++ ret = -EINVAL; ++ dev_err(dev, ".kick method not defined for 
%s", ++ rproc->name); ++ goto out; ++ } ++ + /* Try to find dedicated vdev buffer carveout */ + mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index); + if (mem) { +diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c +index 93655b85b73f..18a6751299f9 100644 +--- a/drivers/s390/scsi/zfcp_erp.c ++++ b/drivers/s390/scsi/zfcp_erp.c +@@ -725,7 +725,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter) + adapter->peer_d_id); + if (IS_ERR(port)) /* error or port already attached */ + return; +- _zfcp_erp_port_reopen(port, 0, "ereptp1"); ++ zfcp_erp_port_reopen(port, 0, "ereptp1"); + } + + static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf( +diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h +index 935f98804198..ccfc934dce7b 100644 +--- a/drivers/scsi/lpfc/lpfc.h ++++ b/drivers/scsi/lpfc/lpfc.h +@@ -749,6 +749,7 @@ struct lpfc_hba { + * capability + */ + #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ ++#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */ + + uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ + struct lpfc_dmabuf slim2p; +diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c +index 85ada3deb47d..bca8c4868f6e 100644 +--- a/drivers/scsi/lpfc/lpfc_hbadisc.c ++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c +@@ -1162,13 +1162,16 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) + } + + /* Start discovery by sending a FLOGI. port_state is identically +- * LPFC_FLOGI while waiting for FLOGI cmpl ++ * LPFC_FLOGI while waiting for FLOGI cmpl. Check if sending ++ * the FLOGI is being deferred till after MBX_READ_SPARAM completes. + */ +- if (vport->port_state != LPFC_FLOGI) +- lpfc_initial_flogi(vport); +- else if (vport->fc_flag & FC_PT2PT) +- lpfc_disc_start(vport); +- ++ if (vport->port_state != LPFC_FLOGI) { ++ if (!(phba->hba_flag & HBA_DEFER_FLOGI)) ++ lpfc_initial_flogi(vport); ++ } else { ++ if (vport->fc_flag & FC_PT2PT) ++ lpfc_disc_start(vport); ++ } + return; + + out: +@@ -3093,6 +3096,14 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + mempool_free(pmb, phba->mbox_mem_pool); ++ ++ /* Check if sending the FLOGI is being deferred to after we get ++ * up to date CSPs from MBX_READ_SPARAM. 
++ */ ++ if (phba->hba_flag & HBA_DEFER_FLOGI) { ++ lpfc_initial_flogi(vport); ++ phba->hba_flag &= ~HBA_DEFER_FLOGI; ++ } + return; + + out: +@@ -3223,6 +3234,23 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) + } + + lpfc_linkup(phba); ++ sparam_mbox = NULL; ++ ++ if (!(phba->hba_flag & HBA_FCOE_MODE)) { ++ cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); ++ if (!cfglink_mbox) ++ goto out; ++ vport->port_state = LPFC_LOCAL_CFG_LINK; ++ lpfc_config_link(phba, cfglink_mbox); ++ cfglink_mbox->vport = vport; ++ cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; ++ rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); ++ if (rc == MBX_NOT_FINISHED) { ++ mempool_free(cfglink_mbox, phba->mbox_mem_pool); ++ goto out; ++ } ++ } ++ + sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!sparam_mbox) + goto out; +@@ -3243,20 +3271,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) + goto out; + } + +- if (!(phba->hba_flag & HBA_FCOE_MODE)) { +- cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); +- if (!cfglink_mbox) +- goto out; +- vport->port_state = LPFC_LOCAL_CFG_LINK; +- lpfc_config_link(phba, cfglink_mbox); +- cfglink_mbox->vport = vport; +- cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; +- rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); +- if (rc == MBX_NOT_FINISHED) { +- mempool_free(cfglink_mbox, phba->mbox_mem_pool); +- goto out; +- } +- } else { ++ if (phba->hba_flag & HBA_FCOE_MODE) { + vport->port_state = LPFC_VPORT_UNKNOWN; + /* + * Add the driver's default FCF record at FCF index 0 now. This +@@ -3313,6 +3328,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) + } + /* Reset FCF roundrobin bmask for new discovery */ + lpfc_sli4_clear_fcf_rr_bmask(phba); ++ } else { ++ if (phba->bbcredit_support && phba->cfg_enable_bbcr && ++ !(phba->link_flag & LS_LOOPBACK_MODE)) ++ phba->hba_flag |= HBA_DEFER_FLOGI; + } + + return; +diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c +index f6c8963c915d..db4a04a207ec 100644 +--- a/drivers/scsi/lpfc/lpfc_nvme.c ++++ b/drivers/scsi/lpfc/lpfc_nvme.c +@@ -1985,8 +1985,6 @@ out_unlock: + + /* Declare and initialization an instance of the FC NVME template. 
*/ + static struct nvme_fc_port_template lpfc_nvme_template = { +- .module = THIS_MODULE, +- + /* initiator-based functions */ + .localport_delete = lpfc_nvme_localport_delete, + .remoteport_delete = lpfc_nvme_remoteport_delete, +diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c +index b138d9fee675..ab5a538f6ac5 100644 +--- a/drivers/scsi/lpfc/lpfc_scsi.c ++++ b/drivers/scsi/lpfc/lpfc_scsi.c +@@ -671,8 +671,10 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, + lpfc_cmd->prot_data_type = 0; + #endif + tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd); +- if (!tmp) ++ if (!tmp) { ++ lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq); + return NULL; ++ } + + lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd; + lpfc_cmd->fcp_rsp = tmp->fcp_rsp; +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +index a038be8a0e90..c919cb7ad524 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +@@ -9747,8 +9747,8 @@ static void scsih_remove(struct pci_dev *pdev) + + ioc->remove_host = 1; + +- mpt3sas_wait_for_commands_to_complete(ioc); +- _scsih_flush_running_cmds(ioc); ++ if (!pci_device_is_present(pdev)) ++ _scsih_flush_running_cmds(ioc); + + _scsih_fw_event_cleanup_queue(ioc); + +@@ -9831,8 +9831,8 @@ scsih_shutdown(struct pci_dev *pdev) + + ioc->remove_host = 1; + +- mpt3sas_wait_for_commands_to_complete(ioc); +- _scsih_flush_running_cmds(ioc); ++ if (!pci_device_is_present(pdev)) ++ _scsih_flush_running_cmds(ioc); + + _scsih_fw_event_cleanup_queue(ioc); + +diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c +index bfcd02fdf2b8..941aa53363f5 100644 +--- a/drivers/scsi/qla2xxx/qla_nvme.c ++++ b/drivers/scsi/qla2xxx/qla_nvme.c +@@ -610,7 +610,6 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) + } + + static struct nvme_fc_port_template qla_nvme_fc_transport = { +- .module = THIS_MODULE, + .localport_delete = qla_nvme_localport_delete, + .remoteport_delete = qla_nvme_remoteport_delete, + .create_queue = qla_nvme_alloc_queue, +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index 5340a980d24b..5d8f6356c556 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -5513,7 +5513,8 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba) + static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, + u32 intr_mask) + { +- if (!ufshcd_is_auto_hibern8_supported(hba)) ++ if (!ufshcd_is_auto_hibern8_supported(hba) || ++ !ufshcd_is_auto_hibern8_enabled(hba)) + return false; + + if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK)) +diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h +index 46bec0e18c72..e4df58827bcd 100644 +--- a/drivers/scsi/ufs/ufshcd.h ++++ b/drivers/scsi/ufs/ufshcd.h +@@ -55,6 +55,7 @@ + #include <linux/clk.h> + #include <linux/completion.h> + #include <linux/regulator/consumer.h> ++#include <linux/bitfield.h> + #include "unipro.h" + + #include <asm/irq.h> +@@ -781,6 +782,11 @@ static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba) + return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT); + } + ++static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba) ++{ ++ return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? 
true : false; ++} ++ + #define ufshcd_writel(hba, val, reg) \ + writel((val), (hba)->mmio_base + (reg)) + #define ufshcd_readl(hba, reg) \ +diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c +index 70014ecce2a7..7b642c330977 100644 +--- a/drivers/soc/fsl/dpio/dpio-driver.c ++++ b/drivers/soc/fsl/dpio/dpio-driver.c +@@ -233,10 +233,6 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) + goto err_allocate_irqs; + } + +- err = register_dpio_irq_handlers(dpio_dev, desc.cpu); +- if (err) +- goto err_register_dpio_irq; +- + priv->io = dpaa2_io_create(&desc, dev); + if (!priv->io) { + dev_err(dev, "dpaa2_io_create failed\n"); +@@ -244,6 +240,10 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) + goto err_dpaa2_io_create; + } + ++ err = register_dpio_irq_handlers(dpio_dev, desc.cpu); ++ if (err) ++ goto err_register_dpio_irq; ++ + dev_info(dev, "probed\n"); + dev_dbg(dev, " receives_notifications = %d\n", + desc.receives_notifications); +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c +index 8428b69c858b..f9d44bb1040f 100644 +--- a/drivers/spi/spi-fsl-dspi.c ++++ b/drivers/spi/spi-fsl-dspi.c +@@ -196,8 +196,7 @@ struct fsl_dspi { + u8 bytes_per_word; + const struct fsl_dspi_devtype_data *devtype_data; + +- wait_queue_head_t waitq; +- u32 waitflags; ++ struct completion xfer_done; + + struct fsl_dspi_dma *dma; + }; +@@ -714,10 +713,8 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id) + if (!(spi_sr & SPI_SR_EOQF)) + return IRQ_NONE; + +- if (dspi_rxtx(dspi) == 0) { +- dspi->waitflags = 1; +- wake_up_interruptible(&dspi->waitq); +- } ++ if (dspi_rxtx(dspi) == 0) ++ complete(&dspi->xfer_done); + + return IRQ_HANDLED; + } +@@ -815,13 +812,9 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, + status = dspi_poll(dspi); + } while (status == -EINPROGRESS); + } else if (trans_mode != DSPI_DMA_MODE) { +- status = wait_event_interruptible(dspi->waitq, +- dspi->waitflags); +- dspi->waitflags = 0; ++ wait_for_completion(&dspi->xfer_done); ++ reinit_completion(&dspi->xfer_done); + } +- if (status) +- dev_err(&dspi->pdev->dev, +- "Waiting for transfer to complete failed!\n"); + + spi_transfer_delay_exec(transfer); + } +@@ -1021,8 +1014,10 @@ static int dspi_slave_abort(struct spi_master *master) + * Terminate all pending DMA transactions for the SPI working + * in SLAVE mode. 
+ */ +- dmaengine_terminate_sync(dspi->dma->chan_rx); +- dmaengine_terminate_sync(dspi->dma->chan_tx); ++ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { ++ dmaengine_terminate_sync(dspi->dma->chan_rx); ++ dmaengine_terminate_sync(dspi->dma->chan_tx); ++ } + + /* Clear the internal DSPI RX and TX FIFO buffers */ + regmap_update_bits(dspi->regmap, SPI_MCR, +@@ -1159,7 +1154,7 @@ static int dspi_probe(struct platform_device *pdev) + goto out_clk_put; + } + +- init_waitqueue_head(&dspi->waitq); ++ init_completion(&dspi->xfer_done); + + poll_mode: + +diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/staging/media/allegro-dvt/allegro-core.c +index 6f0cd0784786..c5a262a12e40 100644 +--- a/drivers/staging/media/allegro-dvt/allegro-core.c ++++ b/drivers/staging/media/allegro-dvt/allegro-core.c +@@ -393,7 +393,10 @@ struct mcu_msg_create_channel { + u32 freq_ird; + u32 freq_lt; + u32 gdr_mode; +- u32 gop_length; ++ u16 gop_length; ++ u8 num_b; ++ u8 freq_golden_ref; ++ + u32 unknown39; + + u32 subframe_latency; +diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c +index 938b48d4d3d9..be787a045c7e 100644 +--- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c ++++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c +@@ -67,12 +67,17 @@ hantro_h1_jpeg_enc_set_qtable(struct hantro_dev *vpu, + unsigned char *chroma_qtable) + { + u32 reg, i; ++ __be32 *luma_qtable_p; ++ __be32 *chroma_qtable_p; ++ ++ luma_qtable_p = (__be32 *)luma_qtable; ++ chroma_qtable_p = (__be32 *)chroma_qtable; + + for (i = 0; i < H1_JPEG_QUANT_TABLE_COUNT; i++) { +- reg = get_unaligned_be32(&luma_qtable[i]); ++ reg = get_unaligned_be32(&luma_qtable_p[i]); + vepu_write_relaxed(vpu, reg, H1_REG_JPEG_LUMA_QUAT(i)); + +- reg = get_unaligned_be32(&chroma_qtable[i]); ++ reg = get_unaligned_be32(&chroma_qtable_p[i]); + vepu_write_relaxed(vpu, reg, H1_REG_JPEG_CHROMA_QUAT(i)); + } + } +diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c +index 067892345b5d..bdb95652d6a8 100644 +--- a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c ++++ b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c +@@ -98,12 +98,17 @@ rk3399_vpu_jpeg_enc_set_qtable(struct hantro_dev *vpu, + unsigned char *chroma_qtable) + { + u32 reg, i; ++ __be32 *luma_qtable_p; ++ __be32 *chroma_qtable_p; ++ ++ luma_qtable_p = (__be32 *)luma_qtable; ++ chroma_qtable_p = (__be32 *)chroma_qtable; + + for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) { +- reg = get_unaligned_be32(&luma_qtable[i]); ++ reg = get_unaligned_be32(&luma_qtable_p[i]); + vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_LUMA_QUAT(i)); + +- reg = get_unaligned_be32(&chroma_qtable[i]); ++ reg = get_unaligned_be32(&chroma_qtable_p[i]); + vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_CHROMA_QUAT(i)); + } + } +diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c +index db30e2c70f2f..f45920b3137e 100644 +--- a/drivers/staging/media/imx/imx7-media-csi.c ++++ b/drivers/staging/media/imx/imx7-media-csi.c +@@ -1009,6 +1009,7 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi, + sdformat->format.width = in_fmt->width; + sdformat->format.height = in_fmt->height; + sdformat->format.code = in_fmt->code; ++ sdformat->format.field = in_fmt->field; + *cc = in_cc; + + sdformat->format.colorspace = in_fmt->colorspace; +@@ -1023,6 +1024,9 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi, + false); + 
sdformat->format.code = (*cc)->codes[0]; + } ++ ++ if (sdformat->format.field != V4L2_FIELD_INTERLACED) ++ sdformat->format.field = V4L2_FIELD_NONE; + break; + default: + return -EINVAL; +diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c +index 99166afca071..aa1749b1e28f 100644 +--- a/drivers/staging/media/imx/imx7-mipi-csis.c ++++ b/drivers/staging/media/imx/imx7-mipi-csis.c +@@ -579,7 +579,7 @@ static int mipi_csis_s_stream(struct v4l2_subdev *mipi_sd, int enable) + state->flags |= ST_STREAMING; + } else { + v4l2_subdev_call(state->src_sd, video, s_stream, 0); +- ret = v4l2_subdev_call(state->src_sd, core, s_power, 1); ++ ret = v4l2_subdev_call(state->src_sd, core, s_power, 0); + mipi_csis_stop_stream(state); + state->flags &= ~ST_STREAMING; + if (state->debug) +diff --git a/drivers/staging/wilc1000/wlan.c b/drivers/staging/wilc1000/wlan.c +index d3de76126b78..3098399741d7 100644 +--- a/drivers/staging/wilc1000/wlan.c ++++ b/drivers/staging/wilc1000/wlan.c +@@ -578,7 +578,6 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) + entries = ((reg >> 3) & 0x3f); + break; + } +- release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); + } while (--timeout); + if (timeout <= 0) { + ret = func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, 0x0); +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index 1d85c42b9c67..43bd5b1ea9e2 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -1029,6 +1029,9 @@ static int dwc3_core_init(struct dwc3 *dwc) + if (dwc->dis_tx_ipgap_linecheck_quirk) + reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS; + ++ if (dwc->parkmode_disable_ss_quirk) ++ reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS; ++ + dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); + } + +@@ -1342,6 +1345,8 @@ static void dwc3_get_properties(struct dwc3 *dwc) + "snps,dis-del-phy-power-chg-quirk"); + dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev, + "snps,dis-tx-ipgap-linecheck-quirk"); ++ dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev, ++ "snps,parkmode-disable-ss-quirk"); + + dwc->tx_de_emphasis_quirk = device_property_read_bool(dev, + "snps,tx_de_emphasis_quirk"); +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h +index 77c4a9abe365..3ecc69c5b150 100644 +--- a/drivers/usb/dwc3/core.h ++++ b/drivers/usb/dwc3/core.h +@@ -249,6 +249,7 @@ + #define DWC3_GUCTL_HSTINAUTORETRY BIT(14) + + /* Global User Control 1 Register */ ++#define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17) + #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28) + #define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24) + +@@ -1024,6 +1025,8 @@ struct dwc3_scratchpad_array { + * change quirk. + * @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate + * check during HS transmit. ++ * @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed ++ * instances in park mode. 
+ * @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk + * @tx_de_emphasis: Tx de-emphasis value + * 0 - -6dB de-emphasis +@@ -1215,6 +1218,7 @@ struct dwc3 { + unsigned dis_u2_freeclk_exists_quirk:1; + unsigned dis_del_phy_power_chg_quirk:1; + unsigned dis_tx_ipgap_linecheck_quirk:1; ++ unsigned parkmode_disable_ss_quirk:1; + + unsigned tx_de_emphasis_quirk:1; + unsigned tx_de_emphasis:2; +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c +index 223f72d4d9ed..cb4950cf1cdc 100644 +--- a/drivers/usb/gadget/composite.c ++++ b/drivers/usb/gadget/composite.c +@@ -861,6 +861,11 @@ static int set_config(struct usb_composite_dev *cdev, + else + power = min(power, 900U); + done: ++ if (power <= USB_SELF_POWER_VBUS_MAX_DRAW) ++ usb_gadget_set_selfpowered(gadget); ++ else ++ usb_gadget_clear_selfpowered(gadget); ++ + usb_gadget_vbus_draw(gadget, power); + if (result >= 0 && cdev->delayed_status) + result = USB_GADGET_DELAYED_STATUS; +@@ -2279,6 +2284,7 @@ void composite_suspend(struct usb_gadget *gadget) + + cdev->suspended = 1; + ++ usb_gadget_set_selfpowered(gadget); + usb_gadget_vbus_draw(gadget, 2); + } + +@@ -2307,6 +2313,9 @@ void composite_resume(struct usb_gadget *gadget) + else + maxpower = min(maxpower, 900U); + ++ if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW) ++ usb_gadget_clear_selfpowered(gadget); ++ + usb_gadget_vbus_draw(gadget, maxpower); + } + +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c +index bdac92d3a8d0..f63850efc6e3 100644 +--- a/drivers/usb/gadget/function/f_fs.c ++++ b/drivers/usb/gadget/function/f_fs.c +@@ -1120,6 +1120,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) + + ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); + if (unlikely(ret)) { ++ io_data->req = NULL; + usb_ep_free_request(ep->ep, req); + goto error_lock; + } +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index dbac0fa9748d..fe38275363e0 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -1157,8 +1157,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) + xhci_dbg(xhci, "Stop HCD\n"); + xhci_halt(xhci); + xhci_zero_64b_regs(xhci); +- xhci_reset(xhci); ++ retval = xhci_reset(xhci); + spin_unlock_irq(&xhci->lock); ++ if (retval) ++ return retval; + xhci_cleanup_msix(xhci); + + xhci_dbg(xhci, "// Disabling event ring interrupts\n"); +diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c +index 3370b3fc37b1..bd374cea3ba6 100644 +--- a/drivers/usb/typec/ucsi/ucsi_ccg.c ++++ b/drivers/usb/typec/ucsi/ucsi_ccg.c +@@ -1032,6 +1032,7 @@ static int ccg_restart(struct ucsi_ccg *uc) + return status; + } + ++ pm_runtime_enable(uc->dev); + return 0; + } + +@@ -1047,6 +1048,7 @@ static void ccg_update_firmware(struct work_struct *work) + + if (flash_mode != FLASH_NOT_NEEDED) { + ucsi_unregister(uc->ucsi); ++ pm_runtime_disable(uc->dev); + free_irq(uc->irq, uc); + + ccg_fw_update(uc, flash_mode); +diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c +index ae1a5eb98620..1e2769010089 100644 +--- a/drivers/vfio/platform/vfio_platform.c ++++ b/drivers/vfio/platform/vfio_platform.c +@@ -44,7 +44,7 @@ static int get_platform_irq(struct vfio_platform_device *vdev, int i) + { + struct platform_device *pdev = (struct platform_device *) vdev->opaque; + +- return platform_get_irq(pdev, i); ++ return platform_get_irq_optional(pdev, i); + } + + static int vfio_platform_probe(struct platform_device *pdev) +diff --git 
a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c +index 27a879eaa5a4..1ecc67da6c1a 100644 +--- a/fs/afs/rxrpc.c ++++ b/fs/afs/rxrpc.c +@@ -414,7 +414,8 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) + afs_wake_up_async_call : + afs_wake_up_call_waiter), + call->upgrade, +- call->intr, ++ (call->intr ? RXRPC_PREINTERRUPTIBLE : ++ RXRPC_UNINTERRUPTIBLE), + call->debug_id); + if (IS_ERR(rxcall)) { + ret = PTR_ERR(rxcall); +diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c +index 1d32a07bb2d1..309516e6a968 100644 +--- a/fs/btrfs/async-thread.c ++++ b/fs/btrfs/async-thread.c +@@ -395,3 +395,11 @@ void btrfs_set_work_high_priority(struct btrfs_work *work) + { + set_bit(WORK_HIGH_PRIO_BIT, &work->flags); + } ++ ++void btrfs_flush_workqueue(struct btrfs_workqueue *wq) ++{ ++ if (wq->high) ++ flush_workqueue(wq->high->normal_wq); ++ ++ flush_workqueue(wq->normal->normal_wq); ++} +diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h +index a4434301d84d..3204daa51b95 100644 +--- a/fs/btrfs/async-thread.h ++++ b/fs/btrfs/async-thread.h +@@ -44,5 +44,6 @@ void btrfs_set_work_high_priority(struct btrfs_work *work); + struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work); + struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq); + bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq); ++void btrfs_flush_workqueue(struct btrfs_workqueue *wq); + + #endif +diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c +index d3e15e1d4a91..18509746208b 100644 +--- a/fs/btrfs/delayed-inode.c ++++ b/fs/btrfs/delayed-inode.c +@@ -6,6 +6,7 @@ + + #include <linux/slab.h> + #include <linux/iversion.h> ++#include <linux/sched/mm.h> + #include "misc.h" + #include "delayed-inode.h" + #include "disk-io.h" +@@ -805,11 +806,14 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans, + struct btrfs_delayed_item *delayed_item) + { + struct extent_buffer *leaf; ++ unsigned int nofs_flag; + char *ptr; + int ret; + ++ nofs_flag = memalloc_nofs_save(); + ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key, + delayed_item->data_len); ++ memalloc_nofs_restore(nofs_flag); + if (ret < 0 && ret != -EEXIST) + return ret; + +@@ -937,6 +941,7 @@ static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_delayed_node *node) + { + struct btrfs_delayed_item *curr, *prev; ++ unsigned int nofs_flag; + int ret = 0; + + do_again: +@@ -945,7 +950,9 @@ do_again: + if (!curr) + goto delete_fail; + ++ nofs_flag = memalloc_nofs_save(); + ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1); ++ memalloc_nofs_restore(nofs_flag); + if (ret < 0) + goto delete_fail; + else if (ret > 0) { +@@ -1012,6 +1019,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, + struct btrfs_key key; + struct btrfs_inode_item *inode_item; + struct extent_buffer *leaf; ++ unsigned int nofs_flag; + int mod; + int ret; + +@@ -1024,7 +1032,9 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, + else + mod = 1; + ++ nofs_flag = memalloc_nofs_save(); + ret = btrfs_lookup_inode(trans, root, path, &key, mod); ++ memalloc_nofs_restore(nofs_flag); + if (ret > 0) { + btrfs_release_path(path); + return -ENOENT; +@@ -1075,7 +1085,10 @@ search: + + key.type = BTRFS_INODE_EXTREF_KEY; + key.offset = -1; ++ ++ nofs_flag = memalloc_nofs_save(); + ret = btrfs_search_slot(trans, root, &key, path, -1, 1); ++ memalloc_nofs_restore(nofs_flag); + if (ret 
< 0) + goto err_out; + ASSERT(ret); +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 3bb4bc2c9fd1..789438ef2501 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -3054,6 +3054,18 @@ int __cold open_ctree(struct super_block *sb, + if (ret) + goto fail_tree_roots; + ++ /* ++ * If we have a uuid root and we're not being told to rescan we need to ++ * check the generation here so we can set the ++ * BTRFS_FS_UPDATE_UUID_TREE_GEN bit. Otherwise we could commit the ++ * transaction during a balance or the log replay without updating the ++ * uuid generation, and then if we crash we would rescan the uuid tree, ++ * even though it was perfectly fine. ++ */ ++ if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) && ++ fs_info->generation == btrfs_super_uuid_tree_generation(disk_super)) ++ set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); ++ + ret = btrfs_verify_dev_extents(fs_info); + if (ret) { + btrfs_err(fs_info, +@@ -3284,8 +3296,6 @@ int __cold open_ctree(struct super_block *sb, + close_ctree(fs_info); + return ret; + } +- } else { +- set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); + } + set_bit(BTRFS_FS_OPEN, &fs_info->flags); + +@@ -3986,6 +3996,19 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) + */ + btrfs_delete_unused_bgs(fs_info); + ++ /* ++ * There might be existing delayed inode workers still running ++ * and holding an empty delayed inode item. We must wait for ++ * them to complete first because they can create a transaction. ++ * This happens when someone calls btrfs_balance_delayed_items() ++ * and then a transaction commit runs the same delayed nodes ++ * before any delayed worker has done something with the nodes. ++ * We must wait for any worker here and not at transaction ++ * commit time since that could cause a deadlock. ++ * This is a very rare case. ++ */ ++ btrfs_flush_workqueue(fs_info->delayed_workers); ++ + ret = btrfs_commit_super(fs_info); + if (ret) + btrfs_err(fs_info, "commit super ret %d", ret); +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 5e7d6e3463ab..9bf19ff12dd7 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -3931,6 +3931,7 @@ int btree_write_cache_pages(struct address_space *mapping, + .extent_locked = 0, + .sync_io = wbc->sync_mode == WB_SYNC_ALL, + }; ++ struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info; + int ret = 0; + int done = 0; + int nr_to_write_done = 0; +@@ -4044,7 +4045,39 @@ retry: + end_write_bio(&epd, ret); + return ret; + } +- ret = flush_write_bio(&epd); ++ /* ++ * If something went wrong, don't allow any metadata write bio to be ++ * submitted. ++ * ++ * This would prevent use-after-free if we had dirty pages not ++ * cleaned up, which can still happen by fuzzed images. ++ * ++ * - Bad extent tree ++ * Allowing existing tree block to be allocated for other trees. ++ * ++ * - Log tree operations ++ * Exiting tree blocks get allocated to log tree, bumps its ++ * generation, then get cleaned in tree re-balance. ++ * Such tree block will not be written back, since it's clean, ++ * thus no WRITTEN flag set. ++ * And after log writes back, this tree block is not traced by ++ * any dirty extent_io_tree. ++ * ++ * - Offending tree block gets re-dirtied from its original owner ++ * Since it has bumped generation, no WRITTEN flag, it can be ++ * reused without COWing. This tree block will not be traced ++ * by btrfs_transaction::dirty_pages. 
++ * ++ * Now such dirty tree block will not be cleaned by any dirty ++ * extent io tree. Thus we don't want to submit such wild eb ++ * if the fs already has error. ++ */ ++ if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { ++ ret = flush_write_bio(&epd); ++ } else { ++ ret = -EUCLEAN; ++ end_write_bio(&epd, ret); ++ } + return ret; + } + +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index 8d47c76b7bd1..4b05a506033a 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -2071,6 +2071,16 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) + + btrfs_init_log_ctx(&ctx, inode); + ++ /* ++ * Set the range to full if the NO_HOLES feature is not enabled. ++ * This is to avoid missing file extent items representing holes after ++ * replaying the log. ++ */ ++ if (!btrfs_fs_incompat(fs_info, NO_HOLES)) { ++ start = 0; ++ end = LLONG_MAX; ++ } ++ + /* + * We write the dirty pages in the range and wait until they complete + * out of the ->i_mutex. If so, we can flush the dirty pages by +@@ -2125,6 +2135,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) + */ + ret = start_ordered_ops(inode, start, end); + if (ret) { ++ up_write(&BTRFS_I(inode)->dio_sem); + inode_unlock(inode); + goto out; + } +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c +index 410b791f28a5..06d4c219742f 100644 +--- a/fs/btrfs/qgroup.c ++++ b/fs/btrfs/qgroup.c +@@ -1030,6 +1030,7 @@ out_add_root: + ret = qgroup_rescan_init(fs_info, 0, 1); + if (!ret) { + qgroup_rescan_zero_tracking(fs_info); ++ fs_info->qgroup_rescan_running = true; + btrfs_queue_work(fs_info->qgroup_rescan_workers, + &fs_info->qgroup_rescan_work); + } +@@ -3276,7 +3277,6 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, + sizeof(fs_info->qgroup_rescan_progress)); + fs_info->qgroup_rescan_progress.objectid = progress_objectid; + init_completion(&fs_info->qgroup_rescan_completion); +- fs_info->qgroup_rescan_running = true; + + spin_unlock(&fs_info->qgroup_lock); + mutex_unlock(&fs_info->qgroup_rescan_lock); +@@ -3339,8 +3339,11 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) + + qgroup_rescan_zero_tracking(fs_info); + ++ mutex_lock(&fs_info->qgroup_rescan_lock); ++ fs_info->qgroup_rescan_running = true; + btrfs_queue_work(fs_info->qgroup_rescan_workers, + &fs_info->qgroup_rescan_work); ++ mutex_unlock(&fs_info->qgroup_rescan_lock); + + return 0; + } +@@ -3376,9 +3379,13 @@ int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, + void + btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info) + { +- if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ++ if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { ++ mutex_lock(&fs_info->qgroup_rescan_lock); ++ fs_info->qgroup_rescan_running = true; + btrfs_queue_work(fs_info->qgroup_rescan_workers, + &fs_info->qgroup_rescan_work); ++ mutex_unlock(&fs_info->qgroup_rescan_lock); ++ } + } + + /* +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c +index da5abd62db22..e2803094aac8 100644 +--- a/fs/btrfs/relocation.c ++++ b/fs/btrfs/relocation.c +@@ -1186,7 +1186,7 @@ out: + free_backref_node(cache, lower); + } + +- free_backref_node(cache, node); ++ remove_backref_node(cache, node); + return ERR_PTR(err); + } + ASSERT(!node || !node->detached); +@@ -1298,7 +1298,7 @@ static int __must_check __add_reloc_root(struct btrfs_root *root) + if (!node) + return -ENOMEM; + +- node->bytenr = root->node->start; ++ node->bytenr = root->commit_root->start; + node->data = root; + + 
spin_lock(&rc->reloc_root_tree.lock); +@@ -1329,10 +1329,11 @@ static void __del_reloc_root(struct btrfs_root *root) + if (rc && root->node) { + spin_lock(&rc->reloc_root_tree.lock); + rb_node = tree_search(&rc->reloc_root_tree.rb_root, +- root->node->start); ++ root->commit_root->start); + if (rb_node) { + node = rb_entry(rb_node, struct mapping_node, rb_node); + rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); ++ RB_CLEAR_NODE(&node->rb_node); + } + spin_unlock(&rc->reloc_root_tree.lock); + if (!node) +@@ -1350,7 +1351,7 @@ static void __del_reloc_root(struct btrfs_root *root) + * helper to update the 'address of tree root -> reloc tree' + * mapping + */ +-static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) ++static int __update_reloc_root(struct btrfs_root *root) + { + struct btrfs_fs_info *fs_info = root->fs_info; + struct rb_node *rb_node; +@@ -1359,7 +1360,7 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) + + spin_lock(&rc->reloc_root_tree.lock); + rb_node = tree_search(&rc->reloc_root_tree.rb_root, +- root->node->start); ++ root->commit_root->start); + if (rb_node) { + node = rb_entry(rb_node, struct mapping_node, rb_node); + rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); +@@ -1371,7 +1372,7 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) + BUG_ON((struct btrfs_root *)node->data != root); + + spin_lock(&rc->reloc_root_tree.lock); +- node->bytenr = new_bytenr; ++ node->bytenr = root->node->start; + rb_node = tree_insert(&rc->reloc_root_tree.rb_root, + node->bytenr, &node->rb_node); + spin_unlock(&rc->reloc_root_tree.lock); +@@ -1529,6 +1530,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, + } + + if (reloc_root->commit_root != reloc_root->node) { ++ __update_reloc_root(reloc_root); + btrfs_set_root_node(root_item, reloc_root->node); + free_extent_buffer(reloc_root->commit_root); + reloc_root->commit_root = btrfs_root_node(reloc_root); +@@ -2561,7 +2563,21 @@ out: + free_reloc_roots(&reloc_roots); + } + +- BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); ++ /* ++ * We used to have ++ * ++ * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); ++ * ++ * here, but it's wrong. If we fail to start the transaction in ++ * prepare_to_merge() we will have only 0 ref reloc roots, none of which ++ * have actually been removed from the reloc_root_tree rb tree. This is ++ * fine because we're bailing here, and we hold a reference on the root ++ * for the list that holds it, so these roots will be cleaned up when we ++ * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root ++ * will be cleaned up on unmount. ++ * ++ * The remaining nodes will be cleaned up by free_reloc_control. ++ */ + } + + static void free_block_list(struct rb_root *blocks) +@@ -3161,9 +3177,8 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans, + ret = relocate_tree_block(trans, rc, node, &block->key, + path); + if (ret < 0) { +- if (ret != -EAGAIN || &block->rb_node == rb_first(blocks)) +- err = ret; +- goto out; ++ err = ret; ++ break; + } + } + out: +@@ -4137,12 +4152,6 @@ restart: + if (!RB_EMPTY_ROOT(&blocks)) { + ret = relocate_tree_blocks(trans, rc, &blocks); + if (ret < 0) { +- /* +- * if we fail to relocate tree blocks, force to update +- * backref cache when committing transaction. 
+- */ +- rc->backref_cache.last_trans = trans->transid - 1; +- + if (ret != -EAGAIN) { + err = ret; + break; +@@ -4212,10 +4221,10 @@ restart: + goto out_free; + } + btrfs_commit_transaction(trans); ++out_free: + ret = clean_dirty_subvols(rc); + if (ret < 0 && !err) + err = ret; +-out_free: + btrfs_free_block_rsv(fs_info, rc->block_rsv); + btrfs_free_path(path); + return err; +@@ -4572,9 +4581,8 @@ int btrfs_recover_relocation(struct btrfs_root *root) + + trans = btrfs_join_transaction(rc->extent_root); + if (IS_ERR(trans)) { +- unset_reloc_control(rc); + err = PTR_ERR(trans); +- goto out_free; ++ goto out_unset; + } + + rc->merge_reloc_tree = 1; +@@ -4594,7 +4602,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) + if (IS_ERR(fs_root)) { + err = PTR_ERR(fs_root); + list_add_tail(&reloc_root->root_list, &reloc_roots); +- goto out_free; ++ goto out_unset; + } + + err = __add_reloc_root(reloc_root); +@@ -4604,7 +4612,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) + + err = btrfs_commit_transaction(trans); + if (err) +- goto out_free; ++ goto out_unset; + + merge_reloc_roots(rc); + +@@ -4613,14 +4621,15 @@ int btrfs_recover_relocation(struct btrfs_root *root) + trans = btrfs_join_transaction(rc->extent_root); + if (IS_ERR(trans)) { + err = PTR_ERR(trans); +- goto out_free; ++ goto out_clean; + } + err = btrfs_commit_transaction(trans); +- ++out_clean: + ret = clean_dirty_subvols(rc); + if (ret < 0 && !err) + err = ret; +-out_free: ++out_unset: ++ unset_reloc_control(rc); + kfree(rc); + out: + if (!list_empty(&reloc_roots)) +@@ -4708,11 +4717,6 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, + BUG_ON(rc->stage == UPDATE_DATA_PTRS && + root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID); + +- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { +- if (buf == root->node) +- __update_reloc_root(root, cow->start); +- } +- + level = btrfs_header_level(buf); + if (btrfs_header_generation(buf) <= + btrfs_root_last_snapshot(&root->root_item)) +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index dc195435519b..7065ceb52d78 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -3842,7 +3842,7 @@ again: + if (rc == -ENODATA) + rc = 0; + +- ctx->rc = (rc == 0) ? ctx->total_len : rc; ++ ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc; + + mutex_unlock(&ctx->aio_mutex); + +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c +index e9a7536c2a5e..422036228eb7 100644 +--- a/fs/cifs/inode.c ++++ b/fs/cifs/inode.c +@@ -2517,25 +2517,26 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) + + /* + * Attempt to flush data before changing attributes. We need to do +- * this for ATTR_SIZE and ATTR_MTIME for sure, and if we change the +- * ownership or mode then we may also need to do this. Here, we take +- * the safe way out and just do the flush on all setattr requests. If +- * the flush returns error, store it to report later and continue. ++ * this for ATTR_SIZE and ATTR_MTIME. If the flush of the data ++ * returns error, store it to report later and continue. + * + * BB: This should be smarter. Why bother flushing pages that + * will be truncated anyway? Also, should we error out here if +- * the flush returns error? ++ * the flush returns error? Do we need to check for ATTR_MTIME_SET flag? 
+ */ +- rc = filemap_write_and_wait(inode->i_mapping); +- if (is_interrupt_error(rc)) { +- rc = -ERESTARTSYS; +- goto cifs_setattr_exit; ++ if (attrs->ia_valid & (ATTR_MTIME | ATTR_SIZE | ATTR_CTIME)) { ++ rc = filemap_write_and_wait(inode->i_mapping); ++ if (is_interrupt_error(rc)) { ++ rc = -ERESTARTSYS; ++ goto cifs_setattr_exit; ++ } ++ mapping_set_error(inode->i_mapping, rc); + } + +- mapping_set_error(inode->i_mapping, rc); + rc = 0; + +- if (attrs->ia_valid & ATTR_MTIME) { ++ if ((attrs->ia_valid & ATTR_MTIME) && ++ !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { + rc = cifs_get_writable_file(cifsInode, FIND_WR_ANY, &wfile); + if (!rc) { + tcon = tlink_tcon(wfile->tlink); +diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c +index 18eeeb093a68..331d4071a856 100644 +--- a/fs/debugfs/file.c ++++ b/fs/debugfs/file.c +@@ -175,8 +175,13 @@ static int open_proxy_open(struct inode *inode, struct file *filp) + if (r) + goto out; + +- real_fops = fops_get(real_fops); +- if (!real_fops) { ++ if (!fops_get(real_fops)) { ++#ifdef MODULE ++ if (real_fops->owner && ++ real_fops->owner->state == MODULE_STATE_GOING) ++ goto out; ++#endif ++ + /* Huh? Module did not clean up after itself at exit? */ + WARN(1, "debugfs file owner did not clean up at exit: %pd", + dentry); +@@ -305,8 +310,13 @@ static int full_proxy_open(struct inode *inode, struct file *filp) + if (r) + goto out; + +- real_fops = fops_get(real_fops); +- if (!real_fops) { ++ if (!fops_get(real_fops)) { ++#ifdef MODULE ++ if (real_fops->owner && ++ real_fops->owner->state == MODULE_STATE_GOING) ++ goto out; ++#endif ++ + /* Huh? Module did not cleanup after itself at exit? */ + WARN(1, "debugfs file owner did not clean up at exit: %pd", + dentry); +diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c +index 1e8e1450d5b0..2a09c1e8e539 100644 +--- a/fs/erofs/utils.c ++++ b/fs/erofs/utils.c +@@ -293,7 +293,7 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink, + spin_unlock(&erofs_sb_list_lock); + sbi->shrinker_run_no = run_no; + +- freed += erofs_shrink_workstation(sbi, nr); ++ freed += erofs_shrink_workstation(sbi, nr - freed); + + spin_lock(&erofs_sb_list_lock); + /* Get the next list element before we move this one */ +diff --git a/fs/exec.c b/fs/exec.c +index 74d88dab98dd..ea16e3a6ae19 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -1382,7 +1382,7 @@ void setup_new_exec(struct linux_binprm * bprm) + + /* An exec changes our domain. 
We are no longer part of the thread + group */ +- current->self_exec_id++; ++ WRITE_ONCE(current->self_exec_id, current->self_exec_id + 1); + flush_signal_handlers(current, 0); + } + EXPORT_SYMBOL(setup_new_exec); +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 74a941e920cf..5f90bef8c83e 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -4783,7 +4783,7 @@ static int ext4_inode_blocks_set(handle_t *handle, + struct ext4_inode_info *ei) + { + struct inode *inode = &(ei->vfs_inode); +- u64 i_blocks = inode->i_blocks; ++ u64 i_blocks = READ_ONCE(inode->i_blocks); + struct super_block *sb = inode->i_sb; + + if (i_blocks <= ~0U) { +diff --git a/fs/filesystems.c b/fs/filesystems.c +index 9135646e41ac..5e1a19013373 100644 +--- a/fs/filesystems.c ++++ b/fs/filesystems.c +@@ -271,7 +271,9 @@ struct file_system_type *get_fs_type(const char *name) + fs = __get_fs_type(name, len); + if (!fs && (request_module("fs-%.*s", len, name) == 0)) { + fs = __get_fs_type(name, len); +- WARN_ONCE(!fs, "request_module fs-%.*s succeeded, but still no fs?\n", len, name); ++ if (!fs) ++ pr_warn_once("request_module fs-%.*s succeeded, but still no fs?\n", ++ len, name); + } + + if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c +index b7123de7c180..a5e145d4e991 100644 +--- a/fs/gfs2/glock.c ++++ b/fs/gfs2/glock.c +@@ -645,6 +645,9 @@ __acquires(&gl->gl_lockref.lock) + goto out_unlock; + if (nonblock) + goto out_sched; ++ smp_mb(); ++ if (atomic_read(&gl->gl_revokes) != 0) ++ goto out_sched; + set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); + GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); + gl->gl_target = gl->gl_demote_state; +diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c +index 4ede1f18de85..41542ef428f1 100644 +--- a/fs/gfs2/glops.c ++++ b/fs/gfs2/glops.c +@@ -89,8 +89,32 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl) + INIT_LIST_HEAD(&tr.tr_databuf); + tr.tr_revokes = atomic_read(&gl->gl_ail_count); + +- if (!tr.tr_revokes) ++ if (!tr.tr_revokes) { ++ bool have_revokes; ++ bool log_in_flight; ++ ++ /* ++ * We have nothing on the ail, but there could be revokes on ++ * the sdp revoke queue, in which case, we still want to flush ++ * the log and wait for it to finish. ++ * ++ * If the sdp revoke list is empty too, we might still have an ++ * io outstanding for writing revokes, so we should wait for ++ * it before returning. ++ * ++ * If none of these conditions are true, our revokes are all ++ * flushed and we can return. 
++ */ ++ gfs2_log_lock(sdp); ++ have_revokes = !list_empty(&sdp->sd_log_revokes); ++ log_in_flight = atomic_read(&sdp->sd_log_in_flight); ++ gfs2_log_unlock(sdp); ++ if (have_revokes) ++ goto flush; ++ if (log_in_flight) ++ log_flush_wait(sdp); + return; ++ } + + /* A shortened, inline version of gfs2_trans_begin() + * tr->alloced is not set since the transaction structure is +@@ -105,6 +129,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl) + __gfs2_ail_flush(gl, 0, tr.tr_revokes); + + gfs2_trans_end(sdp); ++flush: + gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | + GFS2_LFC_AIL_EMPTY_GL); + } +diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c +index eb3f2e7b8085..99b33c6f8440 100644 +--- a/fs/gfs2/log.c ++++ b/fs/gfs2/log.c +@@ -516,7 +516,7 @@ static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail) + } + + +-static void log_flush_wait(struct gfs2_sbd *sdp) ++void log_flush_wait(struct gfs2_sbd *sdp) + { + DEFINE_WAIT(wait); + +diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h +index 2ff163a8dce1..76cb79f22599 100644 +--- a/fs/gfs2/log.h ++++ b/fs/gfs2/log.h +@@ -73,6 +73,7 @@ extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, + u32 type); + extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans); + extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc); ++extern void log_flush_wait(struct gfs2_sbd *sdp); + + extern int gfs2_logd(void *data); + extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); +diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c +index e6d554476db4..eeebe80c6be4 100644 +--- a/fs/hfsplus/attributes.c ++++ b/fs/hfsplus/attributes.c +@@ -292,6 +292,10 @@ static int __hfsplus_delete_attr(struct inode *inode, u32 cnid, + return -ENOENT; + } + ++ /* Avoid btree corruption */ ++ hfs_bnode_read(fd->bnode, fd->search_key, ++ fd->keyoffset, fd->keylength); ++ + err = hfs_brec_remove(fd); + if (err) + return err; +diff --git a/fs/io_uring.c b/fs/io_uring.c +index faa0198c99ff..b9777ce36a9b 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -432,6 +432,7 @@ struct io_kiocb { + #define REQ_F_INFLIGHT 16384 /* on inflight list */ + #define REQ_F_COMP_LOCKED 32768 /* completion under lock */ + #define REQ_F_HARDLINK 65536 /* doesn't sever on completion < 0 */ ++ unsigned long fsize; + u64 user_data; + u32 result; + u32 sequence; +@@ -1899,6 +1900,8 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, + if (unlikely(!(req->file->f_mode & FMODE_WRITE))) + return -EBADF; + ++ req->fsize = rlimit(RLIMIT_FSIZE); ++ + if (!req->io) + return 0; + +@@ -1970,10 +1973,17 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt, + } + kiocb->ki_flags |= IOCB_WRITE; + ++ if (!force_nonblock) ++ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize; ++ + if (req->file->f_op->write_iter) + ret2 = call_write_iter(req->file, kiocb, &iter); + else + ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter); ++ ++ if (!force_nonblock) ++ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; ++ + /* + * Raw bdev writes will -EOPNOTSUPP for IOCB_NOWAIT. Just + * retry them without IOCB_NOWAIT. 
+@@ -4162,13 +4172,6 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) + struct sk_buff *skb; + int i, nr_files; + +- if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) { +- unsigned long inflight = ctx->user->unix_inflight + nr; +- +- if (inflight > task_rlimit(current, RLIMIT_NOFILE)) +- return -EMFILE; +- } +- + fpl = kzalloc(sizeof(*fpl), GFP_KERNEL); + if (!fpl) + return -ENOMEM; +diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c +index 5e0e9d29f5c5..0c5db1760741 100644 +--- a/fs/nfs/namespace.c ++++ b/fs/nfs/namespace.c +@@ -143,6 +143,7 @@ struct vfsmount *nfs_d_automount(struct path *path) + struct nfs_server *server = NFS_SERVER(d_inode(path->dentry)); + struct nfs_fh *fh = NULL; + struct nfs_fattr *fattr = NULL; ++ int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout); + + if (IS_ROOT(path->dentry)) + return ERR_PTR(-ESTALE); +@@ -157,12 +158,12 @@ struct vfsmount *nfs_d_automount(struct path *path) + if (IS_ERR(mnt)) + goto out; + +- if (nfs_mountpoint_expiry_timeout < 0) ++ mntget(mnt); /* prevent immediate expiration */ ++ if (timeout <= 0) + goto out; + +- mntget(mnt); /* prevent immediate expiration */ + mnt_set_expiry(mnt, &nfs_automount_list); +- schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout); ++ schedule_delayed_work(&nfs_automount_task, timeout); + + out: + nfs_free_fattr(fattr); +@@ -201,10 +202,11 @@ const struct inode_operations nfs_referral_inode_operations = { + static void nfs_expire_automounts(struct work_struct *work) + { + struct list_head *list = &nfs_automount_list; ++ int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout); + + mark_mounts_for_expiry(list); +- if (!list_empty(list)) +- schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout); ++ if (!list_empty(list) && timeout > 0) ++ schedule_delayed_work(&nfs_automount_task, timeout); + } + + void nfs_release_automount_timer(void) +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c +index 20b3717cd7ca..8b7c525dbbf7 100644 +--- a/fs/nfs/pagelist.c ++++ b/fs/nfs/pagelist.c +@@ -1177,38 +1177,38 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, + if (desc->pg_error < 0) + goto out_failed; + +- for (midx = 0; midx < desc->pg_mirror_count; midx++) { +- if (midx) { +- nfs_page_group_lock(req); +- +- /* find the last request */ +- for (lastreq = req->wb_head; +- lastreq->wb_this_page != req->wb_head; +- lastreq = lastreq->wb_this_page) +- ; +- +- dupreq = nfs_create_subreq(req, lastreq, +- pgbase, offset, bytes); +- +- nfs_page_group_unlock(req); +- if (IS_ERR(dupreq)) { +- desc->pg_error = PTR_ERR(dupreq); +- goto out_failed; +- } +- } else +- dupreq = req; ++ /* Create the mirror instances first, and fire them off */ ++ for (midx = 1; midx < desc->pg_mirror_count; midx++) { ++ nfs_page_group_lock(req); ++ ++ /* find the last request */ ++ for (lastreq = req->wb_head; ++ lastreq->wb_this_page != req->wb_head; ++ lastreq = lastreq->wb_this_page) ++ ; ++ ++ dupreq = nfs_create_subreq(req, lastreq, ++ pgbase, offset, bytes); ++ ++ nfs_page_group_unlock(req); ++ if (IS_ERR(dupreq)) { ++ desc->pg_error = PTR_ERR(dupreq); ++ goto out_failed; ++ } + +- if (nfs_pgio_has_mirroring(desc)) +- desc->pg_mirror_idx = midx; ++ desc->pg_mirror_idx = midx; + if (!nfs_pageio_add_request_mirror(desc, dupreq)) + goto out_cleanup_subreq; + } + ++ desc->pg_mirror_idx = 0; ++ if (!nfs_pageio_add_request_mirror(desc, req)) ++ goto out_failed; ++ + return 1; + + out_cleanup_subreq: +- if (req != dupreq) +- nfs_pageio_cleanup_request(desc, 
dupreq); ++ nfs_pageio_cleanup_request(desc, dupreq); + out_failed: + nfs_pageio_error_cleanup(desc); + return 0; +diff --git a/fs/nfs/write.c b/fs/nfs/write.c +index 913eb37c249b..58c8317dd7d8 100644 +--- a/fs/nfs/write.c ++++ b/fs/nfs/write.c +@@ -441,6 +441,7 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, + } + + subreq->wb_head = subreq; ++ nfs_release_request(old_head); + + if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { + nfs_release_request(subreq); +diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c +index 11b42c523f04..d77c5261c03c 100644 +--- a/fs/nfsd/nfsctl.c ++++ b/fs/nfsd/nfsctl.c +@@ -1333,6 +1333,7 @@ void nfsd_client_rmdir(struct dentry *dentry) + dget(dentry); + ret = simple_rmdir(dir, dentry); + WARN_ON_ONCE(ret); ++ fsnotify_rmdir(dir, dentry); + d_delete(dentry); + inode_unlock(dir); + } +diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c +index 88534eb0e7c2..3d5b6b989db2 100644 +--- a/fs/ocfs2/alloc.c ++++ b/fs/ocfs2/alloc.c +@@ -7403,6 +7403,10 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct ocfs2_inline_data *idata = &di->id2.i_data; + ++ /* No need to punch hole beyond i_size. */ ++ if (start >= i_size_read(inode)) ++ return 0; ++ + if (end > i_size_read(inode)) + end = i_size_read(inode); + +diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c +index 7fbe8f058220..d99b5d39aa90 100644 +--- a/fs/pstore/inode.c ++++ b/fs/pstore/inode.c +@@ -87,11 +87,11 @@ static void *pstore_ftrace_seq_next(struct seq_file *s, void *v, loff_t *pos) + struct pstore_private *ps = s->private; + struct pstore_ftrace_seq_data *data = v; + ++ (*pos)++; + data->off += REC_SIZE; + if (data->off + REC_SIZE > ps->total_size) + return NULL; + +- (*pos)++; + return data; + } + +@@ -101,6 +101,9 @@ static int pstore_ftrace_seq_show(struct seq_file *s, void *v) + struct pstore_ftrace_seq_data *data = v; + struct pstore_ftrace_record *rec; + ++ if (!data) ++ return 0; ++ + rec = (struct pstore_ftrace_record *)(ps->record->buf + data->off); + + seq_printf(s, "CPU:%d ts:%llu %08lx %08lx %ps <- %pS\n", +diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c +index d896457e7c11..408277ee3cdb 100644 +--- a/fs/pstore/platform.c ++++ b/fs/pstore/platform.c +@@ -823,9 +823,9 @@ static int __init pstore_init(void) + + ret = pstore_init_fs(); + if (ret) +- return ret; ++ free_buf_for_compression(); + +- return 0; ++ return ret; + } + late_initcall(pstore_init); + +diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h +index 546e6adfeced..7e28cf6268b1 100644 +--- a/include/acpi/acpixf.h ++++ b/include/acpi/acpixf.h +@@ -752,7 +752,7 @@ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u3 + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void)) +-ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void)) ++ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(u32 gpe_skip_number)) + ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void)) + + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status +diff --git a/include/linux/cpu.h b/include/linux/cpu.h +index 1ca2baf817ed..94cda8c3b5d1 100644 +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -138,12 +138,18 @@ static inline void get_online_cpus(void) { cpus_read_lock(); } + static inline void 
put_online_cpus(void) { cpus_read_unlock(); } + + #ifdef CONFIG_PM_SLEEP_SMP +-extern int freeze_secondary_cpus(int primary); ++int __freeze_secondary_cpus(int primary, bool suspend); ++static inline int freeze_secondary_cpus(int primary) ++{ ++ return __freeze_secondary_cpus(primary, true); ++} ++ + static inline int disable_nonboot_cpus(void) + { +- return freeze_secondary_cpus(0); ++ return __freeze_secondary_cpus(0, false); + } +-extern void enable_nonboot_cpus(void); ++ ++void enable_nonboot_cpus(void); + + static inline int suspend_disable_secondary_cpus(void) + { +diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h +index 4635f95000a4..79a6e37a1d6f 100644 +--- a/include/linux/devfreq_cooling.h ++++ b/include/linux/devfreq_cooling.h +@@ -75,7 +75,7 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *dfc); + + #else /* !CONFIG_DEVFREQ_THERMAL */ + +-struct thermal_cooling_device * ++static inline struct thermal_cooling_device * + of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, + struct devfreq_cooling_power *dfc_power) + { +diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h +index dba15ca8e60b..1dcd9198beb7 100644 +--- a/include/linux/iocontext.h ++++ b/include/linux/iocontext.h +@@ -8,6 +8,7 @@ + + enum { + ICQ_EXITED = 1 << 2, ++ ICQ_DESTROYED = 1 << 3, + }; + + /* +diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h +index 6d0d70f3219c..10f81629b9ce 100644 +--- a/include/linux/nvme-fc-driver.h ++++ b/include/linux/nvme-fc-driver.h +@@ -270,8 +270,6 @@ struct nvme_fc_remote_port { + * + * Host/Initiator Transport Entrypoints/Parameters: + * +- * @module: The LLDD module using the interface +- * + * @localport_delete: The LLDD initiates deletion of a localport via + * nvme_fc_deregister_localport(). However, the teardown is + * asynchronous. This routine is called upon the completion of the +@@ -385,8 +383,6 @@ struct nvme_fc_remote_port { + * Value is Mandatory. Allowed to be zero. 
+ */
+ struct nvme_fc_port_template {
+-	struct module	*module;
+-
+ 	/* initiator-based functions */
+ 	void	(*localport_delete)(struct nvme_fc_local_port *);
+ 	void	(*remoteport_delete)(struct nvme_fc_remote_port *);
+diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
+index 56f1846b9d39..c8e39607dbb7 100644
+--- a/include/linux/pci-epc.h
++++ b/include/linux/pci-epc.h
+@@ -71,6 +71,7 @@ struct pci_epc_ops {
+  * @bitmap: bitmap to manage the PCI address space
+  * @pages: number of bits representing the address region
+  * @page_size: size of each page
++ * @lock: mutex to protect bitmap
+  */
+ struct pci_epc_mem {
+ 	phys_addr_t	phys_base;
+@@ -78,6 +79,8 @@ struct pci_epc_mem {
+ 	unsigned long	*bitmap;
+ 	size_t		page_size;
+ 	int		pages;
++	/* mutex to protect against concurrent access for memory allocation */
++	struct mutex	lock;
+ };
+ 
+ /**
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 716ad1d8d95e..cae52b0e9ff3 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -939,8 +939,8 @@ struct task_struct {
+ 	struct seccomp seccomp;
+ 
+ 	/* Thread group tracking: */
+-	u32 parent_exec_id;
+-	u32 self_exec_id;
++	u64 parent_exec_id;
++	u64 self_exec_id;
+ 
+ 	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
+ 	spinlock_t alloc_lock;
+diff --git a/include/linux/xarray.h b/include/linux/xarray.h
+index f73e1775ded0..51bc10d5f6a8 100644
+--- a/include/linux/xarray.h
++++ b/include/linux/xarray.h
+@@ -1648,6 +1648,7 @@ static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
+ 								xa_mark_t mark)
+ {
+ 	struct xa_node *node = xas->xa_node;
++	void *entry;
+ 	unsigned int offset;
+ 
+ 	if (unlikely(xas_not_node(node) || node->shift))
+@@ -1659,7 +1660,10 @@ static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
+ 		return NULL;
+ 	if (offset == XA_CHUNK_SIZE)
+ 		return xas_find_marked(xas, max, mark);
+-	return xa_entry(xas->xa, node, offset);
++	entry = xa_entry(xas->xa, node, offset);
++	if (!entry)
++		return xas_find_marked(xas, max, mark);
++	return entry;
+ }
+ 
+ /*
+diff --git a/include/media/rc-map.h b/include/media/rc-map.h
+index f99575a0d29c..d22810dcd85c 100644
+--- a/include/media/rc-map.h
++++ b/include/media/rc-map.h
+@@ -274,6 +274,7 @@ struct rc_map *rc_map_get(const char *name);
+ #define RC_MAP_VIDEOMATE_K100            "rc-videomate-k100"
+ #define RC_MAP_VIDEOMATE_S350            "rc-videomate-s350"
+ #define RC_MAP_VIDEOMATE_TV_PVR          "rc-videomate-tv-pvr"
++#define RC_MAP_KII_PRO                   "rc-videostrong-kii-pro"
+ #define RC_MAP_WETEK_HUB                 "rc-wetek-hub"
+ #define RC_MAP_WETEK_PLAY2               "rc-wetek-play2"
+ #define RC_MAP_WINFAST                   "rc-winfast"
+diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
+index 299240df79e4..04e97bab6f28 100644
+--- a/include/net/af_rxrpc.h
++++ b/include/net/af_rxrpc.h
+@@ -16,6 +16,12 @@ struct sock;
+ struct socket;
+ struct rxrpc_call;
+ 
++enum rxrpc_interruptibility {
++	RXRPC_INTERRUPTIBLE,	/* Call is interruptible */
++	RXRPC_PREINTERRUPTIBLE,	/* Call can be cancelled whilst waiting for a slot */
++	RXRPC_UNINTERRUPTIBLE,	/* Call should not be interruptible at all */
++};
++
+ /*
+ * Debug ID counter for tracing. 
+ */ +@@ -41,7 +47,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *, + gfp_t, + rxrpc_notify_rx_t, + bool, +- bool, ++ enum rxrpc_interruptibility, + unsigned int); + int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *, + struct msghdr *, size_t, +diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h +index 697e2c0624dc..50e53387486b 100644 +--- a/include/trace/events/rcu.h ++++ b/include/trace/events/rcu.h +@@ -720,6 +720,7 @@ TRACE_EVENT_RCU(rcu_torture_read, + * "Begin": rcu_barrier() started. + * "EarlyExit": rcu_barrier() piggybacked, thus early exit. + * "Inc1": rcu_barrier() piggyback check counter incremented. ++ * "OfflineNoCBQ": rcu_barrier() found offline no-CBs CPU with callbacks. + * "OnlineQ": rcu_barrier() found online CPU with callbacks. + * "OnlineNQ": rcu_barrier() found online CPU, no callbacks. + * "IRQ": An rcu_barrier_callback() callback posted on remote CPU. +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 79f38a281390..f6476c4e9037 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -5550,6 +5550,70 @@ static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg) + reg->smax_value <= 0 && reg->smin_value >= S32_MIN); + } + ++/* Constrain the possible values of @reg with unsigned upper bound @bound. ++ * If @is_exclusive, @bound is an exclusive limit, otherwise it is inclusive. ++ * If @is_jmp32, @bound is a 32-bit value that only constrains the low 32 bits ++ * of @reg. ++ */ ++static void set_upper_bound(struct bpf_reg_state *reg, u64 bound, bool is_jmp32, ++ bool is_exclusive) ++{ ++ if (is_exclusive) { ++ /* There are no values for `reg` that make `reg<0` true. */ ++ if (bound == 0) ++ return; ++ bound--; ++ } ++ if (is_jmp32) { ++ /* Constrain the register's value in the tnum representation. ++ * For 64-bit comparisons this happens later in ++ * __reg_bound_offset(), but for 32-bit comparisons, we can be ++ * more precise than what can be derived from the updated ++ * numeric bounds. ++ */ ++ struct tnum t = tnum_range(0, bound); ++ ++ t.mask |= ~0xffffffffULL; /* upper half is unknown */ ++ reg->var_off = tnum_intersect(reg->var_off, t); ++ ++ /* Compute the 64-bit bound from the 32-bit bound. */ ++ bound += gen_hi_max(reg->var_off); ++ } ++ reg->umax_value = min(reg->umax_value, bound); ++} ++ ++/* Constrain the possible values of @reg with unsigned lower bound @bound. ++ * If @is_exclusive, @bound is an exclusive limit, otherwise it is inclusive. ++ * If @is_jmp32, @bound is a 32-bit value that only constrains the low 32 bits ++ * of @reg. ++ */ ++static void set_lower_bound(struct bpf_reg_state *reg, u64 bound, bool is_jmp32, ++ bool is_exclusive) ++{ ++ if (is_exclusive) { ++ /* There are no values for `reg` that make `reg>MAX` true. */ ++ if (bound == (is_jmp32 ? U32_MAX : U64_MAX)) ++ return; ++ bound++; ++ } ++ if (is_jmp32) { ++ /* Constrain the register's value in the tnum representation. ++ * For 64-bit comparisons this happens later in ++ * __reg_bound_offset(), but for 32-bit comparisons, we can be ++ * more precise than what can be derived from the updated ++ * numeric bounds. ++ */ ++ struct tnum t = tnum_range(bound, U32_MAX); ++ ++ t.mask |= ~0xffffffffULL; /* upper half is unknown */ ++ reg->var_off = tnum_intersect(reg->var_off, t); ++ ++ /* Compute the 64-bit bound from the 32-bit bound. 
*/ ++ bound += gen_hi_min(reg->var_off); ++ } ++ reg->umin_value = max(reg->umin_value, bound); ++} ++ + /* Adjusts the register min/max values in the case that the dst_reg is the + * variable register that we are working on, and src_reg is a constant or we're + * simply doing a BPF_K check. +@@ -5605,15 +5669,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, + case BPF_JGE: + case BPF_JGT: + { +- u64 false_umax = opcode == BPF_JGT ? val : val - 1; +- u64 true_umin = opcode == BPF_JGT ? val + 1 : val; +- +- if (is_jmp32) { +- false_umax += gen_hi_max(false_reg->var_off); +- true_umin += gen_hi_min(true_reg->var_off); +- } +- false_reg->umax_value = min(false_reg->umax_value, false_umax); +- true_reg->umin_value = max(true_reg->umin_value, true_umin); ++ set_upper_bound(false_reg, val, is_jmp32, opcode == BPF_JGE); ++ set_lower_bound(true_reg, val, is_jmp32, opcode == BPF_JGT); + break; + } + case BPF_JSGE: +@@ -5634,15 +5691,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, + case BPF_JLE: + case BPF_JLT: + { +- u64 false_umin = opcode == BPF_JLT ? val : val + 1; +- u64 true_umax = opcode == BPF_JLT ? val - 1 : val; +- +- if (is_jmp32) { +- false_umin += gen_hi_min(false_reg->var_off); +- true_umax += gen_hi_max(true_reg->var_off); +- } +- false_reg->umin_value = max(false_reg->umin_value, false_umin); +- true_reg->umax_value = min(true_reg->umax_value, true_umax); ++ set_lower_bound(false_reg, val, is_jmp32, opcode == BPF_JLE); ++ set_upper_bound(true_reg, val, is_jmp32, opcode == BPF_JLT); + break; + } + case BPF_JSLE: +@@ -5717,15 +5767,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, + case BPF_JGE: + case BPF_JGT: + { +- u64 false_umin = opcode == BPF_JGT ? val : val + 1; +- u64 true_umax = opcode == BPF_JGT ? val - 1 : val; +- +- if (is_jmp32) { +- false_umin += gen_hi_min(false_reg->var_off); +- true_umax += gen_hi_max(true_reg->var_off); +- } +- false_reg->umin_value = max(false_reg->umin_value, false_umin); +- true_reg->umax_value = min(true_reg->umax_value, true_umax); ++ set_lower_bound(false_reg, val, is_jmp32, opcode == BPF_JGE); ++ set_upper_bound(true_reg, val, is_jmp32, opcode == BPF_JGT); + break; + } + case BPF_JSGE: +@@ -5743,15 +5786,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, + case BPF_JLE: + case BPF_JLT: + { +- u64 false_umax = opcode == BPF_JLT ? val : val - 1; +- u64 true_umin = opcode == BPF_JLT ? val + 1 : val; +- +- if (is_jmp32) { +- false_umax += gen_hi_max(false_reg->var_off); +- true_umin += gen_hi_min(true_reg->var_off); +- } +- false_reg->umax_value = min(false_reg->umax_value, false_umax); +- true_reg->umin_value = max(true_reg->umin_value, true_umin); ++ set_upper_bound(false_reg, val, is_jmp32, opcode == BPF_JLE); ++ set_lower_bound(true_reg, val, is_jmp32, opcode == BPF_JLT); + break; + } + case BPF_JSLE: +diff --git a/kernel/cpu.c b/kernel/cpu.c +index 9c706af713fb..c8e661ee26d3 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -1212,7 +1212,7 @@ EXPORT_SYMBOL_GPL(cpu_up); + #ifdef CONFIG_PM_SLEEP_SMP + static cpumask_var_t frozen_cpus; + +-int freeze_secondary_cpus(int primary) ++int __freeze_secondary_cpus(int primary, bool suspend) + { + int cpu, error = 0; + +@@ -1237,7 +1237,7 @@ int freeze_secondary_cpus(int primary) + if (cpu == primary) + continue; + +- if (pm_wakeup_pending()) { ++ if (suspend && pm_wakeup_pending()) { + pr_info("Wakeup pending. 
Abort CPU freeze\n"); + error = -EBUSY; + break; +diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c +index 12ff766ec1fa..98e3d873792e 100644 +--- a/kernel/dma/mapping.c ++++ b/kernel/dma/mapping.c +@@ -154,6 +154,8 @@ EXPORT_SYMBOL(dma_get_sgtable_attrs); + */ + pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs) + { ++ if (force_dma_unencrypted(dev)) ++ prot = pgprot_decrypted(prot); + if (dev_is_dma_coherent(dev) || + (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) && + (attrs & DMA_ATTR_NON_CONSISTENT))) +diff --git a/kernel/events/core.c b/kernel/events/core.c +index fdb7f7ef380c..78068b57cbba 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -935,16 +935,10 @@ perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) + event->shadow_ctx_time = now - t->timestamp; + } + +-/* +- * Update cpuctx->cgrp so that it is set when first cgroup event is added and +- * cleared when last cgroup event is removed. +- */ + static inline void +-list_update_cgroup_event(struct perf_event *event, +- struct perf_event_context *ctx, bool add) ++perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) + { + struct perf_cpu_context *cpuctx; +- struct list_head *cpuctx_entry; + + if (!is_cgroup_event(event)) + return; +@@ -961,28 +955,41 @@ list_update_cgroup_event(struct perf_event *event, + * because if the first would mismatch, the second would not try again + * and we would leave cpuctx->cgrp unset. + */ +- if (add && !cpuctx->cgrp) { ++ if (ctx->is_active && !cpuctx->cgrp) { + struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); + + if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) + cpuctx->cgrp = cgrp; + } + +- if (add && ctx->nr_cgroups++) ++ if (ctx->nr_cgroups++) + return; +- else if (!add && --ctx->nr_cgroups) ++ ++ list_add(&cpuctx->cgrp_cpuctx_entry, ++ per_cpu_ptr(&cgrp_cpuctx_list, event->cpu)); ++} ++ ++static inline void ++perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) ++{ ++ struct perf_cpu_context *cpuctx; ++ ++ if (!is_cgroup_event(event)) + return; + +- /* no cgroup running */ +- if (!add) ++ /* ++ * Because cgroup events are always per-cpu events, ++ * @ctx == &cpuctx->ctx. 
++ */ ++ cpuctx = container_of(ctx, struct perf_cpu_context, ctx); ++ ++ if (--ctx->nr_cgroups) ++ return; ++ ++ if (ctx->is_active && cpuctx->cgrp) + cpuctx->cgrp = NULL; + +- cpuctx_entry = &cpuctx->cgrp_cpuctx_entry; +- if (add) +- list_add(cpuctx_entry, +- per_cpu_ptr(&cgrp_cpuctx_list, event->cpu)); +- else +- list_del(cpuctx_entry); ++ list_del(&cpuctx->cgrp_cpuctx_entry); + } + + #else /* !CONFIG_CGROUP_PERF */ +@@ -1048,11 +1055,14 @@ static inline u64 perf_cgroup_event_time(struct perf_event *event) + } + + static inline void +-list_update_cgroup_event(struct perf_event *event, +- struct perf_event_context *ctx, bool add) ++perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) + { + } + ++static inline void ++perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) ++{ ++} + #endif + + /* +@@ -1682,13 +1692,14 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) + add_event_to_groups(event, ctx); + } + +- list_update_cgroup_event(event, ctx, true); +- + list_add_rcu(&event->event_entry, &ctx->event_list); + ctx->nr_events++; + if (event->attr.inherit_stat) + ctx->nr_stat++; + ++ if (event->state > PERF_EVENT_STATE_OFF) ++ perf_cgroup_event_enable(event, ctx); ++ + ctx->generation++; + } + +@@ -1864,8 +1875,6 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) + + event->attach_state &= ~PERF_ATTACH_CONTEXT; + +- list_update_cgroup_event(event, ctx, false); +- + ctx->nr_events--; + if (event->attr.inherit_stat) + ctx->nr_stat--; +@@ -1882,8 +1891,10 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) + * of error state is by explicit re-enabling + * of the event + */ +- if (event->state > PERF_EVENT_STATE_OFF) ++ if (event->state > PERF_EVENT_STATE_OFF) { ++ perf_cgroup_event_disable(event, ctx); + perf_event_set_state(event, PERF_EVENT_STATE_OFF); ++ } + + ctx->generation++; + } +@@ -1986,6 +1997,12 @@ static int perf_get_aux_event(struct perf_event *event, + return 1; + } + ++static inline struct list_head *get_event_list(struct perf_event *event) ++{ ++ struct perf_event_context *ctx = event->ctx; ++ return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active; ++} ++ + static void perf_group_detach(struct perf_event *event) + { + struct perf_event *sibling, *tmp; +@@ -2028,12 +2045,8 @@ static void perf_group_detach(struct perf_event *event) + if (!RB_EMPTY_NODE(&event->group_node)) { + add_event_to_groups(sibling, event->ctx); + +- if (sibling->state == PERF_EVENT_STATE_ACTIVE) { +- struct list_head *list = sibling->attr.pinned ? 
+- &ctx->pinned_active : &ctx->flexible_active; +- +- list_add_tail(&sibling->active_list, list); +- } ++ if (sibling->state == PERF_EVENT_STATE_ACTIVE) ++ list_add_tail(&sibling->active_list, get_event_list(sibling)); + } + + WARN_ON_ONCE(sibling->ctx != event->ctx); +@@ -2112,6 +2125,7 @@ event_sched_out(struct perf_event *event, + + if (READ_ONCE(event->pending_disable) >= 0) { + WRITE_ONCE(event->pending_disable, -1); ++ perf_cgroup_event_disable(event, ctx); + state = PERF_EVENT_STATE_OFF; + } + perf_event_set_state(event, state); +@@ -2248,6 +2262,7 @@ static void __perf_event_disable(struct perf_event *event, + event_sched_out(event, cpuctx, ctx); + + perf_event_set_state(event, PERF_EVENT_STATE_OFF); ++ perf_cgroup_event_disable(event, ctx); + } + + /* +@@ -2350,6 +2365,8 @@ event_sched_in(struct perf_event *event, + { + int ret = 0; + ++ WARN_ON_ONCE(event->ctx != ctx); ++ + lockdep_assert_held(&ctx->lock); + + if (event->state <= PERF_EVENT_STATE_OFF) +@@ -2629,7 +2646,7 @@ static int __perf_install_in_context(void *info) + } + + #ifdef CONFIG_CGROUP_PERF +- if (is_cgroup_event(event)) { ++ if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) { + /* + * If the current cgroup doesn't match the event's + * cgroup, we should not try to schedule it. +@@ -2789,6 +2806,7 @@ static void __perf_event_enable(struct perf_event *event, + ctx_sched_out(ctx, cpuctx, EVENT_TIME); + + perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); ++ perf_cgroup_event_enable(event, ctx); + + if (!ctx->is_active) + return; +@@ -3419,15 +3437,11 @@ static int visit_groups_merge(struct perf_event_groups *groups, int cpu, + return 0; + } + +-struct sched_in_data { +- struct perf_event_context *ctx; +- struct perf_cpu_context *cpuctx; +- int can_add_hw; +-}; +- +-static int pinned_sched_in(struct perf_event *event, void *data) ++static int merge_sched_in(struct perf_event *event, void *data) + { +- struct sched_in_data *sid = data; ++ struct perf_event_context *ctx = event->ctx; ++ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); ++ int *can_add_hw = data; + + if (event->state <= PERF_EVENT_STATE_OFF) + return 0; +@@ -3435,39 +3449,19 @@ static int pinned_sched_in(struct perf_event *event, void *data) + if (!event_filter_match(event)) + return 0; + +- if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) { +- if (!group_sched_in(event, sid->cpuctx, sid->ctx)) +- list_add_tail(&event->active_list, &sid->ctx->pinned_active); ++ if (group_can_go_on(event, cpuctx, *can_add_hw)) { ++ if (!group_sched_in(event, cpuctx, ctx)) ++ list_add_tail(&event->active_list, get_event_list(event)); + } + +- /* +- * If this pinned group hasn't been scheduled, +- * put it in error state. 
+- */ +- if (event->state == PERF_EVENT_STATE_INACTIVE) +- perf_event_set_state(event, PERF_EVENT_STATE_ERROR); +- +- return 0; +-} +- +-static int flexible_sched_in(struct perf_event *event, void *data) +-{ +- struct sched_in_data *sid = data; +- +- if (event->state <= PERF_EVENT_STATE_OFF) +- return 0; +- +- if (!event_filter_match(event)) +- return 0; +- +- if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) { +- int ret = group_sched_in(event, sid->cpuctx, sid->ctx); +- if (ret) { +- sid->can_add_hw = 0; +- sid->ctx->rotate_necessary = 1; +- return 0; ++ if (event->state == PERF_EVENT_STATE_INACTIVE) { ++ if (event->attr.pinned) { ++ perf_cgroup_event_disable(event, ctx); ++ perf_event_set_state(event, PERF_EVENT_STATE_ERROR); + } +- list_add_tail(&event->active_list, &sid->ctx->flexible_active); ++ ++ *can_add_hw = 0; ++ ctx->rotate_necessary = 1; + } + + return 0; +@@ -3477,30 +3471,22 @@ static void + ctx_pinned_sched_in(struct perf_event_context *ctx, + struct perf_cpu_context *cpuctx) + { +- struct sched_in_data sid = { +- .ctx = ctx, +- .cpuctx = cpuctx, +- .can_add_hw = 1, +- }; ++ int can_add_hw = 1; + + visit_groups_merge(&ctx->pinned_groups, + smp_processor_id(), +- pinned_sched_in, &sid); ++ merge_sched_in, &can_add_hw); + } + + static void + ctx_flexible_sched_in(struct perf_event_context *ctx, + struct perf_cpu_context *cpuctx) + { +- struct sched_in_data sid = { +- .ctx = ctx, +- .cpuctx = cpuctx, +- .can_add_hw = 1, +- }; ++ int can_add_hw = 1; + + visit_groups_merge(&ctx->flexible_groups, + smp_processor_id(), +- flexible_sched_in, &sid); ++ merge_sched_in, &can_add_hw); + } + + static void +diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c +index a949bd39e343..d44c8fd17609 100644 +--- a/kernel/irq/debugfs.c ++++ b/kernel/irq/debugfs.c +@@ -206,8 +206,15 @@ static ssize_t irq_debug_write(struct file *file, const char __user *user_buf, + chip_bus_lock(desc); + raw_spin_lock_irqsave(&desc->lock, flags); + +- if (irq_settings_is_level(desc) || desc->istate & IRQS_NMI) { +- /* Can't do level nor NMIs, sorry */ ++ /* ++ * Don't allow injection when the interrupt is: ++ * - Level or NMI type ++ * - not activated ++ * - replaying already ++ */ ++ if (irq_settings_is_level(desc) || ++ !irqd_is_activated(&desc->irq_data) || ++ (desc->istate & (IRQS_NMI | IRQS_REPLAY))) { + err = -EINVAL; + } else { + desc->istate |= IRQS_PENDING; +diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c +index 480df3659720..c776b8e86fbc 100644 +--- a/kernel/irq/irqdomain.c ++++ b/kernel/irq/irqdomain.c +@@ -1293,6 +1293,11 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, + unsigned int irq_base, + unsigned int nr_irqs, void *arg) + { ++ if (!domain->ops->alloc) { ++ pr_debug("domain->ops->alloc() is NULL\n"); ++ return -ENOSYS; ++ } ++ + return domain->ops->alloc(domain, irq_base, nr_irqs, arg); + } + +@@ -1330,11 +1335,6 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, + return -EINVAL; + } + +- if (!domain->ops->alloc) { +- pr_debug("domain->ops->alloc() is NULL\n"); +- return -ENOSYS; +- } +- + if (realloc && irq_base >= 0) { + virq = irq_base; + } else { +diff --git a/kernel/kmod.c b/kernel/kmod.c +index bc6addd9152b..a2de58de6ab6 100644 +--- a/kernel/kmod.c ++++ b/kernel/kmod.c +@@ -120,7 +120,7 @@ out: + * invoke it. + * + * If module auto-loading support is disabled then this function +- * becomes a no-operation. ++ * simply returns -ENOENT. + */ + int __request_module(bool wait, const char *fmt, ...) 
+ { +@@ -137,7 +137,7 @@ int __request_module(bool wait, const char *fmt, ...) + WARN_ON_ONCE(wait && current_is_async()); + + if (!modprobe_path[0]) +- return 0; ++ return -ENOENT; + + va_start(args, fmt); + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c +index 32406ef0d6a2..5142a6b11bf5 100644 +--- a/kernel/locking/lockdep.c ++++ b/kernel/locking/lockdep.c +@@ -1719,9 +1719,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class) + this.class = class; + + raw_local_irq_save(flags); ++ current->lockdep_recursion = 1; + arch_spin_lock(&lockdep_lock); + ret = __lockdep_count_forward_deps(&this); + arch_spin_unlock(&lockdep_lock); ++ current->lockdep_recursion = 0; + raw_local_irq_restore(flags); + + return ret; +@@ -1746,9 +1748,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class) + this.class = class; + + raw_local_irq_save(flags); ++ current->lockdep_recursion = 1; + arch_spin_lock(&lockdep_lock); + ret = __lockdep_count_backward_deps(&this); + arch_spin_unlock(&lockdep_lock); ++ current->lockdep_recursion = 0; + raw_local_irq_restore(flags); + + return ret; +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c +index 6145e08a1407..1f830bad65cf 100644 +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -2888,9 +2888,10 @@ static void rcu_barrier_callback(struct rcu_head *rhp) + /* + * Called with preemption disabled, and from cross-cpu IRQ context. + */ +-static void rcu_barrier_func(void *unused) ++static void rcu_barrier_func(void *cpu_in) + { +- struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); ++ uintptr_t cpu = (uintptr_t)cpu_in; ++ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); + + rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); + rdp->barrier_head.func = rcu_barrier_callback; +@@ -2917,7 +2918,7 @@ static void rcu_barrier_func(void *unused) + */ + void rcu_barrier(void) + { +- int cpu; ++ uintptr_t cpu; + struct rcu_data *rdp; + unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); + +@@ -2940,13 +2941,14 @@ void rcu_barrier(void) + rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); + + /* +- * Initialize the count to one rather than to zero in order to +- * avoid a too-soon return to zero in case of a short grace period +- * (or preemption of this task). Exclude CPU-hotplug operations +- * to ensure that no offline CPU has callbacks queued. ++ * Initialize the count to two rather than to zero in order ++ * to avoid a too-soon return to zero in case of an immediate ++ * invocation of the just-enqueued callback (or preemption of ++ * this task). Exclude CPU-hotplug operations to ensure that no ++ * offline non-offloaded CPU has callbacks queued. 
+ */ + init_completion(&rcu_state.barrier_completion); +- atomic_set(&rcu_state.barrier_cpu_count, 1); ++ atomic_set(&rcu_state.barrier_cpu_count, 2); + get_online_cpus(); + + /* +@@ -2956,13 +2958,23 @@ void rcu_barrier(void) + */ + for_each_possible_cpu(cpu) { + rdp = per_cpu_ptr(&rcu_data, cpu); +- if (!cpu_online(cpu) && ++ if (cpu_is_offline(cpu) && + !rcu_segcblist_is_offloaded(&rdp->cblist)) + continue; +- if (rcu_segcblist_n_cbs(&rdp->cblist)) { ++ if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) { + rcu_barrier_trace(TPS("OnlineQ"), cpu, + rcu_state.barrier_sequence); +- smp_call_function_single(cpu, rcu_barrier_func, NULL, 1); ++ smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1); ++ } else if (rcu_segcblist_n_cbs(&rdp->cblist) && ++ cpu_is_offline(cpu)) { ++ rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, ++ rcu_state.barrier_sequence); ++ local_irq_disable(); ++ rcu_barrier_func((void *)cpu); ++ local_irq_enable(); ++ } else if (cpu_is_offline(cpu)) { ++ rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu, ++ rcu_state.barrier_sequence); + } else { + rcu_barrier_trace(TPS("OnlineNQ"), cpu, + rcu_state.barrier_sequence); +@@ -2974,7 +2986,7 @@ void rcu_barrier(void) + * Now that we have an rcu_barrier_callback() callback on each + * CPU, and thus each counted, remove the initial count. + */ +- if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) ++ if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count)) + complete(&rcu_state.barrier_completion); + + /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 9e7768dbd92d..81b0c7ad9f9e 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -3677,7 +3677,6 @@ static void sched_tick_remote(struct work_struct *work) + if (cpu_is_offline(cpu)) + goto out_unlock; + +- curr = rq->curr; + update_rq_clock(rq); + + if (!is_idle_task(curr)) { +diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c +index d43318a489f2..df3577149d2e 100644 +--- a/kernel/sched/cputime.c ++++ b/kernel/sched/cputime.c +@@ -912,8 +912,10 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime) + } while (read_seqcount_retry(&vtime->seqcount, seq)); + } + +-static int vtime_state_check(struct vtime *vtime, int cpu) ++static int vtime_state_fetch(struct vtime *vtime, int cpu) + { ++ int state = READ_ONCE(vtime->state); ++ + /* + * We raced against a context switch, fetch the + * kcpustat task again. +@@ -930,10 +932,10 @@ static int vtime_state_check(struct vtime *vtime, int cpu) + * + * Case 1) is ok but 2) is not. So wait for a safe VTIME state. 
+ */ +- if (vtime->state == VTIME_INACTIVE) ++ if (state == VTIME_INACTIVE) + return -EAGAIN; + +- return 0; ++ return state; + } + + static u64 kcpustat_user_vtime(struct vtime *vtime) +@@ -952,14 +954,15 @@ static int kcpustat_field_vtime(u64 *cpustat, + { + struct vtime *vtime = &tsk->vtime; + unsigned int seq; +- int err; + + do { ++ int state; ++ + seq = read_seqcount_begin(&vtime->seqcount); + +- err = vtime_state_check(vtime, cpu); +- if (err < 0) +- return err; ++ state = vtime_state_fetch(vtime, cpu); ++ if (state < 0) ++ return state; + + *val = cpustat[usage]; + +@@ -972,7 +975,7 @@ static int kcpustat_field_vtime(u64 *cpustat, + */ + switch (usage) { + case CPUTIME_SYSTEM: +- if (vtime->state == VTIME_SYS) ++ if (state == VTIME_SYS) + *val += vtime->stime + vtime_delta(vtime); + break; + case CPUTIME_USER: +@@ -984,11 +987,11 @@ static int kcpustat_field_vtime(u64 *cpustat, + *val += kcpustat_user_vtime(vtime); + break; + case CPUTIME_GUEST: +- if (vtime->state == VTIME_GUEST && task_nice(tsk) <= 0) ++ if (state == VTIME_GUEST && task_nice(tsk) <= 0) + *val += vtime->gtime + vtime_delta(vtime); + break; + case CPUTIME_GUEST_NICE: +- if (vtime->state == VTIME_GUEST && task_nice(tsk) > 0) ++ if (state == VTIME_GUEST && task_nice(tsk) > 0) + *val += vtime->gtime + vtime_delta(vtime); + break; + default: +@@ -1039,23 +1042,23 @@ static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst, + { + struct vtime *vtime = &tsk->vtime; + unsigned int seq; +- int err; + + do { + u64 *cpustat; + u64 delta; ++ int state; + + seq = read_seqcount_begin(&vtime->seqcount); + +- err = vtime_state_check(vtime, cpu); +- if (err < 0) +- return err; ++ state = vtime_state_fetch(vtime, cpu); ++ if (state < 0) ++ return state; + + *dst = *src; + cpustat = dst->cpustat; + + /* Task is sleeping, dead or idle, nothing to add */ +- if (vtime->state < VTIME_SYS) ++ if (state < VTIME_SYS) + continue; + + delta = vtime_delta(vtime); +@@ -1064,15 +1067,15 @@ static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst, + * Task runs either in user (including guest) or kernel space, + * add pending nohz time to the right place. 
+ */
+-	if (vtime->state == VTIME_SYS) {
++	if (state == VTIME_SYS) {
+ 		cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
+-	} else if (vtime->state == VTIME_USER) {
++	} else if (state == VTIME_USER) {
+ 		if (task_nice(tsk) > 0)
+ 			cpustat[CPUTIME_NICE] += vtime->utime + delta;
+ 		else
+ 			cpustat[CPUTIME_USER] += vtime->utime + delta;
+ 	} else {
+-		WARN_ON_ONCE(vtime->state != VTIME_GUEST);
++		WARN_ON_ONCE(state != VTIME_GUEST);
+ 		if (task_nice(tsk) > 0) {
+ 			cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
+ 			cpustat[CPUTIME_NICE] += vtime->gtime + delta;
+@@ -1083,7 +1086,7 @@ static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst, int cpu)
+ 	}
+ } while (read_seqcount_retry(&vtime->seqcount, seq));
+ 
+-	return err;
++	return 0;
+ }
+ 
+ void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 0ff2f43ac9cd..8cb749a7fbe4 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3943,6 +3943,7 @@ static inline void check_schedstat_required(void)
+ #endif
+ }
+ 
++static inline bool cfs_bandwidth_used(void);
+ 
+ /*
+ * MIGRATION
+@@ -4021,10 +4022,16 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ 	__enqueue_entity(cfs_rq, se);
+ 	se->on_rq = 1;
+ 
+-	if (cfs_rq->nr_running == 1) {
++	/*
++	 * When bandwidth control is enabled, cfs might have been removed
++	 * because of a parent being throttled but cfs->nr_running > 1. Try to
++	 * add it unconditionally.
++	 */
++	if (cfs_rq->nr_running == 1 || cfs_bandwidth_used())
+ 		list_add_leaf_cfs_rq(cfs_rq);
++
++	if (cfs_rq->nr_running == 1)
+ 		check_enqueue_throttle(cfs_rq);
+-	}
+ }
+ 
+ static void __clear_buddies_last(struct sched_entity *se)
+@@ -4605,11 +4612,22 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+ 			break;
+ 	}
+ 
+-	assert_list_leaf_cfs_rq(rq);
+-
+ 	if (!se)
+ 		add_nr_running(rq, task_delta);
+ 
++	/*
++	 * The cfs_rq_throttled() breaks in the above iteration can result in
++	 * incomplete leaf list maintenance, triggering the
++	 * assertion below. 
++ */ ++ for_each_sched_entity(se) { ++ cfs_rq = cfs_rq_of(se); ++ ++ list_add_leaf_cfs_rq(cfs_rq); ++ } ++ ++ assert_list_leaf_cfs_rq(rq); ++ + /* Determine whether we need to wake up potentially idle CPU: */ + if (rq->curr == rq->idle && rq->cfs.nr_running) + resched_curr(rq); +@@ -8323,7 +8341,8 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd, + * Computing avg_load makes sense only when group is fully busy or + * overloaded + */ +- if (sgs->group_type < group_fully_busy) ++ if (sgs->group_type == group_fully_busy || ++ sgs->group_type == group_overloaded) + sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / + sgs->group_capacity; + } +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index 280a3c735935..0502ea8e0e62 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -118,7 +118,13 @@ extern long calc_load_fold_active(struct rq *this_rq, long adjust); + #ifdef CONFIG_64BIT + # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) + # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) +-# define scale_load_down(w) ((w) >> SCHED_FIXEDPOINT_SHIFT) ++# define scale_load_down(w) \ ++({ \ ++ unsigned long __w = (w); \ ++ if (__w) \ ++ __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \ ++ __w; \ ++}) + #else + # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) + # define scale_load(w) (w) +diff --git a/kernel/seccomp.c b/kernel/seccomp.c +index b6ea3dcb57bf..683c81e4861e 100644 +--- a/kernel/seccomp.c ++++ b/kernel/seccomp.c +@@ -1221,6 +1221,7 @@ static const struct file_operations seccomp_notify_ops = { + .poll = seccomp_notify_poll, + .release = seccomp_notify_release, + .unlocked_ioctl = seccomp_notify_ioctl, ++ .compat_ioctl = seccomp_notify_ioctl, + }; + + static struct file *init_listener(struct seccomp_filter *filter) +diff --git a/kernel/signal.c b/kernel/signal.c +index eea748174ade..7d3d35eb7a0b 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -1931,7 +1931,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig) + * This is only possible if parent == real_parent. + * Check if it has changed security domain. + */ +- if (tsk->parent_exec_id != tsk->parent->self_exec_id) ++ if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id)) + sig = SIGCHLD; + } + +diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c +index dbd69052eaa6..a5538dd76a81 100644 +--- a/kernel/time/sched_clock.c ++++ b/kernel/time/sched_clock.c +@@ -207,7 +207,8 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate) + + if (sched_clock_timer.function != NULL) { + /* update timeout for clock wrap */ +- hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL); ++ hrtimer_start(&sched_clock_timer, cd.wrap_kt, ++ HRTIMER_MODE_REL_HARD); + } + + r = rate; +@@ -251,9 +252,9 @@ void __init generic_sched_clock_init(void) + * Start the timer to keep sched_clock() properly updated and + * sets the initial epoch. 
+ */ +- hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); + sched_clock_timer.function = sched_clock_poll; +- hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL); ++ hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD); + } + + /* +@@ -290,7 +291,7 @@ void sched_clock_resume(void) + struct clock_read_data *rd = &cd.read_data[0]; + + rd->epoch_cyc = cd.actual_read_sched_clock(); +- hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL); ++ hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD); + rd->read_sched_clock = cd.actual_read_sched_clock; + } + +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c +index e5ef4ae9edb5..0e553b1706d3 100644 +--- a/kernel/trace/bpf_trace.c ++++ b/kernel/trace/bpf_trace.c +@@ -731,7 +731,7 @@ BPF_CALL_1(bpf_send_signal, u32, sig) + if (unlikely(!nmi_uaccess_okay())) + return -EPERM; + +- if (in_nmi()) { ++ if (irqs_disabled()) { + /* Do an early check on signal validity. Otherwise, + * the error is lost in deferred irq_work. + */ +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index 3f54dc2f6e1c..2f0f7fcee73e 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -918,6 +918,8 @@ static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev) + int i; + + seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p'); ++ if (trace_kprobe_is_return(tk) && tk->rp.maxactive) ++ seq_printf(m, "%d", tk->rp.maxactive); + seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp), + trace_probe_name(&tk->tp)); + +diff --git a/lib/test_xarray.c b/lib/test_xarray.c +index 8c7d7a8468b8..d4f97925dbd8 100644 +--- a/lib/test_xarray.c ++++ b/lib/test_xarray.c +@@ -1156,6 +1156,42 @@ static noinline void check_find_entry(struct xarray *xa) + XA_BUG_ON(xa, !xa_empty(xa)); + } + ++static noinline void check_pause(struct xarray *xa) ++{ ++ XA_STATE(xas, xa, 0); ++ void *entry; ++ unsigned int order; ++ unsigned long index = 1; ++ unsigned int count = 0; ++ ++ for (order = 0; order < order_limit; order++) { ++ XA_BUG_ON(xa, xa_store_order(xa, index, order, ++ xa_mk_index(index), GFP_KERNEL)); ++ index += 1UL << order; ++ } ++ ++ rcu_read_lock(); ++ xas_for_each(&xas, entry, ULONG_MAX) { ++ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count)); ++ count++; ++ } ++ rcu_read_unlock(); ++ XA_BUG_ON(xa, count != order_limit); ++ ++ count = 0; ++ xas_set(&xas, 0); ++ rcu_read_lock(); ++ xas_for_each(&xas, entry, ULONG_MAX) { ++ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count)); ++ count++; ++ xas_pause(&xas); ++ } ++ rcu_read_unlock(); ++ XA_BUG_ON(xa, count != order_limit); ++ ++ xa_destroy(xa); ++} ++ + static noinline void check_move_tiny(struct xarray *xa) + { + XA_STATE(xas, xa, 0); +@@ -1664,6 +1700,7 @@ static int xarray_checks(void) + check_xa_alloc(); + check_find(&array); + check_find_entry(&array); ++ check_pause(&array); + check_account(&array); + check_destroy(&array); + check_move(&array); +diff --git a/lib/xarray.c b/lib/xarray.c +index acd1fad2e862..08d71c7b7599 100644 +--- a/lib/xarray.c ++++ b/lib/xarray.c +@@ -970,7 +970,7 @@ void xas_pause(struct xa_state *xas) + + xas->xa_node = XAS_RESTART; + if (node) { +- unsigned int offset = xas->xa_offset; ++ unsigned long offset = xas->xa_offset; + while (++offset < XA_CHUNK_SIZE) { + if (!xa_is_sibling(xa_entry(xas->xa, node, offset))) + break; +@@ -1208,6 +1208,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned 
long max, xa_mark_t mark)
+ }
+
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
++ if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
++ continue;
+ if (!xa_is_node(entry))
+ return entry;
+ xas->xa_node = xa_to_node(entry);
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 8ff9ef8503be..4c7a6e5c0912 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2324,6 +2324,9 @@ static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
+ usage = page_counter_read(&memcg->memory);
+ high = READ_ONCE(memcg->high);
+
++ if (usage <= high)
++ continue;
++
+ /*
+ * Prevent division by 0 in overage calculation by acting as if
+ * it was a threshold of 1 page
+diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
+index bad0d6adcc49..15ee92d79581 100644
+--- a/net/rxrpc/af_rxrpc.c
++++ b/net/rxrpc/af_rxrpc.c
+@@ -285,7 +285,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
+ gfp_t gfp,
+ rxrpc_notify_rx_t notify_rx,
+ bool upgrade,
+- bool intr,
++ enum rxrpc_interruptibility interruptibility,
+ unsigned int debug_id)
+ {
+ struct rxrpc_conn_parameters cp;
+@@ -310,7 +310,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
+ memset(&p, 0, sizeof(p));
+ p.user_call_ID = user_call_ID;
+ p.tx_total_len = tx_total_len;
+- p.intr = intr;
++ p.interruptibility = interruptibility;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.local = rx->local;
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index 394d18857979..3eb1ab40ca5c 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -489,7 +489,6 @@ enum rxrpc_call_flag {
+ RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */
+ RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */
+ RXRPC_CALL_RX_UNDERRUN, /* Got data underrun */
+- RXRPC_CALL_IS_INTR, /* The call is interruptible */
+ RXRPC_CALL_DISCONNECTED, /* The call has been disconnected */
+ };
+
+@@ -598,6 +597,7 @@ struct rxrpc_call {
+ atomic_t usage;
+ u16 service_id; /* service ID */
+ u8 security_ix; /* Security type */
++ enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */
+ u32 call_id; /* call ID on connection */
+ u32 cid; /* connection ID plus channel index */
+ int debug_id; /* debug ID for printks */
+@@ -720,7 +720,7 @@ struct rxrpc_call_params {
+ u32 normal; /* Max time since last call packet (msec) */
+ } timeouts;
+ u8 nr_timeouts; /* Number of timeouts specified */
+- bool intr; /* The call is interruptible */
++ enum rxrpc_interruptibility interruptibility; /* How interruptible is the call? 
*/ + }; + + struct rxrpc_send_params { +diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c +index c9f34b0a11df..f07970207b54 100644 +--- a/net/rxrpc/call_object.c ++++ b/net/rxrpc/call_object.c +@@ -237,8 +237,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, + return call; + } + +- if (p->intr) +- __set_bit(RXRPC_CALL_IS_INTR, &call->flags); ++ call->interruptibility = p->interruptibility; + call->tx_total_len = p->tx_total_len; + trace_rxrpc_call(call->debug_id, rxrpc_call_new_client, + atomic_read(&call->usage), +diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c +index ea7d4c21f889..f2a1a5dbb5a7 100644 +--- a/net/rxrpc/conn_client.c ++++ b/net/rxrpc/conn_client.c +@@ -655,13 +655,20 @@ static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp) + + add_wait_queue_exclusive(&call->waitq, &myself); + for (;;) { +- if (test_bit(RXRPC_CALL_IS_INTR, &call->flags)) ++ switch (call->interruptibility) { ++ case RXRPC_INTERRUPTIBLE: ++ case RXRPC_PREINTERRUPTIBLE: + set_current_state(TASK_INTERRUPTIBLE); +- else ++ break; ++ case RXRPC_UNINTERRUPTIBLE: ++ default: + set_current_state(TASK_UNINTERRUPTIBLE); ++ break; ++ } + if (call->call_id) + break; +- if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) && ++ if ((call->interruptibility == RXRPC_INTERRUPTIBLE || ++ call->interruptibility == RXRPC_PREINTERRUPTIBLE) && + signal_pending(current)) { + ret = -ERESTARTSYS; + break; +diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c +index 136eb465bfcb..0fcf157aa09f 100644 +--- a/net/rxrpc/sendmsg.c ++++ b/net/rxrpc/sendmsg.c +@@ -17,6 +17,21 @@ + #include <net/af_rxrpc.h> + #include "ar-internal.h" + ++/* ++ * Return true if there's sufficient Tx queue space. ++ */ ++static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win) ++{ ++ unsigned int win_size = ++ min_t(unsigned int, call->tx_winsize, ++ call->cong_cwnd + call->cong_extra); ++ rxrpc_seq_t tx_win = READ_ONCE(call->tx_hard_ack); ++ ++ if (_tx_win) ++ *_tx_win = tx_win; ++ return call->tx_top - tx_win < win_size; ++} ++ + /* + * Wait for space to appear in the Tx queue or a signal to occur. + */ +@@ -26,9 +41,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx, + { + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); +- if (call->tx_top - call->tx_hard_ack < +- min_t(unsigned int, call->tx_winsize, +- call->cong_cwnd + call->cong_extra)) ++ if (rxrpc_check_tx_space(call, NULL)) + return 0; + + if (call->state >= RXRPC_CALL_COMPLETE) +@@ -49,7 +62,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx, + * Wait for space to appear in the Tx queue uninterruptibly, but with + * a timeout of 2*RTT if no progress was made and a signal occurred. 
+ */ +-static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx, ++static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx, + struct rxrpc_call *call) + { + rxrpc_seq_t tx_start, tx_win; +@@ -68,16 +81,13 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx, + set_current_state(TASK_UNINTERRUPTIBLE); + + tx_win = READ_ONCE(call->tx_hard_ack); +- if (call->tx_top - tx_win < +- min_t(unsigned int, call->tx_winsize, +- call->cong_cwnd + call->cong_extra)) ++ if (rxrpc_check_tx_space(call, &tx_win)) + return 0; + + if (call->state >= RXRPC_CALL_COMPLETE) + return call->error; + +- if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) && +- timeout == 0 && ++ if (timeout == 0 && + tx_win == tx_start && signal_pending(current)) + return -EINTR; + +@@ -91,6 +101,26 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx, + } + } + ++/* ++ * Wait for space to appear in the Tx queue uninterruptibly. ++ */ ++static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx, ++ struct rxrpc_call *call, ++ long *timeo) ++{ ++ for (;;) { ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ if (rxrpc_check_tx_space(call, NULL)) ++ return 0; ++ ++ if (call->state >= RXRPC_CALL_COMPLETE) ++ return call->error; ++ ++ trace_rxrpc_transmit(call, rxrpc_transmit_wait); ++ *timeo = schedule_timeout(*timeo); ++ } ++} ++ + /* + * wait for space to appear in the transmit/ACK window + * - caller holds the socket locked +@@ -108,10 +138,19 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx, + + add_wait_queue(&call->waitq, &myself); + +- if (waitall) +- ret = rxrpc_wait_for_tx_window_nonintr(rx, call); +- else +- ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo); ++ switch (call->interruptibility) { ++ case RXRPC_INTERRUPTIBLE: ++ if (waitall) ++ ret = rxrpc_wait_for_tx_window_waitall(rx, call); ++ else ++ ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo); ++ break; ++ case RXRPC_PREINTERRUPTIBLE: ++ case RXRPC_UNINTERRUPTIBLE: ++ default: ++ ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo); ++ break; ++ } + + remove_wait_queue(&call->waitq, &myself); + set_current_state(TASK_RUNNING); +@@ -302,9 +341,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, + + _debug("alloc"); + +- if (call->tx_top - call->tx_hard_ack >= +- min_t(unsigned int, call->tx_winsize, +- call->cong_cwnd + call->cong_extra)) { ++ if (!rxrpc_check_tx_space(call, NULL)) { + ret = -EAGAIN; + if (msg->msg_flags & MSG_DONTWAIT) + goto maybe_error; +@@ -619,7 +656,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) + .call.tx_total_len = -1, + .call.user_call_ID = 0, + .call.nr_timeouts = 0, +- .call.intr = true, ++ .call.interruptibility = RXRPC_INTERRUPTIBLE, + .abort_code = 0, + .command = RXRPC_CMD_SEND_DATA, + .exclusive = false, +diff --git a/net/wireless/scan.c b/net/wireless/scan.c +index aef240fdf8df..328402ab64a3 100644 +--- a/net/wireless/scan.c ++++ b/net/wireless/scan.c +@@ -2022,7 +2022,11 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev, + + spin_lock_bh(&rdev->bss_lock); + +- if (WARN_ON(cbss->pub.channel == chan)) ++ /* ++ * Some APs use CSA also for bandwidth changes, i.e., without actually ++ * changing the control channel, so no need to update in such a case. 
++ */ ++ if (cbss->pub.channel == chan) + goto done; + + /* use transmitting bss */ +diff --git a/security/keys/key.c b/security/keys/key.c +index 764f4c57913e..e9845d0d8d34 100644 +--- a/security/keys/key.c ++++ b/security/keys/key.c +@@ -381,7 +381,7 @@ int key_payload_reserve(struct key *key, size_t datalen) + spin_lock(&key->user->lock); + + if (delta > 0 && +- (key->user->qnbytes + delta >= maxbytes || ++ (key->user->qnbytes + delta > maxbytes || + key->user->qnbytes + delta < key->user->qnbytes)) { + ret = -EDQUOT; + } +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c +index 9b898c969558..d1a3dea58dee 100644 +--- a/security/keys/keyctl.c ++++ b/security/keys/keyctl.c +@@ -937,8 +937,8 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group) + key_quota_root_maxbytes : key_quota_maxbytes; + + spin_lock(&newowner->lock); +- if (newowner->qnkeys + 1 >= maxkeys || +- newowner->qnbytes + key->quotalen >= maxbytes || ++ if (newowner->qnkeys + 1 > maxkeys || ++ newowner->qnbytes + key->quotalen > maxbytes || + newowner->qnbytes + key->quotalen < + newowner->qnbytes) + goto quota_overrun; +diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c +index 732bbede7ebf..8539047145de 100644 +--- a/sound/core/oss/pcm_plugin.c ++++ b/sound/core/oss/pcm_plugin.c +@@ -196,7 +196,9 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin) + return 0; + } + +-snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t drv_frames) ++static snd_pcm_sframes_t plug_client_size(struct snd_pcm_substream *plug, ++ snd_pcm_uframes_t drv_frames, ++ bool check_size) + { + struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next; + int stream; +@@ -209,7 +211,7 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p + if (stream == SNDRV_PCM_STREAM_PLAYBACK) { + plugin = snd_pcm_plug_last(plug); + while (plugin && drv_frames > 0) { +- if (drv_frames > plugin->buf_frames) ++ if (check_size && drv_frames > plugin->buf_frames) + drv_frames = plugin->buf_frames; + plugin_prev = plugin->prev; + if (plugin->src_frames) +@@ -222,7 +224,7 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p + plugin_next = plugin->next; + if (plugin->dst_frames) + drv_frames = plugin->dst_frames(plugin, drv_frames); +- if (drv_frames > plugin->buf_frames) ++ if (check_size && drv_frames > plugin->buf_frames) + drv_frames = plugin->buf_frames; + plugin = plugin_next; + } +@@ -231,7 +233,9 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p + return drv_frames; + } + +-snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t clt_frames) ++static snd_pcm_sframes_t plug_slave_size(struct snd_pcm_substream *plug, ++ snd_pcm_uframes_t clt_frames, ++ bool check_size) + { + struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next; + snd_pcm_sframes_t frames; +@@ -252,14 +256,14 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc + if (frames < 0) + return frames; + } +- if (frames > plugin->buf_frames) ++ if (check_size && frames > plugin->buf_frames) + frames = plugin->buf_frames; + plugin = plugin_next; + } + } else if (stream == SNDRV_PCM_STREAM_CAPTURE) { + plugin = snd_pcm_plug_last(plug); + while (plugin) { +- if (frames > plugin->buf_frames) ++ if (check_size && frames > plugin->buf_frames) + frames = plugin->buf_frames; + plugin_prev = plugin->prev; + if (plugin->src_frames) { +@@ -274,6 +278,18 @@ 
snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc + return frames; + } + ++snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, ++ snd_pcm_uframes_t drv_frames) ++{ ++ return plug_client_size(plug, drv_frames, false); ++} ++ ++snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, ++ snd_pcm_uframes_t clt_frames) ++{ ++ return plug_slave_size(plug, clt_frames, false); ++} ++ + static int snd_pcm_plug_formats(const struct snd_mask *mask, + snd_pcm_format_t format) + { +@@ -630,7 +646,7 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st + src_channels = dst_channels; + plugin = next; + } +- return snd_pcm_plug_client_size(plug, frames); ++ return plug_client_size(plug, frames, true); + } + + snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, struct snd_pcm_plugin_channel *dst_channels_final, snd_pcm_uframes_t size) +@@ -640,7 +656,7 @@ snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, str + snd_pcm_sframes_t frames = size; + int err; + +- frames = snd_pcm_plug_slave_size(plug, frames); ++ frames = plug_slave_size(plug, frames, true); + if (frames < 0) + return frames; + +diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c +index b7d9160ed868..c6e1e03a5e4d 100644 +--- a/sound/pci/hda/hda_beep.c ++++ b/sound/pci/hda/hda_beep.c +@@ -290,8 +290,12 @@ int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol, + { + struct hda_codec *codec = snd_kcontrol_chip(kcontrol); + struct hda_beep *beep = codec->beep; ++ int chs = get_amp_channels(kcontrol); ++ + if (beep && (!beep->enabled || !ctl_has_mute(kcontrol))) { +- ucontrol->value.integer.value[0] = ++ if (chs & 1) ++ ucontrol->value.integer.value[0] = beep->enabled; ++ if (chs & 2) + ucontrol->value.integer.value[1] = beep->enabled; + return 0; + } +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 9d3b28bcba9b..d2d5a19bdecc 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -2074,6 +2074,17 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream, + #endif + } + ++/* Blacklist for skipping the whole probe: ++ * some HD-audio PCI entries are exposed without any codecs, and such devices ++ * should be ignored from the beginning. 
++ */ ++static const struct snd_pci_quirk driver_blacklist[] = { ++ SND_PCI_QUIRK(0x1043, 0x874f, "ASUS ROG Zenith II / Strix", 0), ++ SND_PCI_QUIRK(0x1462, 0xcb59, "MSI TRX40 Creator", 0), ++ SND_PCI_QUIRK(0x1462, 0xcb60, "MSI TRX40", 0), ++ {} ++}; ++ + static const struct hda_controller_ops pci_hda_ops = { + .disable_msi_reset_irq = disable_msi_reset_irq, + .pcm_mmap_prepare = pcm_mmap_prepare, +@@ -2090,6 +2101,11 @@ static int azx_probe(struct pci_dev *pci, + bool schedule_probe; + int err; + ++ if (snd_pci_quirk_lookup(pci, driver_blacklist)) { ++ dev_info(&pci->dev, "Skipping the blacklisted device\n"); ++ return -ENODEV; ++ } ++ + if (dev >= SNDRV_CARDS) + return -ENODEV; + if (!enable[dev]) { +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index f44d8e258933..3bb9821f1a48 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -107,6 +107,7 @@ struct alc_spec { + unsigned int done_hp_init:1; + unsigned int no_shutup_pins:1; + unsigned int ultra_low_power:1; ++ unsigned int has_hs_key:1; + + /* for PLL fix */ + hda_nid_t pll_nid; +@@ -367,7 +368,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) + case 0x10ec0215: + case 0x10ec0233: + case 0x10ec0235: ++ case 0x10ec0236: + case 0x10ec0255: ++ case 0x10ec0256: + case 0x10ec0257: + case 0x10ec0282: + case 0x10ec0283: +@@ -379,11 +382,6 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) + case 0x10ec0300: + alc_update_coef_idx(codec, 0x10, 1<<9, 0); + break; +- case 0x10ec0236: +- case 0x10ec0256: +- alc_write_coef_idx(codec, 0x36, 0x5757); +- alc_update_coef_idx(codec, 0x10, 1<<9, 0); +- break; + case 0x10ec0275: + alc_update_coef_idx(codec, 0xe, 0, 1<<0); + break; +@@ -2449,6 +2447,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), + SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950), ++ SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), +@@ -2982,6 +2981,107 @@ static int alc269_parse_auto_config(struct hda_codec *codec) + return alc_parse_auto_config(codec, alc269_ignore, ssids); + } + ++static const struct hda_jack_keymap alc_headset_btn_keymap[] = { ++ { SND_JACK_BTN_0, KEY_PLAYPAUSE }, ++ { SND_JACK_BTN_1, KEY_VOICECOMMAND }, ++ { SND_JACK_BTN_2, KEY_VOLUMEUP }, ++ { SND_JACK_BTN_3, KEY_VOLUMEDOWN }, ++ {} ++}; ++ ++static void alc_headset_btn_callback(struct hda_codec *codec, ++ struct hda_jack_callback *jack) ++{ ++ int report = 0; ++ ++ if (jack->unsol_res & (7 << 13)) ++ report |= SND_JACK_BTN_0; ++ ++ if (jack->unsol_res & (1 << 16 | 3 << 8)) ++ report |= SND_JACK_BTN_1; ++ ++ /* Volume up key */ ++ if (jack->unsol_res & (7 << 23)) ++ report |= SND_JACK_BTN_2; ++ ++ /* Volume down key */ ++ if (jack->unsol_res & (7 << 10)) ++ report |= SND_JACK_BTN_3; ++ ++ jack->jack->button_state = report; ++} ++ ++static void alc_disable_headset_jack_key(struct hda_codec *codec) ++{ ++ struct alc_spec *spec = codec->spec; ++ ++ if (!spec->has_hs_key) ++ return; ++ ++ switch (codec->core.vendor_id) { ++ case 0x10ec0215: ++ case 0x10ec0225: ++ case 0x10ec0285: ++ case 0x10ec0295: ++ case 0x10ec0289: ++ case 0x10ec0299: ++ 
alc_write_coef_idx(codec, 0x48, 0x0); ++ alc_update_coef_idx(codec, 0x49, 0x0045, 0x0); ++ alc_update_coef_idx(codec, 0x44, 0x0045 << 8, 0x0); ++ break; ++ case 0x10ec0236: ++ case 0x10ec0256: ++ alc_write_coef_idx(codec, 0x48, 0x0); ++ alc_update_coef_idx(codec, 0x49, 0x0045, 0x0); ++ break; ++ } ++} ++ ++static void alc_enable_headset_jack_key(struct hda_codec *codec) ++{ ++ struct alc_spec *spec = codec->spec; ++ ++ if (!spec->has_hs_key) ++ return; ++ ++ switch (codec->core.vendor_id) { ++ case 0x10ec0215: ++ case 0x10ec0225: ++ case 0x10ec0285: ++ case 0x10ec0295: ++ case 0x10ec0289: ++ case 0x10ec0299: ++ alc_write_coef_idx(codec, 0x48, 0xd011); ++ alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045); ++ alc_update_coef_idx(codec, 0x44, 0x007f << 8, 0x0045 << 8); ++ break; ++ case 0x10ec0236: ++ case 0x10ec0256: ++ alc_write_coef_idx(codec, 0x48, 0xd011); ++ alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045); ++ break; ++ } ++} ++ ++static void alc_fixup_headset_jack(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ struct alc_spec *spec = codec->spec; ++ ++ switch (action) { ++ case HDA_FIXUP_ACT_PRE_PROBE: ++ spec->has_hs_key = 1; ++ snd_hda_jack_detect_enable_callback(codec, 0x55, ++ alc_headset_btn_callback); ++ snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false, ++ SND_JACK_HEADSET, alc_headset_btn_keymap); ++ break; ++ case HDA_FIXUP_ACT_INIT: ++ alc_enable_headset_jack_key(codec); ++ break; ++ } ++} ++ + static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up) + { + alc_update_coef_idx(codec, 0x04, 1 << 11, power_up ? (1 << 11) : 0); +@@ -3269,7 +3369,13 @@ static void alc256_init(struct hda_codec *codec) + alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */ + alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 1 << 15); /* Clear bit */ + alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 0 << 15); +- alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/ ++ /* ++ * Expose headphone mic (or possibly Line In on some machines) instead ++ * of PC Beep on 1Ah, and disable 1Ah loopback for all outputs. See ++ * Documentation/sound/hd-audio/realtek-pc-beep.rst for details of ++ * this register. ++ */ ++ alc_write_coef_idx(codec, 0x36, 0x5757); + } + + static void alc256_shutup(struct hda_codec *codec) +@@ -3372,6 +3478,8 @@ static void alc225_shutup(struct hda_codec *codec) + + if (!hp_pin) + hp_pin = 0x21; ++ ++ alc_disable_headset_jack_key(codec); + /* 3k pull low control for Headset jack. 
*/ + alc_update_coef_idx(codec, 0x4a, 0, 3 << 10); + +@@ -3411,6 +3519,9 @@ static void alc225_shutup(struct hda_codec *codec) + alc_update_coef_idx(codec, 0x4a, 3<<4, 2<<4); + msleep(30); + } ++ ++ alc_update_coef_idx(codec, 0x4a, 3 << 10, 0); ++ alc_enable_headset_jack_key(codec); + } + + static void alc_default_init(struct hda_codec *codec) +@@ -4008,6 +4119,12 @@ static void alc269_fixup_hp_gpio_led(struct hda_codec *codec, + alc_fixup_hp_gpio_led(codec, action, 0x08, 0x10); + } + ++static void alc285_fixup_hp_gpio_led(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ alc_fixup_hp_gpio_led(codec, action, 0x04, 0x00); ++} ++ + static void alc286_fixup_hp_gpio_led(struct hda_codec *codec, + const struct hda_fixup *fix, int action) + { +@@ -5375,17 +5492,6 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec, + } + } + +-static void alc256_fixup_dell_xps_13_headphone_noise2(struct hda_codec *codec, +- const struct hda_fixup *fix, +- int action) +-{ +- if (action != HDA_FIXUP_ACT_PRE_PROBE) +- return; +- +- snd_hda_codec_amp_stereo(codec, 0x1a, HDA_INPUT, 0, HDA_AMP_VOLMASK, 1); +- snd_hda_override_wcaps(codec, 0x1a, get_wcaps(codec, 0x1a) & ~AC_WCAP_IN_AMP); +-} +- + static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +@@ -5662,69 +5768,6 @@ static void alc285_fixup_invalidate_dacs(struct hda_codec *codec, + snd_hda_override_wcaps(codec, 0x03, 0); + } + +-static const struct hda_jack_keymap alc_headset_btn_keymap[] = { +- { SND_JACK_BTN_0, KEY_PLAYPAUSE }, +- { SND_JACK_BTN_1, KEY_VOICECOMMAND }, +- { SND_JACK_BTN_2, KEY_VOLUMEUP }, +- { SND_JACK_BTN_3, KEY_VOLUMEDOWN }, +- {} +-}; +- +-static void alc_headset_btn_callback(struct hda_codec *codec, +- struct hda_jack_callback *jack) +-{ +- int report = 0; +- +- if (jack->unsol_res & (7 << 13)) +- report |= SND_JACK_BTN_0; +- +- if (jack->unsol_res & (1 << 16 | 3 << 8)) +- report |= SND_JACK_BTN_1; +- +- /* Volume up key */ +- if (jack->unsol_res & (7 << 23)) +- report |= SND_JACK_BTN_2; +- +- /* Volume down key */ +- if (jack->unsol_res & (7 << 10)) +- report |= SND_JACK_BTN_3; +- +- jack->jack->button_state = report; +-} +- +-static void alc_fixup_headset_jack(struct hda_codec *codec, +- const struct hda_fixup *fix, int action) +-{ +- +- switch (action) { +- case HDA_FIXUP_ACT_PRE_PROBE: +- snd_hda_jack_detect_enable_callback(codec, 0x55, +- alc_headset_btn_callback); +- snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false, +- SND_JACK_HEADSET, alc_headset_btn_keymap); +- break; +- case HDA_FIXUP_ACT_INIT: +- switch (codec->core.vendor_id) { +- case 0x10ec0215: +- case 0x10ec0225: +- case 0x10ec0285: +- case 0x10ec0295: +- case 0x10ec0289: +- case 0x10ec0299: +- alc_write_coef_idx(codec, 0x48, 0xd011); +- alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045); +- alc_update_coef_idx(codec, 0x44, 0x007f << 8, 0x0045 << 8); +- break; +- case 0x10ec0236: +- case 0x10ec0256: +- alc_write_coef_idx(codec, 0x48, 0xd011); +- alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045); +- break; +- } +- break; +- } +-} +- + static void alc295_fixup_chromebook(struct hda_codec *codec, + const struct hda_fixup *fix, int action) + { +@@ -5863,8 +5906,6 @@ enum { + ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, + ALC275_FIXUP_DELL_XPS, +- ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, +- ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2, + ALC293_FIXUP_LENOVO_SPK_NOISE, + ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, + ALC255_FIXUP_DELL_SPK_NOISE, +@@ 
-5923,6 +5964,7 @@ enum { + ALC294_FIXUP_ASUS_DUAL_SPK, + ALC285_FIXUP_THINKPAD_HEADSET_JACK, + ALC294_FIXUP_ASUS_HPE, ++ ALC285_FIXUP_HP_GPIO_LED, + }; + + static const struct hda_fixup alc269_fixups[] = { +@@ -6604,23 +6646,6 @@ static const struct hda_fixup alc269_fixups[] = { + {} + } + }, +- [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE] = { +- .type = HDA_FIXUP_VERBS, +- .v.verbs = (const struct hda_verb[]) { +- /* Disable pass-through path for FRONT 14h */ +- {0x20, AC_VERB_SET_COEF_INDEX, 0x36}, +- {0x20, AC_VERB_SET_PROC_COEF, 0x1737}, +- {} +- }, +- .chained = true, +- .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE +- }, +- [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2] = { +- .type = HDA_FIXUP_FUNC, +- .v.func = alc256_fixup_dell_xps_13_headphone_noise2, +- .chained = true, +- .chain_id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE +- }, + [ALC293_FIXUP_LENOVO_SPK_NOISE] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_disable_aamix, +@@ -7061,6 +7086,10 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC + }, ++ [ALC285_FIXUP_HP_GPIO_LED] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc285_fixup_hp_gpio_led, ++ }, + }; + + static const struct snd_pci_quirk alc269_fixup_tbl[] = { +@@ -7114,17 +7143,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), + SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), + SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), +- SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2), + SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), + SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), + SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP), +- SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2), + SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME), + SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), + SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3), + SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), + SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE), +- SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2), + SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), + SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), + SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), +@@ -7208,6 +7234,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), + SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), + SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), ++ SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), + SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), +@@ -7299,6 +7326,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", 
ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
+ SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
++ SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
+ SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+@@ -7477,7 +7505,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {.id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc298-dell1"},
+ {.id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, .name = "alc298-dell-aio"},
+ {.id = ALC275_FIXUP_DELL_XPS, .name = "alc275-dell-xps"},
+- {.id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, .name = "alc256-dell-xps13"},
+ {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"},
+ {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"},
+ {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
+diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
+index 9d71e9d5c9a0..3cf41c11a405 100644
+--- a/sound/pci/ice1712/prodigy_hifi.c
++++ b/sound/pci/ice1712/prodigy_hifi.c
+@@ -536,7 +536,7 @@ static int wm_adc_mux_enum_get(struct snd_kcontrol *kcontrol,
+ struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
+
+ mutex_lock(&ice->gpio_mutex);
+- ucontrol->value.integer.value[0] = wm_get(ice, WM_ADC_MUX) & 0x1f;
++ ucontrol->value.enumerated.item[0] = wm_get(ice, WM_ADC_MUX) & 0x1f;
+ mutex_unlock(&ice->gpio_mutex);
+ return 0;
+ }
+@@ -550,7 +550,7 @@ static int wm_adc_mux_enum_put(struct snd_kcontrol *kcontrol,
+
+ mutex_lock(&ice->gpio_mutex);
+ oval = wm_get(ice, WM_ADC_MUX);
+- nval = (oval & 0xe0) | ucontrol->value.integer.value[0];
++ nval = (oval & 0xe0) | ucontrol->value.enumerated.item[0];
+ if (nval != oval) {
+ wm_put(ice, WM_ADC_MUX, nval);
+ change = 1;
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index ebd785f9aa46..e0ff40b10d85 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -802,7 +802,13 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i,
+ val = max - val;
+ p->connect = !!val;
+ } else {
+- p->connect = 0;
++ /* since a virtual mixer has no backing registers to
++ * decide which path to connect, it will try to match
++ * with the initial state. This is to ensure
++ * that the default mixer choice will be
++ * correctly powered up during initialization. 
++ */ ++ p->connect = invert; + } + } + +diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c +index 652657dc6809..55ffb34be95e 100644 +--- a/sound/soc/soc-ops.c ++++ b/sound/soc/soc-ops.c +@@ -825,7 +825,7 @@ int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol, + unsigned int regbase = mc->regbase; + unsigned int regcount = mc->regcount; + unsigned int regwshift = component->val_bytes * BITS_PER_BYTE; +- unsigned int regwmask = (1<<regwshift)-1; ++ unsigned int regwmask = (1UL<<regwshift)-1; + unsigned int invert = mc->invert; + unsigned long mask = (1UL<<mc->nbits)-1; + long min = mc->min; +@@ -874,7 +874,7 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol, + unsigned int regbase = mc->regbase; + unsigned int regcount = mc->regcount; + unsigned int regwshift = component->val_bytes * BITS_PER_BYTE; +- unsigned int regwmask = (1<<regwshift)-1; ++ unsigned int regwmask = (1UL<<regwshift)-1; + unsigned int invert = mc->invert; + unsigned long mask = (1UL<<mc->nbits)-1; + long max = mc->max; +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index 8de29f48442f..e4ff0796526c 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -2256,7 +2256,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) && +- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP)) ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) && ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) + continue; + + ret = dpcm_do_trigger(dpcm, be_substream, cmd); +@@ -2286,7 +2287,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, + be->dpcm[stream].state = SND_SOC_DPCM_STATE_START; + break; + case SNDRV_PCM_TRIGGER_STOP: +- if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) ++ if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) && ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) + continue; + + if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream)) +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c +index 56a7142f15a0..b84f5f8975a5 100644 +--- a/sound/soc/soc-topology.c ++++ b/sound/soc/soc-topology.c +@@ -362,7 +362,7 @@ static int soc_tplg_add_kcontrol(struct soc_tplg *tplg, + struct snd_soc_component *comp = tplg->comp; + + return soc_tplg_add_dcontrol(comp->card->snd_card, +- comp->dev, k, NULL, comp, kcontrol); ++ comp->dev, k, comp->name_prefix, comp, kcontrol); + } + + /* remove a mixer kcontrol */ +diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c +index 10eb4b8e8e7e..d3259de43712 100644 +--- a/sound/soc/stm/stm32_sai_sub.c ++++ b/sound/soc/stm/stm32_sai_sub.c +@@ -1551,8 +1551,10 @@ static int stm32_sai_sub_probe(struct platform_device *pdev) + + ret = snd_soc_register_component(&pdev->dev, &stm32_component, + &sai->cpu_dai_drv, 1); +- if (ret) ++ if (ret) { ++ snd_dmaengine_pcm_unregister(&pdev->dev); + return ret; ++ } + + if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) + conf = &stm32_sai_pcm_config_spdif; +diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c +index 73baf398c84a..f6a67eecb063 100644 +--- a/sound/usb/mixer_maps.c ++++ b/sound/usb/mixer_maps.c +@@ -349,6 +349,14 @@ static const struct usbmix_name_map dell_alc4020_map[] = { + { 0 } + }; + ++/* Some mobos shipped with a dummy HD-audio show the invalid GET_MIN/GET_MAX ++ * response for Input Gain Pad (id=19, control=12). Skip it. 
++ */ ++static const struct usbmix_name_map asus_rog_map[] = { ++ { 19, NULL, 12 }, /* FU, Input Gain Pad */ ++ {} ++}; ++ + /* + * Control map entries + */ +@@ -468,6 +476,26 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { + .id = USB_ID(0x05a7, 0x1020), + .map = bose_companion5_map, + }, ++ { /* Gigabyte TRX40 Aorus Pro WiFi */ ++ .id = USB_ID(0x0414, 0xa002), ++ .map = asus_rog_map, ++ }, ++ { /* ASUS ROG Zenith II */ ++ .id = USB_ID(0x0b05, 0x1916), ++ .map = asus_rog_map, ++ }, ++ { /* ASUS ROG Strix */ ++ .id = USB_ID(0x0b05, 0x1917), ++ .map = asus_rog_map, ++ }, ++ { /* MSI TRX40 Creator */ ++ .id = USB_ID(0x0db0, 0x0d64), ++ .map = asus_rog_map, ++ }, ++ { /* MSI TRX40 */ ++ .id = USB_ID(0x0db0, 0x543d), ++ .map = asus_rog_map, ++ }, + { 0 } /* terminator */ + }; + +diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile +index 6080de58861f..6289b8d20dff 100644 +--- a/tools/gpio/Makefile ++++ b/tools/gpio/Makefile +@@ -35,7 +35,7 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h + + prepare: $(OUTPUT)include/linux/gpio.h + +-GPIO_UTILS_IN := $(output)gpio-utils-in.o ++GPIO_UTILS_IN := $(OUTPUT)gpio-utils-in.o + $(GPIO_UTILS_IN): prepare FORCE + $(Q)$(MAKE) $(build)=gpio-utils + +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config +index c90f4146e5a2..1a0c2e0f0ed5 100644 +--- a/tools/perf/Makefile.config ++++ b/tools/perf/Makefile.config +@@ -228,8 +228,17 @@ strip-libs = $(filter-out -l%,$(1)) + + PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG)) + ++# Python 3.8 changed the output of `python-config --ldflags` to not include the ++# '-lpythonX.Y' flag unless '--embed' is also passed. The feature check for ++# libpython fails if that flag is not included in LDFLAGS ++ifeq ($(shell $(PYTHON_CONFIG_SQ) --ldflags --embed 2>&1 1>/dev/null; echo $$?), 0) ++ PYTHON_CONFIG_LDFLAGS := --ldflags --embed ++else ++ PYTHON_CONFIG_LDFLAGS := --ldflags ++endif ++ + ifdef PYTHON_CONFIG +- PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) ++ PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) $(PYTHON_CONFIG_LDFLAGS) 2>/dev/null) + PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) + PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil + PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null) +diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile +index 397d6b612502..aa6abfe0749c 100644 +--- a/tools/testing/radix-tree/Makefile ++++ b/tools/testing/radix-tree/Makefile +@@ -7,8 +7,8 @@ LDLIBS+= -lpthread -lurcu + TARGETS = main idr-test multiorder xarray + CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o + OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \ +- regression4.o \ +- tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o ++ regression4.o tag_check.o multiorder.o idr-test.o iteration_check.o \ ++ iteration_check_2.o benchmark.o + + ifndef SHIFT + SHIFT=3 +diff --git a/tools/testing/radix-tree/iteration_check_2.c b/tools/testing/radix-tree/iteration_check_2.c +new file mode 100644 +index 000000000000..aac5c50a3674 +--- /dev/null ++++ b/tools/testing/radix-tree/iteration_check_2.c +@@ -0,0 +1,87 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * iteration_check_2.c: Check that deleting a tagged entry doesn't cause ++ * an RCU walker to finish early. 
++ * Copyright (c) 2020 Oracle ++ * Author: Matthew Wilcox <willy@infradead.org> ++ */ ++#include <pthread.h> ++#include "test.h" ++ ++static volatile bool test_complete; ++ ++static void *iterator(void *arg) ++{ ++ XA_STATE(xas, arg, 0); ++ void *entry; ++ ++ rcu_register_thread(); ++ ++ while (!test_complete) { ++ xas_set(&xas, 0); ++ rcu_read_lock(); ++ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) ++ ; ++ rcu_read_unlock(); ++ assert(xas.xa_index >= 100); ++ } ++ ++ rcu_unregister_thread(); ++ return NULL; ++} ++ ++static void *throbber(void *arg) ++{ ++ struct xarray *xa = arg; ++ ++ rcu_register_thread(); ++ ++ while (!test_complete) { ++ int i; ++ ++ for (i = 0; i < 100; i++) { ++ xa_store(xa, i, xa_mk_value(i), GFP_KERNEL); ++ xa_set_mark(xa, i, XA_MARK_0); ++ } ++ for (i = 0; i < 100; i++) ++ xa_erase(xa, i); ++ } ++ ++ rcu_unregister_thread(); ++ return NULL; ++} ++ ++void iteration_test2(unsigned test_duration) ++{ ++ pthread_t threads[2]; ++ DEFINE_XARRAY(array); ++ int i; ++ ++ printv(1, "Running iteration test 2 for %d seconds\n", test_duration); ++ ++ test_complete = false; ++ ++ xa_store(&array, 100, xa_mk_value(100), GFP_KERNEL); ++ xa_set_mark(&array, 100, XA_MARK_0); ++ ++ if (pthread_create(&threads[0], NULL, iterator, &array)) { ++ perror("create iterator thread"); ++ exit(1); ++ } ++ if (pthread_create(&threads[1], NULL, throbber, &array)) { ++ perror("create throbber thread"); ++ exit(1); ++ } ++ ++ sleep(test_duration); ++ test_complete = true; ++ ++ for (i = 0; i < 2; i++) { ++ if (pthread_join(threads[i], NULL)) { ++ perror("pthread_join"); ++ exit(1); ++ } ++ } ++ ++ xa_destroy(&array); ++} +diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c +index 7a22d6e3732e..f2cbc8e5b97c 100644 +--- a/tools/testing/radix-tree/main.c ++++ b/tools/testing/radix-tree/main.c +@@ -311,6 +311,7 @@ int main(int argc, char **argv) + regression4_test(); + iteration_test(0, 10 + 90 * long_run); + iteration_test(7, 10 + 90 * long_run); ++ iteration_test2(10 + 90 * long_run); + single_thread_tests(long_run); + + /* Free any remaining preallocated nodes */ +diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h +index 1ee4b2c0ad10..34dab4d18744 100644 +--- a/tools/testing/radix-tree/test.h ++++ b/tools/testing/radix-tree/test.h +@@ -34,6 +34,7 @@ void xarray_tests(void); + void tag_check(void); + void multiorder_checks(void); + void iteration_test(unsigned order, unsigned duration); ++void iteration_test2(unsigned duration); + void benchmark(void); + void idr_checks(void); + void ida_tests(void); +diff --git a/tools/testing/selftests/net/reuseport_addr_any.c b/tools/testing/selftests/net/reuseport_addr_any.c +index c6233935fed1..b8475cb29be7 100644 +--- a/tools/testing/selftests/net/reuseport_addr_any.c ++++ b/tools/testing/selftests/net/reuseport_addr_any.c +@@ -21,6 +21,10 @@ + #include <sys/socket.h> + #include <unistd.h> + ++#ifndef SOL_DCCP ++#define SOL_DCCP 269 ++#endif ++ + static const char *IP4_ADDR = "127.0.0.1"; + static const char *IP6_ADDR = "::1"; + static const char *IP4_MAPPED6 = "::ffff:127.0.0.1"; +diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore +index 7101ffd08d66..d021172fa2eb 100644 +--- a/tools/testing/selftests/powerpc/mm/.gitignore ++++ b/tools/testing/selftests/powerpc/mm/.gitignore +@@ -5,3 +5,4 @@ prot_sao + segv_errors + wild_bctr + large_vm_fork_separation ++tlbie_test +diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile 
b/tools/testing/selftests/powerpc/pmu/ebb/Makefile +index 417306353e07..ca35dd8848b0 100644 +--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile ++++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile +@@ -7,6 +7,7 @@ noarg: + # The EBB handler is 64-bit code and everything links against it + CFLAGS += -m64 + ++TMPOUT = $(OUTPUT)/ + # Toolchains may build PIE by default which breaks the assembly + no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \ + $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie) +diff --git a/tools/testing/selftests/vm/map_hugetlb.c b/tools/testing/selftests/vm/map_hugetlb.c +index 5a2d7b8efc40..6af951900aa3 100644 +--- a/tools/testing/selftests/vm/map_hugetlb.c ++++ b/tools/testing/selftests/vm/map_hugetlb.c +@@ -45,20 +45,20 @@ static void check_bytes(char *addr) + printf("First hex is %x\n", *((unsigned int *)addr)); + } + +-static void write_bytes(char *addr) ++static void write_bytes(char *addr, size_t length) + { + unsigned long i; + +- for (i = 0; i < LENGTH; i++) ++ for (i = 0; i < length; i++) + *(addr + i) = (char)i; + } + +-static int read_bytes(char *addr) ++static int read_bytes(char *addr, size_t length) + { + unsigned long i; + + check_bytes(addr); +- for (i = 0; i < LENGTH; i++) ++ for (i = 0; i < length; i++) + if (*(addr + i) != (char)i) { + printf("Mismatch at %lu\n", i); + return 1; +@@ -96,11 +96,11 @@ int main(int argc, char **argv) + + printf("Returned address is %p\n", addr); + check_bytes(addr); +- write_bytes(addr); +- ret = read_bytes(addr); ++ write_bytes(addr, length); ++ ret = read_bytes(addr, length); + + /* munmap() length of MAP_HUGETLB memory must be hugepage aligned */ +- if (munmap(addr, LENGTH)) { ++ if (munmap(addr, length)) { + perror("munmap"); + exit(1); + } +diff --git a/tools/testing/selftests/vm/mlock2-tests.c b/tools/testing/selftests/vm/mlock2-tests.c +index 637b6d0ac0d0..11b2301f3aa3 100644 +--- a/tools/testing/selftests/vm/mlock2-tests.c ++++ b/tools/testing/selftests/vm/mlock2-tests.c +@@ -67,59 +67,6 @@ out: + return ret; + } + +-static uint64_t get_pageflags(unsigned long addr) +-{ +- FILE *file; +- uint64_t pfn; +- unsigned long offset; +- +- file = fopen("/proc/self/pagemap", "r"); +- if (!file) { +- perror("fopen pagemap"); +- _exit(1); +- } +- +- offset = addr / getpagesize() * sizeof(pfn); +- +- if (fseek(file, offset, SEEK_SET)) { +- perror("fseek pagemap"); +- _exit(1); +- } +- +- if (fread(&pfn, sizeof(pfn), 1, file) != 1) { +- perror("fread pagemap"); +- _exit(1); +- } +- +- fclose(file); +- return pfn; +-} +- +-static uint64_t get_kpageflags(unsigned long pfn) +-{ +- uint64_t flags; +- FILE *file; +- +- file = fopen("/proc/kpageflags", "r"); +- if (!file) { +- perror("fopen kpageflags"); +- _exit(1); +- } +- +- if (fseek(file, pfn * sizeof(flags), SEEK_SET)) { +- perror("fseek kpageflags"); +- _exit(1); +- } +- +- if (fread(&flags, sizeof(flags), 1, file) != 1) { +- perror("fread kpageflags"); +- _exit(1); +- } +- +- fclose(file); +- return flags; +-} +- + #define VMFLAGS "VmFlags:" + + static bool is_vmflag_set(unsigned long addr, const char *vmflag) +@@ -159,19 +106,13 @@ out: + #define RSS "Rss:" + #define LOCKED "lo" + +-static bool is_vma_lock_on_fault(unsigned long addr) ++static unsigned long get_value_for_name(unsigned long addr, const char *name) + { +- bool ret = false; +- bool locked; +- FILE *smaps = NULL; +- unsigned long vma_size, vma_rss; + char *line = NULL; +- char *value; + size_t size = 0; +- +- locked = is_vmflag_set(addr, LOCKED); 
+- if (!locked) +- goto out; ++ char *value_ptr; ++ FILE *smaps = NULL; ++ unsigned long value = -1UL; + + smaps = seek_to_smaps_entry(addr); + if (!smaps) { +@@ -180,112 +121,70 @@ static bool is_vma_lock_on_fault(unsigned long addr) + } + + while (getline(&line, &size, smaps) > 0) { +- if (!strstr(line, SIZE)) { ++ if (!strstr(line, name)) { + free(line); + line = NULL; + size = 0; + continue; + } + +- value = line + strlen(SIZE); +- if (sscanf(value, "%lu kB", &vma_size) < 1) { ++ value_ptr = line + strlen(name); ++ if (sscanf(value_ptr, "%lu kB", &value) < 1) { + printf("Unable to parse smaps entry for Size\n"); + goto out; + } + break; + } + +- while (getline(&line, &size, smaps) > 0) { +- if (!strstr(line, RSS)) { +- free(line); +- line = NULL; +- size = 0; +- continue; +- } +- +- value = line + strlen(RSS); +- if (sscanf(value, "%lu kB", &vma_rss) < 1) { +- printf("Unable to parse smaps entry for Rss\n"); +- goto out; +- } +- break; +- } +- +- ret = locked && (vma_rss < vma_size); + out: +- free(line); + if (smaps) + fclose(smaps); +- return ret; ++ free(line); ++ return value; + } + +-#define PRESENT_BIT 0x8000000000000000ULL +-#define PFN_MASK 0x007FFFFFFFFFFFFFULL +-#define UNEVICTABLE_BIT (1UL << 18) +- +-static int lock_check(char *map) ++static bool is_vma_lock_on_fault(unsigned long addr) + { +- unsigned long page_size = getpagesize(); +- uint64_t page1_flags, page2_flags; ++ bool locked; ++ unsigned long vma_size, vma_rss; + +- page1_flags = get_pageflags((unsigned long)map); +- page2_flags = get_pageflags((unsigned long)map + page_size); ++ locked = is_vmflag_set(addr, LOCKED); ++ if (!locked) ++ return false; + +- /* Both pages should be present */ +- if (((page1_flags & PRESENT_BIT) == 0) || +- ((page2_flags & PRESENT_BIT) == 0)) { +- printf("Failed to make both pages present\n"); +- return 1; +- } ++ vma_size = get_value_for_name(addr, SIZE); ++ vma_rss = get_value_for_name(addr, RSS); + +- page1_flags = get_kpageflags(page1_flags & PFN_MASK); +- page2_flags = get_kpageflags(page2_flags & PFN_MASK); ++ /* only one page is faulted in */ ++ return (vma_rss < vma_size); ++} + +- /* Both pages should be unevictable */ +- if (((page1_flags & UNEVICTABLE_BIT) == 0) || +- ((page2_flags & UNEVICTABLE_BIT) == 0)) { +- printf("Failed to make both pages unevictable\n"); +- return 1; +- } ++#define PRESENT_BIT 0x8000000000000000ULL ++#define PFN_MASK 0x007FFFFFFFFFFFFFULL ++#define UNEVICTABLE_BIT (1UL << 18) + +- if (!is_vmflag_set((unsigned long)map, LOCKED)) { +- printf("VMA flag %s is missing on page 1\n", LOCKED); +- return 1; +- } ++static int lock_check(unsigned long addr) ++{ ++ bool locked; ++ unsigned long vma_size, vma_rss; + +- if (!is_vmflag_set((unsigned long)map + page_size, LOCKED)) { +- printf("VMA flag %s is missing on page 2\n", LOCKED); +- return 1; +- } ++ locked = is_vmflag_set(addr, LOCKED); ++ if (!locked) ++ return false; + +- return 0; ++ vma_size = get_value_for_name(addr, SIZE); ++ vma_rss = get_value_for_name(addr, RSS); ++ ++ return (vma_rss == vma_size); + } + + static int unlock_lock_check(char *map) + { +- unsigned long page_size = getpagesize(); +- uint64_t page1_flags, page2_flags; +- +- page1_flags = get_pageflags((unsigned long)map); +- page2_flags = get_pageflags((unsigned long)map + page_size); +- page1_flags = get_kpageflags(page1_flags & PFN_MASK); +- page2_flags = get_kpageflags(page2_flags & PFN_MASK); +- +- if ((page1_flags & UNEVICTABLE_BIT) || (page2_flags & UNEVICTABLE_BIT)) { +- printf("A page is still marked unevictable after 
unlock\n"); +- return 1; +- } +- + if (is_vmflag_set((unsigned long)map, LOCKED)) { + printf("VMA flag %s is present on page 1 after unlock\n", LOCKED); + return 1; + } + +- if (is_vmflag_set((unsigned long)map + page_size, LOCKED)) { +- printf("VMA flag %s is present on page 2 after unlock\n", LOCKED); +- return 1; +- } +- + return 0; + } + +@@ -311,7 +210,7 @@ static int test_mlock_lock() + goto unmap; + } + +- if (lock_check(map)) ++ if (!lock_check((unsigned long)map)) + goto unmap; + + /* Now unlock and recheck attributes */ +@@ -330,64 +229,18 @@ out: + + static int onfault_check(char *map) + { +- unsigned long page_size = getpagesize(); +- uint64_t page1_flags, page2_flags; +- +- page1_flags = get_pageflags((unsigned long)map); +- page2_flags = get_pageflags((unsigned long)map + page_size); +- +- /* Neither page should be present */ +- if ((page1_flags & PRESENT_BIT) || (page2_flags & PRESENT_BIT)) { +- printf("Pages were made present by MLOCK_ONFAULT\n"); +- return 1; +- } +- + *map = 'a'; +- page1_flags = get_pageflags((unsigned long)map); +- page2_flags = get_pageflags((unsigned long)map + page_size); +- +- /* Only page 1 should be present */ +- if ((page1_flags & PRESENT_BIT) == 0) { +- printf("Page 1 is not present after fault\n"); +- return 1; +- } else if (page2_flags & PRESENT_BIT) { +- printf("Page 2 was made present\n"); +- return 1; +- } +- +- page1_flags = get_kpageflags(page1_flags & PFN_MASK); +- +- /* Page 1 should be unevictable */ +- if ((page1_flags & UNEVICTABLE_BIT) == 0) { +- printf("Failed to make faulted page unevictable\n"); +- return 1; +- } +- + if (!is_vma_lock_on_fault((unsigned long)map)) { + printf("VMA is not marked for lock on fault\n"); + return 1; + } + +- if (!is_vma_lock_on_fault((unsigned long)map + page_size)) { +- printf("VMA is not marked for lock on fault\n"); +- return 1; +- } +- + return 0; + } + + static int unlock_onfault_check(char *map) + { + unsigned long page_size = getpagesize(); +- uint64_t page1_flags; +- +- page1_flags = get_pageflags((unsigned long)map); +- page1_flags = get_kpageflags(page1_flags & PFN_MASK); +- +- if (page1_flags & UNEVICTABLE_BIT) { +- printf("Page 1 is still marked unevictable after unlock\n"); +- return 1; +- } + + if (is_vma_lock_on_fault((unsigned long)map) || + is_vma_lock_on_fault((unsigned long)map + page_size)) { +@@ -445,7 +298,6 @@ static int test_lock_onfault_of_present() + char *map; + int ret = 1; + unsigned long page_size = getpagesize(); +- uint64_t page1_flags, page2_flags; + + map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); +@@ -465,17 +317,6 @@ static int test_lock_onfault_of_present() + goto unmap; + } + +- page1_flags = get_pageflags((unsigned long)map); +- page2_flags = get_pageflags((unsigned long)map + page_size); +- page1_flags = get_kpageflags(page1_flags & PFN_MASK); +- page2_flags = get_kpageflags(page2_flags & PFN_MASK); +- +- /* Page 1 should be unevictable */ +- if ((page1_flags & UNEVICTABLE_BIT) == 0) { +- printf("Failed to make present page unevictable\n"); +- goto unmap; +- } +- + if (!is_vma_lock_on_fault((unsigned long)map) || + !is_vma_lock_on_fault((unsigned long)map + page_size)) { + printf("VMA with present pages is not marked lock on fault\n"); +@@ -507,7 +348,7 @@ static int test_munlockall() + goto out; + } + +- if (lock_check(map)) ++ if (!lock_check((unsigned long)map)) + goto unmap; + + if (munlockall()) { +@@ -549,7 +390,7 @@ static int test_munlockall() + goto out; + } + +- if (lock_check(map)) ++ if 
(!lock_check((unsigned long)map)) + goto unmap; + + if (munlockall()) { +diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c +index 6f22238f3217..12aaa063196e 100644 +--- a/tools/testing/selftests/x86/ptrace_syscall.c ++++ b/tools/testing/selftests/x86/ptrace_syscall.c +@@ -414,8 +414,12 @@ int main() + + #if defined(__i386__) && (!defined(__GLIBC__) || __GLIBC__ > 2 || __GLIBC_MINOR__ >= 16) + vsyscall32 = (void *)getauxval(AT_SYSINFO); +- printf("[RUN]\tCheck AT_SYSINFO return regs\n"); +- test_sys32_regs(do_full_vsyscall32); ++ if (vsyscall32) { ++ printf("[RUN]\tCheck AT_SYSINFO return regs\n"); ++ test_sys32_regs(do_full_vsyscall32); ++ } else { ++ printf("[SKIP]\tAT_SYSINFO is not available\n"); ++ } + #endif + + test_ptrace_syscall_restart(); |