-rw-r--r-- | 0000_README            |    4
-rw-r--r-- | 1006_linux-5.6.7.patch | 6688
2 files changed, 6692 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 073a921e..8000cff7 100644 --- a/0000_README +++ b/0000_README @@ -67,6 +67,10 @@ Patch: 1005_linux-5.6.6.patch From: http://www.kernel.org Desc: Linux 5.6.6 +Patch: 1006_linux-5.6.7.patch +From: http://www.kernel.org +Desc: Linux 5.6.7 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1006_linux-5.6.7.patch b/1006_linux-5.6.7.patch new file mode 100644 index 00000000..4c0dfa82 --- /dev/null +++ b/1006_linux-5.6.7.patch @@ -0,0 +1,6688 @@ +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index c07815d230bc..6ba631cc5a56 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2795,7 +2795,7 @@ + <name>,<region-number>[,<base>,<size>,<buswidth>,<altbuswidth>] + + mtdparts= [MTD] +- See drivers/mtd/cmdlinepart.c. ++ See drivers/mtd/parsers/cmdlinepart.c + + multitce=off [PPC] This parameter disables the use of the pSeries + firmware feature for updating multiple TCE entries +diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt +index b739f92da58e..1f90eb39870b 100644 +--- a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt ++++ b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt +@@ -118,7 +118,7 @@ Tegra194: + -------- + + pcie@14180000 { +- compatible = "nvidia,tegra194-pcie", "snps,dw-pcie"; ++ compatible = "nvidia,tegra194-pcie"; + power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>; + reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */ + 0x00 0x38000000 0x0 0x00040000 /* configuration space (256K) */ +diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml +index eef13b9446a8..a4df53228122 100644 +--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml ++++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml +@@ -53,13 +53,12 @@ properties: + description: + Reference to an nvmem node for the calibration data + +- nvmem-cells-names: ++ nvmem-cell-names: + minItems: 1 + maxItems: 2 + items: +- - enum: +- - caldata +- - calsel ++ - const: calib ++ - const: calib_sel + + "#qcom,sensors": + allOf: +@@ -125,7 +124,7 @@ examples: + <0x4a8000 0x1000>; /* SROT */ + + nvmem-cells = <&tsens_caldata>, <&tsens_calsel>; +- nvmem-cell-names = "caldata", "calsel"; ++ nvmem-cell-names = "calib", "calib_sel"; + + interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "uplow"; +diff --git a/Makefile b/Makefile +index af76c00de7f6..b64df959e5d7 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 6 +-SUBLEVEL = 6 ++SUBLEVEL = 7 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi +index e6b4b8525f98..bc488df31511 100644 +--- a/arch/arm/boot/dts/imx6qdl.dtsi ++++ b/arch/arm/boot/dts/imx6qdl.dtsi +@@ -1039,9 +1039,8 @@ + compatible = "fsl,imx6q-fec"; + reg = <0x02188000 0x4000>; + interrupt-names = "int0", "pps"; +- interrupts-extended = +- <&intc 0 118 IRQ_TYPE_LEVEL_HIGH>, +- <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>; ++ interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>, ++ <0 119 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clks IMX6QDL_CLK_ENET>, + <&clks IMX6QDL_CLK_ENET>, + <&clks 
IMX6QDL_CLK_ENET_REF>; +diff --git a/arch/arm/boot/dts/imx6qp.dtsi b/arch/arm/boot/dts/imx6qp.dtsi +index 5f51f8e5c1fa..d91f92f944c5 100644 +--- a/arch/arm/boot/dts/imx6qp.dtsi ++++ b/arch/arm/boot/dts/imx6qp.dtsi +@@ -77,7 +77,6 @@ + }; + + &fec { +- /delete-property/interrupts-extended; + interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>, + <0 119 IRQ_TYPE_LEVEL_HIGH>; + }; +diff --git a/arch/arm/boot/dts/rk3188-bqedison2qc.dts b/arch/arm/boot/dts/rk3188-bqedison2qc.dts +index ad1afd403052..66a0ff196eb1 100644 +--- a/arch/arm/boot/dts/rk3188-bqedison2qc.dts ++++ b/arch/arm/boot/dts/rk3188-bqedison2qc.dts +@@ -58,20 +58,25 @@ + + lvds-encoder { + compatible = "ti,sn75lvds83", "lvds-encoder"; +- #address-cells = <1>; +- #size-cells = <0>; + +- port@0 { +- reg = <0>; +- lvds_in_vop0: endpoint { +- remote-endpoint = <&vop0_out_lvds>; ++ ports { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ port@0 { ++ reg = <0>; ++ ++ lvds_in_vop0: endpoint { ++ remote-endpoint = <&vop0_out_lvds>; ++ }; + }; +- }; + +- port@1 { +- reg = <1>; +- lvds_out_panel: endpoint { +- remote-endpoint = <&panel_in_lvds>; ++ port@1 { ++ reg = <1>; ++ ++ lvds_out_panel: endpoint { ++ remote-endpoint = <&panel_in_lvds>; ++ }; + }; + }; + }; +@@ -465,7 +470,7 @@ + non-removable; + pinctrl-names = "default"; + pinctrl-0 = <&sd1_clk>, <&sd1_cmd>, <&sd1_bus4>; +- vmmcq-supply = <&vccio_wl>; ++ vqmmc-supply = <&vccio_wl>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; +diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi +index e7b9bef1be6b..bd1287eca253 100644 +--- a/arch/arm/boot/dts/sun8i-a83t.dtsi ++++ b/arch/arm/boot/dts/sun8i-a83t.dtsi +@@ -314,7 +314,7 @@ + + display_clocks: clock@1000000 { + compatible = "allwinner,sun8i-a83t-de2-clk"; +- reg = <0x01000000 0x100000>; ++ reg = <0x01000000 0x10000>; + clocks = <&ccu CLK_BUS_DE>, + <&ccu CLK_PLL_DE>; + clock-names = "bus", +diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi +index a9d5d6ddbd71..a3867491bb46 100644 +--- a/arch/arm/boot/dts/sun8i-r40.dtsi ++++ b/arch/arm/boot/dts/sun8i-r40.dtsi +@@ -119,7 +119,7 @@ + display_clocks: clock@1000000 { + compatible = "allwinner,sun8i-r40-de2-clk", + "allwinner,sun8i-h3-de2-clk"; +- reg = <0x01000000 0x100000>; ++ reg = <0x01000000 0x10000>; + clocks = <&ccu CLK_BUS_DE>, + <&ccu CLK_DE>; + clock-names = "bus", +diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi +index 81ea50838cd5..e5312869c0d2 100644 +--- a/arch/arm/boot/dts/sun8i-v3s.dtsi ++++ b/arch/arm/boot/dts/sun8i-v3s.dtsi +@@ -105,7 +105,7 @@ + + display_clocks: clock@1000000 { + compatible = "allwinner,sun8i-v3s-de2-clk"; +- reg = <0x01000000 0x100000>; ++ reg = <0x01000000 0x10000>; + clocks = <&ccu CLK_BUS_DE>, + <&ccu CLK_DE>; + clock-names = "bus", +diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi +index 5e9c3060aa08..799f32bafd80 100644 +--- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi ++++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi +@@ -114,7 +114,7 @@ + + display_clocks: clock@1000000 { + /* compatible is in per SoC .dtsi file */ +- reg = <0x01000000 0x100000>; ++ reg = <0x01000000 0x10000>; + clocks = <&ccu CLK_BUS_DE>, + <&ccu CLK_DE>; + clock-names = "bus", +diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c +index cc29869d12a3..bf85d6db4931 100644 +--- a/arch/arm/net/bpf_jit_32.c ++++ b/arch/arm/net/bpf_jit_32.c +@@ -929,7 +929,11 @@ static inline void emit_a32_rsh_i64(const s8 dst[], + rd = arm_bpf_get_reg64(dst, tmp, 
ctx); + + /* Do LSR operation */ +- if (val < 32) { ++ if (val == 0) { ++ /* An immediate value of 0 encodes a shift amount of 32 ++ * for LSR. To shift by 0, don't do anything. ++ */ ++ } else if (val < 32) { + emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); + emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); + emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx); +@@ -955,7 +959,11 @@ static inline void emit_a32_arsh_i64(const s8 dst[], + rd = arm_bpf_get_reg64(dst, tmp, ctx); + + /* Do ARSH operation */ +- if (val < 32) { ++ if (val == 0) { ++ /* An immediate value of 0 encodes a shift amount of 32 ++ * for ASR. To shift by 0, don't do anything. ++ */ ++ } else if (val < 32) { + emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); + emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); + emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx); +@@ -992,21 +1000,35 @@ static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[], + arm_bpf_put_reg32(dst_hi, rd[0], ctx); + } + ++static bool is_ldst_imm(s16 off, const u8 size) ++{ ++ s16 off_max = 0; ++ ++ switch (size) { ++ case BPF_B: ++ case BPF_W: ++ off_max = 0xfff; ++ break; ++ case BPF_H: ++ off_max = 0xff; ++ break; ++ case BPF_DW: ++ /* Need to make sure off+4 does not overflow. */ ++ off_max = 0xfff - 4; ++ break; ++ } ++ return -off_max <= off && off <= off_max; ++} ++ + /* *(size *)(dst + off) = src */ + static inline void emit_str_r(const s8 dst, const s8 src[], +- s32 off, struct jit_ctx *ctx, const u8 sz){ ++ s16 off, struct jit_ctx *ctx, const u8 sz){ + const s8 *tmp = bpf2a32[TMP_REG_1]; +- s32 off_max; + s8 rd; + + rd = arm_bpf_get_reg32(dst, tmp[1], ctx); + +- if (sz == BPF_H) +- off_max = 0xff; +- else +- off_max = 0xfff; +- +- if (off < 0 || off > off_max) { ++ if (!is_ldst_imm(off, sz)) { + emit_a32_mov_i(tmp[0], off, ctx); + emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx); + rd = tmp[0]; +@@ -1035,18 +1057,12 @@ static inline void emit_str_r(const s8 dst, const s8 src[], + + /* dst = *(size*)(src + off) */ + static inline void emit_ldx_r(const s8 dst[], const s8 src, +- s32 off, struct jit_ctx *ctx, const u8 sz){ ++ s16 off, struct jit_ctx *ctx, const u8 sz){ + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *rd = is_stacked(dst_lo) ? 
tmp : dst; + s8 rm = src; +- s32 off_max; +- +- if (sz == BPF_H) +- off_max = 0xff; +- else +- off_max = 0xfff; + +- if (off < 0 || off > off_max) { ++ if (!is_ldst_imm(off, sz)) { + emit_a32_mov_i(tmp[0], off, ctx); + emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx); + rm = tmp[0]; +diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +index 862b47dc9dc9..baa6f08dc108 100644 +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +@@ -264,7 +264,7 @@ + + display_clocks: clock@0 { + compatible = "allwinner,sun50i-a64-de2-clk"; +- reg = <0x0 0x100000>; ++ reg = <0x0 0x10000>; + clocks = <&ccu CLK_BUS_DE>, + <&ccu CLK_DE>; + clock-names = "bus", +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi +index 53b8ac55a7f3..e5262dab28f5 100644 +--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi ++++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi +@@ -13,6 +13,12 @@ + #include "armada-372x.dtsi" + + / { ++ aliases { ++ ethernet0 = ð0; ++ serial0 = &uart0; ++ serial1 = &uart1; ++ }; ++ + chosen { + stdout-path = "serial0:115200n8"; + }; +diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts +index a211a046b2f2..b90d78a5724b 100644 +--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts ++++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts +@@ -367,6 +367,7 @@ + pinctrl-0 = <&cp0_copper_eth_phy_reset>; + reset-gpios = <&cp0_gpio2 11 GPIO_ACTIVE_LOW>; + reset-assert-us = <10000>; ++ reset-deassert-us = <10000>; + }; + + switch0: switch0@4 { +diff --git a/arch/arm64/boot/dts/marvell/armada-ap807-quad.dtsi b/arch/arm64/boot/dts/marvell/armada-ap807-quad.dtsi +index 840466e143b4..68782f161f12 100644 +--- a/arch/arm64/boot/dts/marvell/armada-ap807-quad.dtsi ++++ b/arch/arm64/boot/dts/marvell/armada-ap807-quad.dtsi +@@ -17,7 +17,7 @@ + + cpu0: cpu@0 { + device_type = "cpu"; +- compatible = "arm,cortex-a72", "arm,armv8"; ++ compatible = "arm,cortex-a72"; + reg = <0x000>; + enable-method = "psci"; + #cooling-cells = <2>; +@@ -32,7 +32,7 @@ + }; + cpu1: cpu@1 { + device_type = "cpu"; +- compatible = "arm,cortex-a72", "arm,armv8"; ++ compatible = "arm,cortex-a72"; + reg = <0x001>; + enable-method = "psci"; + #cooling-cells = <2>; +@@ -47,7 +47,7 @@ + }; + cpu2: cpu@100 { + device_type = "cpu"; +- compatible = "arm,cortex-a72", "arm,armv8"; ++ compatible = "arm,cortex-a72"; + reg = <0x100>; + enable-method = "psci"; + #cooling-cells = <2>; +@@ -62,7 +62,7 @@ + }; + cpu3: cpu@101 { + device_type = "cpu"; +- compatible = "arm,cortex-a72", "arm,armv8"; ++ compatible = "arm,cortex-a72"; + reg = <0x101>; + enable-method = "psci"; + #cooling-cells = <2>; +diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi +index ccac43be12ac..a8f024662e60 100644 +--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi ++++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi +@@ -1208,7 +1208,7 @@ + }; + + pcie@14100000 { +- compatible = "nvidia,tegra194-pcie", "snps,dw-pcie"; ++ compatible = "nvidia,tegra194-pcie"; + power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX1A>; + reg = <0x00 0x14100000 0x0 0x00020000 /* appl registers (128K) */ + 0x00 0x30000000 0x0 0x00040000 /* configuration space (256K) */ +@@ -1253,7 +1253,7 @@ + }; + + pcie@14120000 { +- compatible = "nvidia,tegra194-pcie", "snps,dw-pcie"; ++ 
compatible = "nvidia,tegra194-pcie"; + power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX1A>; + reg = <0x00 0x14120000 0x0 0x00020000 /* appl registers (128K) */ + 0x00 0x32000000 0x0 0x00040000 /* configuration space (256K) */ +@@ -1298,7 +1298,7 @@ + }; + + pcie@14140000 { +- compatible = "nvidia,tegra194-pcie", "snps,dw-pcie"; ++ compatible = "nvidia,tegra194-pcie"; + power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX1A>; + reg = <0x00 0x14140000 0x0 0x00020000 /* appl registers (128K) */ + 0x00 0x34000000 0x0 0x00040000 /* configuration space (256K) */ +@@ -1343,7 +1343,7 @@ + }; + + pcie@14160000 { +- compatible = "nvidia,tegra194-pcie", "snps,dw-pcie"; ++ compatible = "nvidia,tegra194-pcie"; + power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX4A>; + reg = <0x00 0x14160000 0x0 0x00020000 /* appl registers (128K) */ + 0x00 0x36000000 0x0 0x00040000 /* configuration space (256K) */ +@@ -1388,7 +1388,7 @@ + }; + + pcie@14180000 { +- compatible = "nvidia,tegra194-pcie", "snps,dw-pcie"; ++ compatible = "nvidia,tegra194-pcie"; + power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>; + reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */ + 0x00 0x38000000 0x0 0x00040000 /* configuration space (256K) */ +@@ -1433,7 +1433,7 @@ + }; + + pcie@141a0000 { +- compatible = "nvidia,tegra194-pcie", "snps,dw-pcie"; ++ compatible = "nvidia,tegra194-pcie"; + power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>; + reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */ + 0x00 0x3a000000 0x0 0x00040000 /* configuration space (256K) */ +@@ -1481,6 +1481,105 @@ + 0x82000000 0x0 0x40000000 0x1f 0x40000000 0x0 0xc0000000>; /* non-prefetchable memory (3GB) */ + }; + ++ pcie_ep@14160000 { ++ compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep"; ++ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX4A>; ++ reg = <0x00 0x14160000 0x0 0x00020000 /* appl registers (128K) */ ++ 0x00 0x36040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */ ++ 0x00 0x36080000 0x0 0x00040000 /* DBI reg space (256K) */ ++ 0x14 0x00000000 0x4 0x00000000>; /* Address Space (16G) */ ++ reg-names = "appl", "atu_dma", "dbi", "addr_space"; ++ ++ status = "disabled"; ++ ++ num-lanes = <4>; ++ num-ib-windows = <2>; ++ num-ob-windows = <8>; ++ ++ clocks = <&bpmp TEGRA194_CLK_PEX0_CORE_4>; ++ clock-names = "core"; ++ ++ resets = <&bpmp TEGRA194_RESET_PEX0_CORE_4_APB>, ++ <&bpmp TEGRA194_RESET_PEX0_CORE_4>; ++ reset-names = "apb", "core"; ++ ++ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */ ++ interrupt-names = "intr"; ++ ++ nvidia,bpmp = <&bpmp 4>; ++ ++ nvidia,aspm-cmrt-us = <60>; ++ nvidia,aspm-pwr-on-t-us = <20>; ++ nvidia,aspm-l0s-entrance-latency-us = <3>; ++ }; ++ ++ pcie_ep@14180000 { ++ compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep"; ++ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>; ++ reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */ ++ 0x00 0x38040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */ ++ 0x00 0x38080000 0x0 0x00040000 /* DBI reg space (256K) */ ++ 0x18 0x00000000 0x4 0x00000000>; /* Address Space (16G) */ ++ reg-names = "appl", "atu_dma", "dbi", "addr_space"; ++ ++ status = "disabled"; ++ ++ num-lanes = <8>; ++ num-ib-windows = <2>; ++ num-ob-windows = <8>; ++ ++ clocks = <&bpmp TEGRA194_CLK_PEX0_CORE_0>; ++ clock-names = "core"; ++ ++ resets = <&bpmp TEGRA194_RESET_PEX0_CORE_0_APB>, ++ <&bpmp TEGRA194_RESET_PEX0_CORE_0>; ++ reset-names = "apb", "core"; ++ ++ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */ ++ 
interrupt-names = "intr"; ++ ++ nvidia,bpmp = <&bpmp 0>; ++ ++ nvidia,aspm-cmrt-us = <60>; ++ nvidia,aspm-pwr-on-t-us = <20>; ++ nvidia,aspm-l0s-entrance-latency-us = <3>; ++ }; ++ ++ pcie_ep@141a0000 { ++ compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep"; ++ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>; ++ reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */ ++ 0x00 0x3a040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */ ++ 0x00 0x3a080000 0x0 0x00040000 /* DBI reg space (256K) */ ++ 0x1c 0x00000000 0x4 0x00000000>; /* Address Space (16G) */ ++ reg-names = "appl", "atu_dma", "dbi", "addr_space"; ++ ++ status = "disabled"; ++ ++ num-lanes = <8>; ++ num-ib-windows = <2>; ++ num-ob-windows = <8>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&clkreq_c5_bi_dir_state>; ++ ++ clocks = <&bpmp TEGRA194_CLK_PEX1_CORE_5>; ++ clock-names = "core"; ++ ++ resets = <&bpmp TEGRA194_RESET_PEX1_CORE_5_APB>, ++ <&bpmp TEGRA194_RESET_PEX1_CORE_5>; ++ reset-names = "apb", "core"; ++ ++ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */ ++ interrupt-names = "intr"; ++ ++ nvidia,bpmp = <&bpmp 5>; ++ ++ nvidia,aspm-cmrt-us = <60>; ++ nvidia,aspm-pwr-on-t-us = <20>; ++ nvidia,aspm-l0s-entrance-latency-us = <3>; ++ }; ++ + sysram@40000000 { + compatible = "nvidia,tegra194-sysram", "mmio-sram"; + reg = <0x0 0x40000000 0x0 0x50000>; +diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h +index f35a9f3315ee..5056ebb902d1 100644 +--- a/arch/csky/abiv1/inc/abi/entry.h ++++ b/arch/csky/abiv1/inc/abi/entry.h +@@ -172,10 +172,7 @@ + addi r6, 0xe + cpwcr r6, cpcr30 + +- lsri r6, 28 +- addi r6, 2 +- lsli r6, 28 +- addi r6, 0xe ++ movi r6, 0 + cpwcr r6, cpcr31 + .endm + +diff --git a/arch/csky/abiv2/fpu.c b/arch/csky/abiv2/fpu.c +index 86d187d4e5af..5acc5c2e544e 100644 +--- a/arch/csky/abiv2/fpu.c ++++ b/arch/csky/abiv2/fpu.c +@@ -10,11 +10,6 @@ + #define MTCR_DIST 0xC0006420 + #define MFCR_DIST 0xC0006020 + +-void __init init_fpu(void) +-{ +- mtcr("cr<1, 2>", 0); +-} +- + /* + * fpu_libc_helper() is to help libc to excute: + * - mfcr %a, cr<1, 2> +diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h +index 94a7a58765df..111973c6c713 100644 +--- a/arch/csky/abiv2/inc/abi/entry.h ++++ b/arch/csky/abiv2/inc/abi/entry.h +@@ -230,11 +230,8 @@ + addi r6, 0x1ce + mtcr r6, cr<30, 15> /* Set MSA0 */ + +- lsri r6, 28 +- addi r6, 2 +- lsli r6, 28 +- addi r6, 0x1ce +- mtcr r6, cr<31, 15> /* Set MSA1 */ ++ movi r6, 0 ++ mtcr r6, cr<31, 15> /* Clr MSA1 */ + + /* enable MMU */ + mfcr r6, cr18 +diff --git a/arch/csky/abiv2/inc/abi/fpu.h b/arch/csky/abiv2/inc/abi/fpu.h +index 22ca3cf2794a..09e2700a3693 100644 +--- a/arch/csky/abiv2/inc/abi/fpu.h ++++ b/arch/csky/abiv2/inc/abi/fpu.h +@@ -9,7 +9,8 @@ + + int fpu_libc_helper(struct pt_regs *regs); + void fpu_fpe(struct pt_regs *regs); +-void __init init_fpu(void); ++ ++static inline void init_fpu(void) { mtcr("cr<1, 2>", 0); } + + void save_to_user_fp(struct user_fp *user_fp); + void restore_from_user_fp(struct user_fp *user_fp); +diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h +index 21e0bd5293dd..c6bcd7f7c720 100644 +--- a/arch/csky/include/asm/processor.h ++++ b/arch/csky/include/asm/processor.h +@@ -43,6 +43,7 @@ extern struct cpuinfo_csky cpu_data[]; + struct thread_struct { + unsigned long ksp; /* kernel stack pointer */ + unsigned long sr; /* saved status register */ ++ unsigned long trap_no; /* saved status register */ + + /* FPU regs */ + 
struct user_fp __aligned(16) user_fp; +diff --git a/arch/csky/kernel/head.S b/arch/csky/kernel/head.S +index 61989f9241c0..17ed9d250480 100644 +--- a/arch/csky/kernel/head.S ++++ b/arch/csky/kernel/head.S +@@ -21,6 +21,11 @@ END(_start) + ENTRY(_start_smp_secondary) + SETUP_MMU + ++ /* copy msa1 from CPU0 */ ++ lrw r6, secondary_msa1 ++ ld.w r6, (r6, 0) ++ mtcr r6, cr<31, 15> ++ + /* set stack point */ + lrw r6, secondary_stack + ld.w r6, (r6, 0) +diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c +index 3821e55742f4..819a9a7bf786 100644 +--- a/arch/csky/kernel/setup.c ++++ b/arch/csky/kernel/setup.c +@@ -24,26 +24,9 @@ struct screen_info screen_info = { + }; + #endif + +-phys_addr_t __init_memblock memblock_end_of_REG0(void) +-{ +- return (memblock.memory.regions[0].base + +- memblock.memory.regions[0].size); +-} +- +-phys_addr_t __init_memblock memblock_start_of_REG1(void) +-{ +- return memblock.memory.regions[1].base; +-} +- +-size_t __init_memblock memblock_size_of_REG1(void) +-{ +- return memblock.memory.regions[1].size; +-} +- + static void __init csky_memblock_init(void) + { + unsigned long zone_size[MAX_NR_ZONES]; +- unsigned long zhole_size[MAX_NR_ZONES]; + signed long size; + + memblock_reserve(__pa(_stext), _end - _stext); +@@ -54,54 +37,36 @@ static void __init csky_memblock_init(void) + memblock_dump_all(); + + memset(zone_size, 0, sizeof(zone_size)); +- memset(zhole_size, 0, sizeof(zhole_size)); + + min_low_pfn = PFN_UP(memblock_start_of_DRAM()); +- max_pfn = PFN_DOWN(memblock_end_of_DRAM()); +- +- max_low_pfn = PFN_UP(memblock_end_of_REG0()); +- if (max_low_pfn == 0) +- max_low_pfn = max_pfn; ++ max_low_pfn = max_pfn = PFN_DOWN(memblock_end_of_DRAM()); + + size = max_pfn - min_low_pfn; + +- if (memblock.memory.cnt > 1) { +- zone_size[ZONE_NORMAL] = +- PFN_DOWN(memblock_start_of_REG1()) - min_low_pfn; +- zhole_size[ZONE_NORMAL] = +- PFN_DOWN(memblock_start_of_REG1()) - max_low_pfn; ++ if (size <= PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET)) ++ zone_size[ZONE_NORMAL] = size; ++ else if (size < PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET)) { ++ zone_size[ZONE_NORMAL] = ++ PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET); ++ max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL]; + } else { +- if (size <= PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET)) +- zone_size[ZONE_NORMAL] = max_pfn - min_low_pfn; +- else { +- zone_size[ZONE_NORMAL] = ++ zone_size[ZONE_NORMAL] = + PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); +- max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL]; +- } ++ max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL]; ++ write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE); + } + + #ifdef CONFIG_HIGHMEM +- size = 0; +- if (memblock.memory.cnt > 1) { +- size = PFN_DOWN(memblock_size_of_REG1()); +- highstart_pfn = PFN_DOWN(memblock_start_of_REG1()); +- } else { +- size = max_pfn - min_low_pfn - +- PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); +- highstart_pfn = min_low_pfn + +- PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); +- } +- +- if (size > 0) +- zone_size[ZONE_HIGHMEM] = size; ++ zone_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn; + +- highend_pfn = max_pfn; ++ highstart_pfn = max_low_pfn; ++ highend_pfn = max_pfn; + #endif + memblock_set_current_limit(PFN_PHYS(max_low_pfn)); + + dma_contiguous_reserve(0); + +- free_area_init_node(0, zone_size, min_low_pfn, zhole_size); ++ free_area_init_node(0, zone_size, min_low_pfn, NULL); + } + + void __init setup_arch(char **cmdline_p) +diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c +index 0bb0954d5570..b5c5bc3afeb5 100644 +--- 
a/arch/csky/kernel/smp.c ++++ b/arch/csky/kernel/smp.c +@@ -22,6 +22,9 @@ + #include <asm/sections.h> + #include <asm/mmu_context.h> + #include <asm/pgalloc.h> ++#ifdef CONFIG_CPU_HAS_FPU ++#include <abi/fpu.h> ++#endif + + struct ipi_data_struct { + unsigned long bits ____cacheline_aligned; +@@ -156,6 +159,8 @@ volatile unsigned int secondary_hint; + volatile unsigned int secondary_ccr; + volatile unsigned int secondary_stack; + ++unsigned long secondary_msa1; ++ + int __cpu_up(unsigned int cpu, struct task_struct *tidle) + { + unsigned long mask = 1 << cpu; +@@ -164,6 +169,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) + (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8; + secondary_hint = mfcr("cr31"); + secondary_ccr = mfcr("cr18"); ++ secondary_msa1 = read_mmu_msa1(); + + /* + * Because other CPUs are in reset status, we must flush data +diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c +index b057480e7463..63715cb90ee9 100644 +--- a/arch/csky/kernel/traps.c ++++ b/arch/csky/kernel/traps.c +@@ -115,8 +115,9 @@ asmlinkage void trap_c(struct pt_regs *regs) + int sig; + unsigned long vector; + siginfo_t info; ++ struct task_struct *tsk = current; + +- vector = (mfcr("psr") >> 16) & 0xff; ++ vector = (regs->sr >> 16) & 0xff; + + switch (vector) { + case VEC_ZERODIV: +@@ -129,6 +130,7 @@ asmlinkage void trap_c(struct pt_regs *regs) + sig = SIGTRAP; + break; + case VEC_ILLEGAL: ++ tsk->thread.trap_no = vector; + die_if_kernel("Kernel mode ILLEGAL", regs, vector); + #ifndef CONFIG_CPU_NO_USER_BKPT + if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT) +@@ -146,16 +148,20 @@ asmlinkage void trap_c(struct pt_regs *regs) + sig = SIGTRAP; + break; + case VEC_ACCESS: ++ tsk->thread.trap_no = vector; + return buserr(regs); + #ifdef CONFIG_CPU_NEED_SOFTALIGN + case VEC_ALIGN: ++ tsk->thread.trap_no = vector; + return csky_alignment(regs); + #endif + #ifdef CONFIG_CPU_HAS_FPU + case VEC_FPE: ++ tsk->thread.trap_no = vector; + die_if_kernel("Kernel mode FPE", regs, vector); + return fpu_fpe(regs); + case VEC_PRIV: ++ tsk->thread.trap_no = vector; + die_if_kernel("Kernel mode PRIV", regs, vector); + if (fpu_libc_helper(regs)) + return; +@@ -164,5 +170,8 @@ asmlinkage void trap_c(struct pt_regs *regs) + sig = SIGSEGV; + break; + } ++ ++ tsk->thread.trap_no = vector; ++ + send_sig(sig, current, 0); + } +diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c +index f76618b630f9..562c7f708749 100644 +--- a/arch/csky/mm/fault.c ++++ b/arch/csky/mm/fault.c +@@ -179,11 +179,14 @@ bad_area: + bad_area_nosemaphore: + /* User mode accesses just cause a SIGSEGV */ + if (user_mode(regs)) { ++ tsk->thread.trap_no = (regs->sr >> 16) & 0xff; + force_sig_fault(SIGSEGV, si_code, (void __user *)address); + return; + } + + no_context: ++ tsk->thread.trap_no = (regs->sr >> 16) & 0xff; ++ + /* Are we prepared to handle this kernel fault? */ + if (fixup_exception(regs)) + return; +@@ -198,6 +201,8 @@ no_context: + die_if_kernel("Oops", regs, write); + + out_of_memory: ++ tsk->thread.trap_no = (regs->sr >> 16) & 0xff; ++ + /* + * We ran out of memory, call the OOM killer, and return the userspace + * (which will retry the fault, or kill us if we got oom-killed). +@@ -206,6 +211,8 @@ out_of_memory: + return; + + do_sigbus: ++ tsk->thread.trap_no = (regs->sr >> 16) & 0xff; ++ + up_read(&mm->mmap_sem); + + /* Kernel mode? 
Handle exceptions or die */ +diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts +index c340f947baa0..fc4e64200c3d 100644 +--- a/arch/mips/boot/dts/ingenic/ci20.dts ++++ b/arch/mips/boot/dts/ingenic/ci20.dts +@@ -62,6 +62,11 @@ + enable-active-high; + }; + ++ ir: ir { ++ compatible = "gpio-ir-receiver"; ++ gpios = <&gpe 3 GPIO_ACTIVE_LOW>; ++ }; ++ + wlan0_power: fixedregulator@1 { + compatible = "regulator-fixed"; + regulator-name = "wlan0_power"; +diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c +index 577345382b23..673f13b87db1 100644 +--- a/arch/powerpc/kernel/prom_init.c ++++ b/arch/powerpc/kernel/prom_init.c +@@ -1773,6 +1773,9 @@ static void __init prom_rtas_os_term(char *str) + if (token == 0) + prom_panic("Could not get token for ibm,os-term\n"); + os_term_args.token = cpu_to_be32(token); ++ os_term_args.nargs = cpu_to_be32(1); ++ os_term_args.nret = cpu_to_be32(1); ++ os_term_args.args[0] = cpu_to_be32(__pa(str)); + prom_rtas_hcall((uint64_t)&os_term_args); + } + #endif /* CONFIG_PPC_SVM */ +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c +index 2cefd071b848..c0c43a733830 100644 +--- a/arch/powerpc/kvm/book3s_hv.c ++++ b/arch/powerpc/kvm/book3s_hv.c +@@ -3616,6 +3616,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, + if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && + kvmppc_get_gpr(vcpu, 3) == H_CEDE) { + kvmppc_nested_cede(vcpu); ++ kvmppc_set_gpr(vcpu, 3, 0); + trap = 0; + } + } else { +diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c +index 6f019df37916..15b2c6eb506d 100644 +--- a/arch/powerpc/platforms/maple/setup.c ++++ b/arch/powerpc/platforms/maple/setup.c +@@ -291,23 +291,6 @@ static int __init maple_probe(void) + return 1; + } + +-define_machine(maple) { +- .name = "Maple", +- .probe = maple_probe, +- .setup_arch = maple_setup_arch, +- .init_IRQ = maple_init_IRQ, +- .pci_irq_fixup = maple_pci_irq_fixup, +- .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, +- .restart = maple_restart, +- .halt = maple_halt, +- .get_boot_time = maple_get_boot_time, +- .set_rtc_time = maple_set_rtc_time, +- .get_rtc_time = maple_get_rtc_time, +- .calibrate_decr = generic_calibrate_decr, +- .progress = maple_progress, +- .power_save = power4_idle, +-}; +- + #ifdef CONFIG_EDAC + /* + * Register a platform device for CPC925 memory controller on +@@ -364,3 +347,20 @@ static int __init maple_cpc925_edac_setup(void) + } + machine_device_initcall(maple, maple_cpc925_edac_setup); + #endif ++ ++define_machine(maple) { ++ .name = "Maple", ++ .probe = maple_probe, ++ .setup_arch = maple_setup_arch, ++ .init_IRQ = maple_init_IRQ, ++ .pci_irq_fixup = maple_pci_irq_fixup, ++ .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, ++ .restart = maple_restart, ++ .halt = maple_halt, ++ .get_boot_time = maple_get_boot_time, ++ .set_rtc_time = maple_set_rtc_time, ++ .get_rtc_time = maple_get_rtc_time, ++ .calibrate_decr = generic_calibrate_decr, ++ .progress = maple_progress, ++ .power_save = power4_idle, ++}; +diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c +index 1c23d84a9097..73044634d342 100644 +--- a/arch/s390/crypto/aes_s390.c ++++ b/arch/s390/crypto/aes_s390.c +@@ -342,6 +342,7 @@ static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier) + memcpy(walk.iv, param.iv, AES_BLOCK_SIZE); + ret = skcipher_walk_done(&walk, nbytes - n); + } ++ memzero_explicit(¶m, sizeof(param)); + return ret; + } + 
+@@ -470,6 +471,8 @@ static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier) + walk.dst.virt.addr, walk.src.virt.addr, n); + ret = skcipher_walk_done(&walk, nbytes - n); + } ++ memzero_explicit(&pcc_param, sizeof(pcc_param)); ++ memzero_explicit(&xts_param, sizeof(xts_param)); + return ret; + } + +diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c +index b095b1c78987..05b908b3a6b3 100644 +--- a/arch/s390/kernel/perf_cpum_sf.c ++++ b/arch/s390/kernel/perf_cpum_sf.c +@@ -1576,6 +1576,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw) + unsigned long range = 0, size; + unsigned long long overflow = 0; + struct perf_output_handle *handle = &cpuhw->handle; ++ unsigned long num_sdb; + + aux = perf_get_aux(handle); + if (WARN_ON_ONCE(!aux)) +@@ -1587,13 +1588,14 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw) + size >> PAGE_SHIFT); + perf_aux_output_end(handle, size); + ++ num_sdb = aux->sfb.num_sdb; + while (!done) { + /* Get an output handle */ + aux = perf_aux_output_begin(handle, cpuhw->event); + if (handle->size == 0) { + pr_err("The AUX buffer with %lu pages for the " + "diagnostic-sampling mode is full\n", +- aux->sfb.num_sdb); ++ num_sdb); + debug_sprintf_event(sfdbg, 1, + "%s: AUX buffer used up\n", + __func__); +diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c +index 6ebc2117c66c..91b9b3f73de6 100644 +--- a/arch/s390/kernel/processor.c ++++ b/arch/s390/kernel/processor.c +@@ -165,8 +165,9 @@ static void show_cpu_mhz(struct seq_file *m, unsigned long n) + static int show_cpuinfo(struct seq_file *m, void *v) + { + unsigned long n = (unsigned long) v - 1; ++ unsigned long first = cpumask_first(cpu_online_mask); + +- if (!n) ++ if (n == first) + show_cpu_summary(m, v); + if (!machine_has_cpu_mhz) + return 0; +@@ -179,6 +180,8 @@ static inline void *c_update(loff_t *pos) + { + if (*pos) + *pos = cpumask_next(*pos - 1, cpu_online_mask); ++ else ++ *pos = cpumask_first(cpu_online_mask); + return *pos < nr_cpu_ids ? 
(void *)*pos + 1 : NULL; + } + +diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c +index 9d9ab77d02dd..364e3a89c096 100644 +--- a/arch/s390/mm/gmap.c ++++ b/arch/s390/mm/gmap.c +@@ -1844,6 +1844,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t, + goto out_free; + } else if (*table & _REGION_ENTRY_ORIGIN) { + rc = -EAGAIN; /* Race with shadow */ ++ goto out_free; + } + crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY); + /* mark as invalid as long as the parent table is not protected */ +diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c +index 247f95da057b..eca45ad2166c 100644 +--- a/arch/um/drivers/ubd_kern.c ++++ b/arch/um/drivers/ubd_kern.c +@@ -1607,7 +1607,9 @@ int io_thread(void *arg) + written = 0; + + do { +- res = os_write_file(kernel_fd, ((char *) io_req_buffer) + written, n); ++ res = os_write_file(kernel_fd, ++ ((char *) io_req_buffer) + written, ++ n - written); + if (res >= 0) { + written += res; + } +diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c +index fbda10535dab..5c819f89b8c2 100644 +--- a/arch/um/os-Linux/file.c ++++ b/arch/um/os-Linux/file.c +@@ -8,6 +8,7 @@ + #include <errno.h> + #include <fcntl.h> + #include <signal.h> ++#include <linux/falloc.h> + #include <sys/ioctl.h> + #include <sys/mount.h> + #include <sys/socket.h> +diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c +index b0da5320bcff..624f5d9b0f79 100644 +--- a/arch/x86/hyperv/hv_init.c ++++ b/arch/x86/hyperv/hv_init.c +@@ -20,6 +20,7 @@ + #include <linux/mm.h> + #include <linux/hyperv.h> + #include <linux/slab.h> ++#include <linux/kernel.h> + #include <linux/cpuhotplug.h> + #include <linux/syscore_ops.h> + #include <clocksource/hyperv_timer.h> +@@ -419,11 +420,14 @@ void hyperv_cleanup(void) + } + EXPORT_SYMBOL_GPL(hyperv_cleanup); + +-void hyperv_report_panic(struct pt_regs *regs, long err) ++void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die) + { + static bool panic_reported; + u64 guest_id; + ++ if (in_die && !panic_on_oops) ++ return; ++ + /* + * We prefer to report panic on 'die' chain as we have proper + * registers to report, but if we miss it (e.g. on BUG()) we need +diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c +index caf2edccbad2..49ae4e1ac9cd 100644 +--- a/arch/x86/kernel/acpi/cstate.c ++++ b/arch/x86/kernel/acpi/cstate.c +@@ -161,7 +161,8 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, + + /* Make sure we are running on right CPU */ + +- retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx); ++ retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx, ++ false); + if (retval == 0) { + /* Use the hint in CST */ + percpu_entry->states[cx->index].eax = cx->address; +diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c +index caa032ce3fe3..5e296a7e6036 100644 +--- a/arch/x86/kernel/cpu/mshyperv.c ++++ b/arch/x86/kernel/cpu/mshyperv.c +@@ -263,6 +263,16 @@ static void __init ms_hyperv_init_platform(void) + cpuid_eax(HYPERV_CPUID_NESTED_FEATURES); + } + ++ /* ++ * Hyper-V expects to get crash register data or kmsg when ++ * crash enlightment is available and system crashes. Set ++ * crash_kexec_post_notifiers to be true to make sure that ++ * calling crash enlightment interface before running kdump ++ * kernel. 
++ */ ++ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) ++ crash_kexec_post_notifiers = true; ++ + #ifdef CONFIG_X86_LOCAL_APIC + if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS && + ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) { +diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S +index 1d0cee3163e4..1e900676722b 100644 +--- a/arch/x86/xen/xen-head.S ++++ b/arch/x86/xen/xen-head.S +@@ -35,7 +35,11 @@ SYM_CODE_START(startup_xen) + rep __ASM_SIZE(stos) + + mov %_ASM_SI, xen_start_info +- mov $init_thread_union+THREAD_SIZE, %_ASM_SP ++#ifdef CONFIG_X86_64 ++ mov initial_stack(%rip), %rsp ++#else ++ mov initial_stack, %esp ++#endif + + #ifdef CONFIG_X86_64 + /* Set up %gs. +@@ -51,7 +55,7 @@ SYM_CODE_START(startup_xen) + wrmsr + #endif + +- jmp xen_start_kernel ++ call xen_start_kernel + SYM_CODE_END(startup_xen) + __FINIT + #endif +diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c +index 9d963ed518d1..68882b9b8f11 100644 +--- a/block/bfq-cgroup.c ++++ b/block/bfq-cgroup.c +@@ -714,10 +714,7 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, + + if (entity->sched_data != &bfqg->sched_data) { + bic_set_bfqq(bic, NULL, 0); +- bfq_log_bfqq(bfqd, async_bfqq, +- "bic_change_group: %p %d", +- async_bfqq, async_bfqq->ref); +- bfq_put_queue(async_bfqq); ++ bfq_release_process_ref(bfqd, async_bfqq); + } + } + +@@ -818,39 +815,53 @@ static void bfq_flush_idle_tree(struct bfq_service_tree *st) + /** + * bfq_reparent_leaf_entity - move leaf entity to the root_group. + * @bfqd: the device data structure with the root group. +- * @entity: the entity to move. ++ * @entity: the entity to move, if entity is a leaf; or the parent entity ++ * of an active leaf entity to move, if entity is not a leaf. + */ + static void bfq_reparent_leaf_entity(struct bfq_data *bfqd, +- struct bfq_entity *entity) ++ struct bfq_entity *entity, ++ int ioprio_class) + { +- struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ struct bfq_queue *bfqq; ++ struct bfq_entity *child_entity = entity; ++ ++ while (child_entity->my_sched_data) { /* leaf not reached yet */ ++ struct bfq_sched_data *child_sd = child_entity->my_sched_data; ++ struct bfq_service_tree *child_st = child_sd->service_tree + ++ ioprio_class; ++ struct rb_root *child_active = &child_st->active; ++ ++ child_entity = bfq_entity_of(rb_first(child_active)); ++ ++ if (!child_entity) ++ child_entity = child_sd->in_service_entity; ++ } + ++ bfqq = bfq_entity_to_bfqq(child_entity); + bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); + } + + /** +- * bfq_reparent_active_entities - move to the root group all active +- * entities. ++ * bfq_reparent_active_queues - move to the root group all active queues. + * @bfqd: the device data structure with the root group. + * @bfqg: the group to move from. +- * @st: the service tree with the entities. ++ * @st: the service tree to start the search from. 
+ */ +-static void bfq_reparent_active_entities(struct bfq_data *bfqd, +- struct bfq_group *bfqg, +- struct bfq_service_tree *st) ++static void bfq_reparent_active_queues(struct bfq_data *bfqd, ++ struct bfq_group *bfqg, ++ struct bfq_service_tree *st, ++ int ioprio_class) + { + struct rb_root *active = &st->active; +- struct bfq_entity *entity = NULL; +- +- if (!RB_EMPTY_ROOT(&st->active)) +- entity = bfq_entity_of(rb_first(active)); ++ struct bfq_entity *entity; + +- for (; entity ; entity = bfq_entity_of(rb_first(active))) +- bfq_reparent_leaf_entity(bfqd, entity); ++ while ((entity = bfq_entity_of(rb_first(active)))) ++ bfq_reparent_leaf_entity(bfqd, entity, ioprio_class); + + if (bfqg->sched_data.in_service_entity) + bfq_reparent_leaf_entity(bfqd, +- bfqg->sched_data.in_service_entity); ++ bfqg->sched_data.in_service_entity, ++ ioprio_class); + } + + /** +@@ -882,13 +893,6 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) + for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) { + st = bfqg->sched_data.service_tree + i; + +- /* +- * The idle tree may still contain bfq_queues belonging +- * to exited task because they never migrated to a different +- * cgroup from the one being destroyed now. +- */ +- bfq_flush_idle_tree(st); +- + /* + * It may happen that some queues are still active + * (busy) upon group destruction (if the corresponding +@@ -901,7 +905,20 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) + * There is no need to put the sync queues, as the + * scheduler has taken no reference. + */ +- bfq_reparent_active_entities(bfqd, bfqg, st); ++ bfq_reparent_active_queues(bfqd, bfqg, st, i); ++ ++ /* ++ * The idle tree may still contain bfq_queues ++ * belonging to exited task because they never ++ * migrated to a different cgroup from the one being ++ * destroyed now. In addition, even ++ * bfq_reparent_active_queues() may happen to add some ++ * entities to the idle tree. It happens if, in some ++ * of the calls to bfq_bfqq_move() performed by ++ * bfq_reparent_active_queues(), the queue to move is ++ * empty and gets expired. 
++ */ ++ bfq_flush_idle_tree(st); + } + + __bfq_deactivate_entity(entity, false); +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c +index 4a44c7f19435..78ba57efd16b 100644 +--- a/block/bfq-iosched.c ++++ b/block/bfq-iosched.c +@@ -2716,8 +2716,6 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq) + } + } + +- +-static + void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq) + { + /* +diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h +index d1233af9c684..cd224aaf9f52 100644 +--- a/block/bfq-iosched.h ++++ b/block/bfq-iosched.h +@@ -955,6 +955,7 @@ void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq, + bool compensate, enum bfqq_expiration reason); + void bfq_put_queue(struct bfq_queue *bfqq); + void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg); ++void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq); + void bfq_schedule_dispatch(struct bfq_data *bfqd); + void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg); + +diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h +index e618ddfab2fd..40f6a3c33a15 100644 +--- a/drivers/acpi/acpica/acnamesp.h ++++ b/drivers/acpi/acpica/acnamesp.h +@@ -256,6 +256,8 @@ u32 + acpi_ns_build_normalized_path(struct acpi_namespace_node *node, + char *full_path, u32 path_size, u8 no_trailing); + ++void acpi_ns_normalize_pathname(char *original_path); ++ + char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node, + u8 no_trailing); + +diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c +index aa71f65395d2..ee6a1b77af3f 100644 +--- a/drivers/acpi/acpica/dbinput.c ++++ b/drivers/acpi/acpica/dbinput.c +@@ -468,16 +468,14 @@ char *acpi_db_get_next_token(char *string, + return (NULL); + } + +- /* Remove any spaces at the beginning */ ++ /* Remove any spaces at the beginning, ignore blank lines */ + +- if (*string == ' ') { +- while (*string && (*string == ' ')) { +- string++; +- } ++ while (*string && isspace(*string)) { ++ string++; ++ } + +- if (!(*string)) { +- return (NULL); +- } ++ if (!(*string)) { ++ return (NULL); + } + + switch (*string) { +@@ -570,7 +568,7 @@ char *acpi_db_get_next_token(char *string, + + /* Find end of token */ + +- while (*string && (*string != ' ')) { ++ while (*string && !isspace(*string)) { + string++; + } + break; +diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c +index 5e81a1ae44cf..1d4f8c81028c 100644 +--- a/drivers/acpi/acpica/dswexec.c ++++ b/drivers/acpi/acpica/dswexec.c +@@ -16,6 +16,9 @@ + #include "acinterp.h" + #include "acnamesp.h" + #include "acdebug.h" ++#ifdef ACPI_EXEC_APP ++#include "aecommon.h" ++#endif + + #define _COMPONENT ACPI_DISPATCHER + ACPI_MODULE_NAME("dswexec") +@@ -329,6 +332,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) + u32 op_class; + union acpi_parse_object *next_op; + union acpi_parse_object *first_arg; ++#ifdef ACPI_EXEC_APP ++ char *namepath; ++ union acpi_operand_object *obj_desc; ++#endif + + ACPI_FUNCTION_TRACE_PTR(ds_exec_end_op, walk_state); + +@@ -537,6 +544,32 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) + + status = + acpi_ds_eval_buffer_field_operands(walk_state, op); ++ if (ACPI_FAILURE(status)) { ++ break; ++ } ++#ifdef ACPI_EXEC_APP ++ /* ++ * acpi_exec support for namespace initialization file (initialize ++ * buffer_fields in this code.) 
++ */ ++ namepath = ++ acpi_ns_get_external_pathname(op->common.node); ++ status = ae_lookup_init_file_entry(namepath, &obj_desc); ++ if (ACPI_SUCCESS(status)) { ++ status = ++ acpi_ex_write_data_to_field(obj_desc, ++ op->common. ++ node->object, ++ NULL); ++ if ACPI_FAILURE ++ (status) { ++ ACPI_EXCEPTION((AE_INFO, status, ++ "While writing to buffer field")); ++ } ++ } ++ ACPI_FREE(namepath); ++ status = AE_OK; ++#endif + break; + + case AML_TYPE_CREATE_OBJECT: +diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c +index 697974e37edf..27069325b6de 100644 +--- a/drivers/acpi/acpica/dswload.c ++++ b/drivers/acpi/acpica/dswload.c +@@ -14,7 +14,6 @@ + #include "acdispat.h" + #include "acinterp.h" + #include "acnamesp.h" +- + #ifdef ACPI_ASL_COMPILER + #include "acdisasm.h" + #endif +@@ -399,7 +398,6 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state) + union acpi_parse_object *op; + acpi_object_type object_type; + acpi_status status = AE_OK; +- + #ifdef ACPI_ASL_COMPILER + u8 param_count; + #endif +diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c +index b31457ca926c..edadbe146506 100644 +--- a/drivers/acpi/acpica/dswload2.c ++++ b/drivers/acpi/acpica/dswload2.c +@@ -15,6 +15,9 @@ + #include "acinterp.h" + #include "acnamesp.h" + #include "acevents.h" ++#ifdef ACPI_EXEC_APP ++#include "aecommon.h" ++#endif + + #define _COMPONENT ACPI_DISPATCHER + ACPI_MODULE_NAME("dswload2") +@@ -373,6 +376,10 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) + struct acpi_namespace_node *new_node; + u32 i; + u8 region_space; ++#ifdef ACPI_EXEC_APP ++ union acpi_operand_object *obj_desc; ++ char *namepath; ++#endif + + ACPI_FUNCTION_TRACE(ds_load2_end_op); + +@@ -466,6 +473,11 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) + * be evaluated later during the execution phase + */ + status = acpi_ds_create_buffer_field(op, walk_state); ++ if (ACPI_FAILURE(status)) { ++ ACPI_EXCEPTION((AE_INFO, status, ++ "CreateBufferField failure")); ++ goto cleanup; ++ } + break; + + case AML_TYPE_NAMED_FIELD: +@@ -604,6 +616,29 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) + case AML_NAME_OP: + + status = acpi_ds_create_node(walk_state, node, op); ++ if (ACPI_FAILURE(status)) { ++ goto cleanup; ++ } ++#ifdef ACPI_EXEC_APP ++ /* ++ * acpi_exec support for namespace initialization file (initialize ++ * Name opcodes in this code.) ++ */ ++ namepath = acpi_ns_get_external_pathname(node); ++ status = ae_lookup_init_file_entry(namepath, &obj_desc); ++ if (ACPI_SUCCESS(status)) { ++ ++ /* Detach any existing object, attach new object */ ++ ++ if (node->object) { ++ acpi_ns_detach_object(node); ++ } ++ acpi_ns_attach_object(node, obj_desc, ++ obj_desc->common.type); ++ } ++ ACPI_FREE(namepath); ++ status = AE_OK; ++#endif + break; + + case AML_METHOD_OP: +diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c +index 370bbc867745..c717fff7d9b5 100644 +--- a/drivers/acpi/acpica/nsnames.c ++++ b/drivers/acpi/acpica/nsnames.c +@@ -13,9 +13,6 @@ + #define _COMPONENT ACPI_NAMESPACE + ACPI_MODULE_NAME("nsnames") + +-/* Local Prototypes */ +-static void acpi_ns_normalize_pathname(char *original_path); +- + /******************************************************************************* + * + * FUNCTION: acpi_ns_get_external_pathname +@@ -30,7 +27,6 @@ static void acpi_ns_normalize_pathname(char *original_path); + * for error and debug statements. 
+ * + ******************************************************************************/ +- + char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) + { + char *name_buffer; +@@ -411,7 +407,7 @@ cleanup: + * + ******************************************************************************/ + +-static void acpi_ns_normalize_pathname(char *original_path) ++void acpi_ns_normalize_pathname(char *original_path) + { + char *input_path = original_path; + char *new_path_buffer; +diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c +index eee263cb7beb..c365faf4e6cd 100644 +--- a/drivers/acpi/acpica/utdelete.c ++++ b/drivers/acpi/acpica/utdelete.c +@@ -452,13 +452,13 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action) + * + * FUNCTION: acpi_ut_update_object_reference + * +- * PARAMETERS: object - Increment ref count for this object +- * and all sub-objects ++ * PARAMETERS: object - Increment or decrement the ref count for ++ * this object and all sub-objects + * action - Either REF_INCREMENT or REF_DECREMENT + * + * RETURN: Status + * +- * DESCRIPTION: Increment the object reference count ++ * DESCRIPTION: Increment or decrement the object reference count + * + * Object references are incremented when: + * 1) An object is attached to a Node (namespace object) +@@ -492,7 +492,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) + } + + /* +- * All sub-objects must have their reference count incremented ++ * All sub-objects must have their reference count updated + * also. Different object types have different subobjects. + */ + switch (object->common.type) { +@@ -559,6 +559,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) + break; + } + } ++ + next_object = NULL; + break; + +diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c +index b64c62bfcea5..b2263ec67b43 100644 +--- a/drivers/acpi/device_pm.c ++++ b/drivers/acpi/device_pm.c +@@ -1321,8 +1321,8 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on) + */ + static const struct acpi_device_id special_pm_ids[] = { + {"PNP0C0B", }, /* Generic ACPI fan */ +- {"INT1044", }, /* Fan for Tiger Lake generation */ + {"INT3404", }, /* Fan */ ++ {"INTC1044", }, /* Fan for Tiger Lake generation */ + {} + }; + struct acpi_device *adev = ACPI_COMPANION(dev); +diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c +index 387f27ef3368..e4e8b75d39f0 100644 +--- a/drivers/acpi/dptf/dptf_power.c ++++ b/drivers/acpi/dptf/dptf_power.c +@@ -97,8 +97,8 @@ static int dptf_power_remove(struct platform_device *pdev) + } + + static const struct acpi_device_id int3407_device_ids[] = { +- {"INT1047", 0}, + {"INT3407", 0}, ++ {"INTC1047", 0}, + {"", 0}, + }; + MODULE_DEVICE_TABLE(acpi, int3407_device_ids); +diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c +index 1ec7b6900662..bc71a6a60334 100644 +--- a/drivers/acpi/dptf/int340x_thermal.c ++++ b/drivers/acpi/dptf/int340x_thermal.c +@@ -13,10 +13,6 @@ + + #define INT3401_DEVICE 0X01 + static const struct acpi_device_id int340x_thermal_device_ids[] = { +- {"INT1040"}, +- {"INT1043"}, +- {"INT1044"}, +- {"INT1047"}, + {"INT3400"}, + {"INT3401", INT3401_DEVICE}, + {"INT3402"}, +@@ -28,6 +24,10 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = { + {"INT3409"}, + {"INT340A"}, + {"INT340B"}, ++ {"INTC1040"}, ++ {"INTC1043"}, ++ {"INTC1044"}, ++ {"INTC1047"}, + {""}, + }; + +diff --git 
a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c +index 532a1ae3595a..a0bd56ece3ff 100644 +--- a/drivers/acpi/processor_throttling.c ++++ b/drivers/acpi/processor_throttling.c +@@ -897,13 +897,6 @@ static long __acpi_processor_get_throttling(void *data) + return pr->throttling.acpi_processor_get_throttling(pr); + } + +-static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct) +-{ +- if (direct || (is_percpu_thread() && cpu == smp_processor_id())) +- return fn(arg); +- return work_on_cpu(cpu, fn, arg); +-} +- + static int acpi_processor_get_throttling(struct acpi_processor *pr) + { + if (!pr) +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c +index 6343402c09e6..27b80df49ba2 100644 +--- a/drivers/block/rbd.c ++++ b/drivers/block/rbd.c +@@ -4554,6 +4554,10 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev) + cancel_work_sync(&rbd_dev->unlock_work); + } + ++/* ++ * header_rwsem must not be held to avoid a deadlock with ++ * rbd_dev_refresh() when flushing notifies. ++ */ + static void rbd_unregister_watch(struct rbd_device *rbd_dev) + { + cancel_tasks_sync(rbd_dev); +@@ -6951,9 +6955,10 @@ static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap) + + static void rbd_dev_image_release(struct rbd_device *rbd_dev) + { +- rbd_dev_unprobe(rbd_dev); +- if (rbd_dev->opts) ++ if (!rbd_is_ro(rbd_dev)) + rbd_unregister_watch(rbd_dev); ++ ++ rbd_dev_unprobe(rbd_dev); + rbd_dev->image_format = 0; + kfree(rbd_dev->spec->image_id); + rbd_dev->spec->image_id = NULL; +@@ -6964,6 +6969,9 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev) + * device. If this image is the one being mapped (i.e., not a + * parent), initiate a watch on its header object before using that + * object to get detailed information about the rbd image. ++ * ++ * On success, returns with header_rwsem held for write if called ++ * with @depth == 0. 
+ */ + static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) + { +@@ -6993,11 +7001,14 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) + } + } + ++ if (!depth) ++ down_write(&rbd_dev->header_rwsem); ++ + ret = rbd_dev_header_info(rbd_dev); + if (ret) { + if (ret == -ENOENT && !need_watch) + rbd_print_dne(rbd_dev, false); +- goto err_out_watch; ++ goto err_out_probe; + } + + /* +@@ -7042,10 +7053,11 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) + return 0; + + err_out_probe: +- rbd_dev_unprobe(rbd_dev); +-err_out_watch: ++ if (!depth) ++ up_write(&rbd_dev->header_rwsem); + if (need_watch) + rbd_unregister_watch(rbd_dev); ++ rbd_dev_unprobe(rbd_dev); + err_out_format: + rbd_dev->image_format = 0; + kfree(rbd_dev->spec->image_id); +@@ -7107,12 +7119,9 @@ static ssize_t do_rbd_add(struct bus_type *bus, + goto err_out_rbd_dev; + } + +- down_write(&rbd_dev->header_rwsem); + rc = rbd_dev_image_probe(rbd_dev, 0); +- if (rc < 0) { +- up_write(&rbd_dev->header_rwsem); ++ if (rc < 0) + goto err_out_rbd_dev; +- } + + if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) { + rbd_warn(rbd_dev, "alloc_size adjusted to %u", +diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c +index bda92980e015..c0895c993cce 100644 +--- a/drivers/clk/at91/clk-usb.c ++++ b/drivers/clk/at91/clk-usb.c +@@ -75,6 +75,9 @@ static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw, + tmp_parent_rate = req->rate * div; + tmp_parent_rate = clk_hw_round_rate(parent, + tmp_parent_rate); ++ if (!tmp_parent_rate) ++ continue; ++ + tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div); + if (tmp_rate < req->rate) + tmp_diff = req->rate - tmp_rate; +diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c +index 95adf6c6db3d..305544b68b8a 100644 +--- a/drivers/clk/clk.c ++++ b/drivers/clk/clk.c +@@ -2660,12 +2660,14 @@ static int clk_core_get_phase(struct clk_core *core) + { + int ret; + +- clk_prepare_lock(); ++ lockdep_assert_held(&prepare_lock); ++ if (!core->ops->get_phase) ++ return 0; ++ + /* Always try to update cached phase if possible */ +- if (core->ops->get_phase) +- core->phase = core->ops->get_phase(core->hw); +- ret = core->phase; +- clk_prepare_unlock(); ++ ret = core->ops->get_phase(core->hw); ++ if (ret >= 0) ++ core->phase = ret; + + return ret; + } +@@ -2679,10 +2681,16 @@ static int clk_core_get_phase(struct clk_core *core) + */ + int clk_get_phase(struct clk *clk) + { ++ int ret; ++ + if (!clk) + return 0; + +- return clk_core_get_phase(clk->core); ++ clk_prepare_lock(); ++ ret = clk_core_get_phase(clk->core); ++ clk_prepare_unlock(); ++ ++ return ret; + } + EXPORT_SYMBOL_GPL(clk_get_phase); + +@@ -2896,13 +2904,21 @@ static struct hlist_head *orphan_list[] = { + static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, + int level) + { +- seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n", ++ int phase; ++ ++ seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ", + level * 3 + 1, "", + 30 - level * 3, c->name, + c->enable_count, c->prepare_count, c->protect_count, +- clk_core_get_rate(c), clk_core_get_accuracy(c), +- clk_core_get_phase(c), +- clk_core_get_scaled_duty_cycle(c, 100000)); ++ clk_core_get_rate(c), clk_core_get_accuracy(c)); ++ ++ phase = clk_core_get_phase(c); ++ if (phase >= 0) ++ seq_printf(s, "%5d", phase); ++ else ++ seq_puts(s, "-----"); ++ ++ seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000)); + } + + static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, 
+@@ -2939,6 +2955,7 @@ DEFINE_SHOW_ATTRIBUTE(clk_summary); + + static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) + { ++ int phase; + unsigned long min_rate, max_rate; + + clk_core_get_boundaries(c, &min_rate, &max_rate); +@@ -2952,7 +2969,9 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) + seq_printf(s, "\"min_rate\": %lu,", min_rate); + seq_printf(s, "\"max_rate\": %lu,", max_rate); + seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); +- seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c)); ++ phase = clk_core_get_phase(c); ++ if (phase >= 0) ++ seq_printf(s, "\"phase\": %d,", phase); + seq_printf(s, "\"duty_cycle\": %u", + clk_core_get_scaled_duty_cycle(c, 100000)); + } +@@ -3434,14 +3453,11 @@ static int __clk_core_init(struct clk_core *core) + core->accuracy = 0; + + /* +- * Set clk's phase. ++ * Set clk's phase by clk_core_get_phase() caching the phase. + * Since a phase is by definition relative to its parent, just + * query the current clock phase, or just assume it's in phase. + */ +- if (core->ops->get_phase) +- core->phase = core->ops->get_phase(core->hw); +- else +- core->phase = 0; ++ clk_core_get_phase(core); + + /* + * Set clk's duty cycle. +diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c +index 5b0519a81a7a..37e311e1d058 100644 +--- a/drivers/clk/imx/clk-pll14xx.c ++++ b/drivers/clk/imx/clk-pll14xx.c +@@ -55,8 +55,10 @@ static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = { + }; + + static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = { ++ PLL_1443X_RATE(1039500000U, 173, 2, 1, 16384), + PLL_1443X_RATE(650000000U, 325, 3, 2, 0), + PLL_1443X_RATE(594000000U, 198, 2, 2, 0), ++ PLL_1443X_RATE(519750000U, 173, 2, 2, 16384), + PLL_1443X_RATE(393216000U, 262, 2, 3, 9437), + PLL_1443X_RATE(361267200U, 361, 3, 3, 17511), + }; +diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c +index bec3e008335f..5e044ba1ae36 100644 +--- a/drivers/clk/tegra/clk-tegra-pmc.c ++++ b/drivers/clk/tegra/clk-tegra-pmc.c +@@ -49,16 +49,16 @@ struct pmc_clk_init_data { + + static DEFINE_SPINLOCK(clk_out_lock); + +-static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2", +- "clk_m_div4", "extern1", ++static const char *clk_out1_parents[] = { "osc", "osc_div2", ++ "osc_div4", "extern1", + }; + +-static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2", +- "clk_m_div4", "extern2", ++static const char *clk_out2_parents[] = { "osc", "osc_div2", ++ "osc_div4", "extern2", + }; + +-static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2", +- "clk_m_div4", "extern3", ++static const char *clk_out3_parents[] = { "osc", "osc_div2", ++ "osc_div4", "extern3", + }; + + static struct pmc_clk_init_data pmc_clks[] = { +diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c +index 7da893dc00e7..46db5bf366b4 100644 +--- a/drivers/crypto/qce/dma.c ++++ b/drivers/crypto/qce/dma.c +@@ -48,9 +48,10 @@ void qce_dma_release(struct qce_dma_data *dma) + + struct scatterlist * + qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl, +- int max_ents) ++ unsigned int max_len) + { + struct scatterlist *sg = sgt->sgl, *sg_last = NULL; ++ unsigned int new_len; + + while (sg) { + if (!sg_page(sg)) +@@ -61,13 +62,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl, + if (!sg) + return ERR_PTR(-EINVAL); + +- while (new_sgl && sg && max_ents) { +- sg_set_page(sg, sg_page(new_sgl), new_sgl->length, +- new_sgl->offset); ++ while 
(new_sgl && sg && max_len) { ++ new_len = new_sgl->length > max_len ? max_len : new_sgl->length; ++ sg_set_page(sg, sg_page(new_sgl), new_len, new_sgl->offset); + sg_last = sg; + sg = sg_next(sg); + new_sgl = sg_next(new_sgl); +- max_ents--; ++ max_len -= new_len; + } + + return sg_last; +diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h +index ed25a0d9829e..786402169360 100644 +--- a/drivers/crypto/qce/dma.h ++++ b/drivers/crypto/qce/dma.h +@@ -43,6 +43,6 @@ void qce_dma_issue_pending(struct qce_dma_data *dma); + int qce_dma_terminate_all(struct qce_dma_data *dma); + struct scatterlist * + qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add, +- int max_ents); ++ unsigned int max_len); + + #endif /* _DMA_H_ */ +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c +index 4217b745f124..63ae75809cb7 100644 +--- a/drivers/crypto/qce/skcipher.c ++++ b/drivers/crypto/qce/skcipher.c +@@ -97,13 +97,14 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req) + + sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); + +- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1); ++ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); + goto error_free; + } + +- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1); ++ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, ++ QCE_RESULT_BUF_SZ); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); + goto error_free; +diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c +index ada69e722f84..f6f49f0f6fae 100644 +--- a/drivers/dma/idxd/device.c ++++ b/drivers/dma/idxd/device.c +@@ -584,11 +584,11 @@ static void idxd_group_flags_setup(struct idxd_device *idxd) + struct idxd_group *group = &idxd->groups[i]; + + if (group->tc_a == -1) +- group->grpcfg.flags.tc_a = 0; ++ group->tc_a = group->grpcfg.flags.tc_a = 0; + else + group->grpcfg.flags.tc_a = group->tc_a; + if (group->tc_b == -1) +- group->grpcfg.flags.tc_b = 1; ++ group->tc_b = group->grpcfg.flags.tc_b = 1; + else + group->grpcfg.flags.tc_b = group->tc_b; + group->grpcfg.flags.use_token_limit = group->use_token_limit; +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c +index 2a9e40131735..0d70cb2248fe 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c +@@ -1104,9 +1104,9 @@ kfd_gtt_out: + return 0; + + kfd_gtt_no_free_chunk: +- pr_debug("Allocation failed with mem_obj = %p\n", mem_obj); ++ pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj); + mutex_unlock(&kfd->gtt_sa_lock); +- kfree(mem_obj); ++ kfree(*mem_obj); + return -ENOMEM; + } + +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +index 0acd3409dd6c..3abeff7722e3 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +@@ -113,10 +113,13 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, + + if (enable_encryption) { + display->adjust.disable = 0; +- if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) ++ if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) { ++ hdcp_w->link.adjust.hdcp1.disable = 0; + hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; +- else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) ++ } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) { ++ hdcp_w->link.adjust.hdcp1.disable = 1; + 
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
++ }
+
+ schedule_delayed_work(&hdcp_w->property_validate_dwork,
+ msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
+@@ -334,6 +337,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
+ link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
+ display->adjust.disable = 1;
+ link->adjust.auth_delay = 2;
++ link->adjust.hdcp1.disable = 0;
+
+ hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
+index b65ae817eabf..2d4c899e1f8b 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -618,6 +618,64 @@ nouveau_drm_device_fini(struct drm_device *dev)
+ kfree(drm);
+ }
+
++/*
++ * On some Intel PCIe bridge controllers doing a
++ * D0 -> D3hot -> D3cold -> D0 sequence causes Nvidia GPUs to not reappear.
++ * Skipping the intermediate D3hot step seems to make it work again. This is
++ * probably caused by not meeting the expectation the involved AML code has
++ * when the GPU is put into D3hot state before invoking it.
++ *
++ * This leads to various manifestations of this issue:
++ * - AML code execution to power on the GPU hits an infinite loop (as the
++ * code waits on device memory to change).
++ * - kernel crashes, as all PCI reads return -1, which most code isn't able
++ * to handle well enough.
++ *
++ * In all cases dmesg will contain at least one line like this:
++ * 'nouveau 0000:01:00.0: Refused to change power state, currently in D3'
++ * followed by a lot of nouveau timeouts.
++ *
++ * Deeper down, the \_SB.PCI0.PEG0.PG00._OFF code writes bit 0x80 to the not
++ * documented PCI config space register 0x248 of the Intel PCIe bridge
++ * controller (0x1901) in order to change the state of the PCIe link between
++ * the PCIe port and the GPU. There are alternative code paths using other
++ * registers, which seem to work fine (executed pre Windows 8):
++ * - 0xbc bit 0x20 (publicly available documentation claims 'reserved')
++ * - 0xb0 bit 0x10 (link disable)
++ * Changing the conditions inside the firmware by poking into the relevant
++ * addresses does resolve the issue, but it seemed to be ACPI private memory
++ * and not any device accessible memory at all, so there is no portable way of
++ * changing the conditions.
++ * On an XPS 9560 that means bits [0,3] on \CPEX need to be cleared.
++ *
++ * The only systems where this behavior can be seen are hybrid graphics laptops
++ * with a secondary Nvidia Maxwell, Pascal or Turing GPU. It's unclear whether
++ * this issue occurs only in combination with the listed Intel PCIe bridge
++ * controllers and the mentioned GPUs, or with other devices as well.
++ *
++ * Documentation on the PCIe bridge controller can be found in the
++ * "7th Generation Intel® Processor Families for H Platforms Datasheet Volume 2"
++ * Section "12 PCI Express* Controller (x16) Registers"
++ */
++
++static void quirk_broken_nv_runpm(struct pci_dev *pdev)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ struct nouveau_drm *drm = nouveau_drm(dev);
++ struct pci_dev *bridge = pci_upstream_bridge(pdev);
++
++ if (!bridge || bridge->vendor != PCI_VENDOR_ID_INTEL)
++ return;
++
++ switch (bridge->device) {
++ case 0x1901:
++ drm->old_pm_cap = pdev->pm_cap;
++ pdev->pm_cap = 0;
++ NV_INFO(drm, "Disabling PCI power management to avoid bug\n");
++ break;
++ }
++}
++
+ static int nouveau_drm_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pent)
+ {
+@@ -699,6 +757,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
+ if (ret)
+ goto fail_drm_dev_init;
+
++ quirk_broken_nv_runpm(pdev);
+ return 0;
+
+ fail_drm_dev_init:
+@@ -734,7 +793,11 @@ static void
+ nouveau_drm_remove(struct pci_dev *pdev)
+ {
+ struct drm_device *dev = pci_get_drvdata(pdev);
++ struct nouveau_drm *drm = nouveau_drm(dev);
+
++ /* revert our workaround */
++ if (drm->old_pm_cap)
++ pdev->pm_cap = drm->old_pm_cap;
+ nouveau_drm_device_remove(dev);
+ pci_disable_device(pdev);
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
+index c2c332fbde97..2a6519737800 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
+@@ -140,6 +140,8 @@ struct nouveau_drm {
+
+ struct list_head clients;
+
++ u8 old_pm_cap;
++
+ struct {
+ struct agp_bridge_data *bridge;
+ u32 base;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
+index df9bf1fd1bc0..c567526b75b8 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
+@@ -171,6 +171,11 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
+ mm = get_task_mm(current);
+ down_read(&mm->mmap_sem);
+
++ if (!cli->svm.svmm) {
++ up_read(&mm->mmap_sem);
++ return -EINVAL;
++ }
++
+ for (addr = args->va_start, end = args->va_start + size; addr < end;) {
+ struct vm_area_struct *vma;
+ unsigned long next;
+@@ -179,6 +184,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
+ if (!vma)
+ break;
+
++ addr = max(addr, vma->vm_start);
+ next = min(vma->vm_end, end);
+ /* This is a best effort so we ignore errors */
+ nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+index dd8f85b8b3a7..f2f5636efac4 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+@@ -1981,8 +1981,34 @@ gf100_gr_init_(struct nvkm_gr *base)
+ {
+ struct gf100_gr *gr = gf100_gr(base);
+ struct nvkm_subdev *subdev = &base->engine.subdev;
++ struct nvkm_device *device = subdev->device;
++ bool reset = device->chipset == 0x137 || device->chipset == 0x138;
+ u32 ret;
+
++ /* On certain GP107/GP108 boards, we trigger a weird issue where
++ * GR will stop responding to PRI accesses after we've asked the
++ * SEC2 RTOS to boot the GR falcons. This happens with far more
++ * frequency when cold-booting a board (i.e. returning from D3).
++ *
++ * The root cause for this is not known and has proven difficult
++ * to isolate, with many avenues being dead-ends.
++ *
++ * A workaround was discovered by Karol, whereby putting GR into
++ * reset for an extended period right before initialisation
++ * prevents the problem from occurring.
++ *
++ * XXX: As RM does not require any such workaround, this is more
++ * of a hack than a true fix.
++ */
++ reset = nvkm_boolopt(device->cfgopt, "NvGrResetWar", reset);
++ if (reset) {
++ nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
++ nvkm_rd32(device, 0x000200);
++ msleep(50);
++ nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
++ nvkm_rd32(device, 0x000200);
++ }
++
+ nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
+
+ ret = nvkm_falcon_get(&gr->fecs.falcon, subdev);
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 5df596fb0280..fe420ca454e0 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -498,8 +498,10 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
+
+ dma_resv_unlock(bo->base.resv);
+ }
+- if (bo->base.resv != &bo->base._resv)
++ if (bo->base.resv != &bo->base._resv) {
++ ttm_bo_flush_all_fences(bo);
+ dma_resv_unlock(&bo->base._resv);
++ }
+
+ error:
+ kref_get(&bo->list_kref);
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index cea18dc15f77..340719238753 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -681,11 +681,23 @@ static enum drm_mode_status
+ vc4_hdmi_encoder_mode_valid(struct drm_encoder *crtc,
+ const struct drm_display_mode *mode)
+ {
+- /* HSM clock must be 108% of the pixel clock. Additionally,
+- * the AXI clock needs to be at least 25% of pixel clock, but
+- * HSM ends up being the limiting factor.
++ /*
++ * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
++ * be faster than pixel clock, infinitesimally faster, tested in
++ * simulation. Otherwise, exact value is unimportant for HDMI
++ * operation." This conflicts with bcm2835's vc4 documentation, which
++ * states HSM's clock has to be at least 108% of the pixel clock.
++ *
++ * Real life tests reveal that vc4's firmware statement holds up, and
++ * users are able to use pixel clocks closer to HSM's, namely for
++ * 1920x1200@60Hz. So it was decided to leave a 1% margin between
++ * both clocks, which for RPi0-3 implies a maximum pixel clock of
++ * 162MHz.
++ *
++ * Additionally, the AXI clock needs to be at least 25% of
++ * pixel clock, but HSM ends up being the limiting factor.
+ */ +- if (mode->clock > HSM_CLOCK_FREQ / (1000 * 108 / 100)) ++ if (mode->clock > HSM_CLOCK_FREQ / (1000 * 101 / 100)) + return MODE_CLOCK_HIGH; + + return MODE_OK; +diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c +index 0370364169c4..501c43c5851d 100644 +--- a/drivers/hv/channel_mgmt.c ++++ b/drivers/hv/channel_mgmt.c +@@ -839,6 +839,9 @@ void vmbus_initiate_unload(bool crash) + { + struct vmbus_channel_message_header hdr; + ++ if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED) ++ return; ++ + /* Pre-Win2012R2 hosts don't support reconnect */ + if (vmbus_proto_version < VERSION_WIN8_1) + return; +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c +index 029378c27421..a68bce4d0ddb 100644 +--- a/drivers/hv/vmbus_drv.c ++++ b/drivers/hv/vmbus_drv.c +@@ -31,6 +31,7 @@ + #include <linux/kdebug.h> + #include <linux/efi.h> + #include <linux/random.h> ++#include <linux/kernel.h> + #include <linux/syscore_ops.h> + #include <clocksource/hyperv_timer.h> + #include "hyperv_vmbus.h" +@@ -48,14 +49,35 @@ static int hyperv_cpuhp_online; + + static void *hv_panic_page; + ++/* ++ * Boolean to control whether to report panic messages over Hyper-V. ++ * ++ * It can be set via /proc/sys/kernel/hyperv/record_panic_msg ++ */ ++static int sysctl_record_panic_msg = 1; ++ ++static int hyperv_report_reg(void) ++{ ++ return !sysctl_record_panic_msg || !hv_panic_page; ++} ++ + static int hyperv_panic_event(struct notifier_block *nb, unsigned long val, + void *args) + { + struct pt_regs *regs; + +- regs = current_pt_regs(); ++ vmbus_initiate_unload(true); + +- hyperv_report_panic(regs, val); ++ /* ++ * Hyper-V should be notified only once about a panic. If we will be ++ * doing hyperv_report_panic_msg() later with kmsg data, don't do ++ * the notification here. ++ */ ++ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE ++ && hyperv_report_reg()) { ++ regs = current_pt_regs(); ++ hyperv_report_panic(regs, val, false); ++ } + return NOTIFY_DONE; + } + +@@ -65,7 +87,13 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val, + struct die_args *die = (struct die_args *)args; + struct pt_regs *regs = die->regs; + +- hyperv_report_panic(regs, val); ++ /* ++ * Hyper-V should be notified only once about a panic. If we will be ++ * doing hyperv_report_panic_msg() later with kmsg data, don't do ++ * the notification here. ++ */ ++ if (hyperv_report_reg()) ++ hyperv_report_panic(regs, val, true); + return NOTIFY_DONE; + } + +@@ -1252,13 +1280,6 @@ static void vmbus_isr(void) + add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0); + } + +-/* +- * Boolean to control whether to report panic messages over Hyper-V. +- * +- * It can be set via /proc/sys/kernel/hyperv/record_panic_msg +- */ +-static int sysctl_record_panic_msg = 1; +- + /* + * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg + * buffer and call into Hyper-V to transfer the data. 
+@@ -1382,19 +1403,29 @@ static int vmbus_bus_init(void) + hv_panic_page = (void *)hv_alloc_hyperv_zeroed_page(); + if (hv_panic_page) { + ret = kmsg_dump_register(&hv_kmsg_dumper); +- if (ret) ++ if (ret) { + pr_err("Hyper-V: kmsg dump register " + "error 0x%x\n", ret); ++ hv_free_hyperv_page( ++ (unsigned long)hv_panic_page); ++ hv_panic_page = NULL; ++ } + } else + pr_err("Hyper-V: panic message page memory " + "allocation failed"); + } + + register_die_notifier(&hyperv_die_block); +- atomic_notifier_chain_register(&panic_notifier_list, +- &hyperv_panic_block); + } + ++ /* ++ * Always register the panic notifier because we need to unload ++ * the VMbus channel connection to prevent any VMbus ++ * activity after the VM panics. ++ */ ++ atomic_notifier_chain_register(&panic_notifier_list, ++ &hyperv_panic_block); ++ + vmbus_request_offers(); + + return 0; +@@ -1407,7 +1438,6 @@ err_alloc: + hv_remove_vmbus_irq(); + + bus_unregister(&hv_bus); +- hv_free_hyperv_page((unsigned long)hv_panic_page); + unregister_sysctl_table(hv_ctl_table_hdr); + hv_ctl_table_hdr = NULL; + return ret; +@@ -2204,8 +2234,6 @@ static int vmbus_bus_suspend(struct device *dev) + + vmbus_initiate_unload(false); + +- vmbus_connection.conn_state = DISCONNECTED; +- + /* Reset the event for the next resume. */ + reinit_completion(&vmbus_connection.ready_for_resume_event); + +@@ -2289,7 +2317,6 @@ static void hv_kexec_handler(void) + { + hv_stimer_global_cleanup(); + vmbus_initiate_unload(false); +- vmbus_connection.conn_state = DISCONNECTED; + /* Make sure conn_state is set as hv_synic_cleanup checks for it */ + mb(); + cpuhp_remove_state(hyperv_cpuhp_online); +@@ -2306,7 +2333,6 @@ static void hv_crash_handler(struct pt_regs *regs) + * doing the cleanup for current CPU only. This should be sufficient + * for kdump. 
+ */ +- vmbus_connection.conn_state = DISCONNECTED; + cpu = smp_processor_id(); + hv_stimer_cleanup(cpu); + hv_synic_disable_regs(cpu); +diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c +index e051edbc43c1..0e35ff06f9af 100644 +--- a/drivers/iio/common/st_sensors/st_sensors_core.c ++++ b/drivers/iio/common/st_sensors/st_sensors_core.c +@@ -328,6 +328,8 @@ static struct st_sensors_platform_data *st_sensors_dev_probe(struct device *dev, + return NULL; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); ++ if (!pdata) ++ return ERR_PTR(-ENOMEM); + if (!device_property_read_u32(dev, "st,drdy-int-pin", &val) && (val <= 2)) + pdata->drdy_int_pin = (u8) val; + else +@@ -371,6 +373,8 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev, + + /* If OF/DT pdata exists, it will take precedence of anything else */ + of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata); ++ if (IS_ERR(of_pdata)) ++ return PTR_ERR(of_pdata); + if (of_pdata) + pdata = of_pdata; + +diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c +index 015a21f0c2ef..9174ab928880 100644 +--- a/drivers/iio/light/si1133.c ++++ b/drivers/iio/light/si1133.c +@@ -102,6 +102,9 @@ + #define SI1133_INPUT_FRACTION_LOW 15 + #define SI1133_LUX_OUTPUT_FRACTION 12 + #define SI1133_LUX_BUFFER_SIZE 9 ++#define SI1133_MEASURE_BUFFER_SIZE 3 ++ ++#define SI1133_SIGN_BIT_INDEX 23 + + static const int si1133_scale_available[] = { + 1, 2, 4, 8, 16, 32, 64, 128}; +@@ -234,13 +237,13 @@ static const struct si1133_lux_coeff lux_coeff = { + } + }; + +-static int si1133_calculate_polynomial_inner(u32 input, u8 fraction, u16 mag, ++static int si1133_calculate_polynomial_inner(s32 input, u8 fraction, u16 mag, + s8 shift) + { + return ((input << fraction) / mag) << shift; + } + +-static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order, ++static int si1133_calculate_output(s32 x, s32 y, u8 x_order, u8 y_order, + u8 input_fraction, s8 sign, + const struct si1133_coeff *coeffs) + { +@@ -276,7 +279,7 @@ static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order, + * The algorithm is from: + * https://siliconlabs.github.io/Gecko_SDK_Doc/efm32zg/html/si1133_8c_source.html#l00716 + */ +-static int si1133_calc_polynomial(u32 x, u32 y, u8 input_fraction, u8 num_coeff, ++static int si1133_calc_polynomial(s32 x, s32 y, u8 input_fraction, u8 num_coeff, + const struct si1133_coeff *coeffs) + { + u8 x_order, y_order; +@@ -614,7 +617,7 @@ static int si1133_measure(struct si1133_data *data, + { + int err; + +- __be16 resp; ++ u8 buffer[SI1133_MEASURE_BUFFER_SIZE]; + + err = si1133_set_adcmux(data, 0, chan->channel); + if (err) +@@ -625,12 +628,13 @@ static int si1133_measure(struct si1133_data *data, + if (err) + return err; + +- err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(resp), +- (u8 *)&resp); ++ err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(buffer), ++ buffer); + if (err) + return err; + +- *val = be16_to_cpu(resp); ++ *val = sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2], ++ SI1133_SIGN_BIT_INDEX); + + return err; + } +@@ -704,9 +708,9 @@ static int si1133_get_lux(struct si1133_data *data, int *val) + { + int err; + int lux; +- u32 high_vis; +- u32 low_vis; +- u32 ir; ++ s32 high_vis; ++ s32 low_vis; ++ s32 ir; + u8 buffer[SI1133_LUX_BUFFER_SIZE]; + + /* Activate lux channels */ +@@ -719,9 +723,16 @@ static int si1133_get_lux(struct si1133_data *data, int *val) + if (err) + return err; + +- high_vis = 
(buffer[0] << 16) | (buffer[1] << 8) | buffer[2]; +- low_vis = (buffer[3] << 16) | (buffer[4] << 8) | buffer[5]; +- ir = (buffer[6] << 16) | (buffer[7] << 8) | buffer[8]; ++ high_vis = ++ sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2], ++ SI1133_SIGN_BIT_INDEX); ++ ++ low_vis = ++ sign_extend32((buffer[3] << 16) | (buffer[4] << 8) | buffer[5], ++ SI1133_SIGN_BIT_INDEX); ++ ++ ir = sign_extend32((buffer[6] << 16) | (buffer[7] << 8) | buffer[8], ++ SI1133_SIGN_BIT_INDEX); + + if (high_vis > SI1133_ADC_THRESHOLD || ir > SI1133_ADC_THRESHOLD) + lux = si1133_calc_polynomial(high_vis, ir, +diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig +index d2fade984999..25149544d57c 100644 +--- a/drivers/iommu/Kconfig ++++ b/drivers/iommu/Kconfig +@@ -188,6 +188,7 @@ config INTEL_IOMMU + select NEED_DMA_MAP_STATE + select DMAR_TABLE + select SWIOTLB ++ select IOASID + help + DMA remapping (DMAR) devices support enables independent address + translations for Direct Memory Access (DMA) from devices. +diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h +index f8d01d6b00da..ca8c4522045b 100644 +--- a/drivers/iommu/amd_iommu_types.h ++++ b/drivers/iommu/amd_iommu_types.h +@@ -348,7 +348,7 @@ + + #define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL) + #define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL) +-#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL) ++#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL) + + #define DTE_GCR3_INDEX_A 0 + #define DTE_GCR3_INDEX_B 1 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index 4be549478691..ef0a5246700e 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -4501,7 +4501,8 @@ static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr) + struct dmar_atsr_unit *atsru; + struct acpi_dmar_atsr *tmp; + +- list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) { ++ list_for_each_entry_rcu(atsru, &dmar_atsr_units, list, ++ dmar_rcu_check()) { + tmp = (struct acpi_dmar_atsr *)atsru->hdr; + if (atsr->segment != tmp->segment) + continue; +diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c +index d7f2a5358900..2998418f0a38 100644 +--- a/drivers/iommu/intel-svm.c ++++ b/drivers/iommu/intel-svm.c +@@ -531,7 +531,7 @@ struct page_req_dsc { + u64 priv_data[2]; + }; + +-#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10) ++#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20) + + static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req) + { +@@ -611,14 +611,15 @@ static irqreturn_t prq_event_thread(int irq, void *d) + * any faults on kernel addresses. */ + if (!svm->mm) + goto bad_req; +- /* If the mm is already defunct, don't handle faults. */ +- if (!mmget_not_zero(svm->mm)) +- goto bad_req; + + /* If address is not canonical, return invalid response */ + if (!is_canonical_address(address)) + goto bad_req; + ++ /* If the mm is already defunct, don't handle faults. 
*/ ++ if (!mmget_not_zero(svm->mm)) ++ goto bad_req; ++ + down_read(&svm->mm->mmap_sem); + vma = find_extend_vma(svm->mm, address); + if (!vma || address < vma->vm_start) +diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c +index cce329d71fba..5eed75cd121f 100644 +--- a/drivers/iommu/virtio-iommu.c ++++ b/drivers/iommu/virtio-iommu.c +@@ -613,18 +613,20 @@ static int viommu_domain_finalise(struct viommu_dev *viommu, + int ret; + struct viommu_domain *vdomain = to_viommu_domain(domain); + +- vdomain->viommu = viommu; +- vdomain->map_flags = viommu->map_flags; ++ ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, ++ viommu->last_domain, GFP_KERNEL); ++ if (ret < 0) ++ return ret; ++ ++ vdomain->id = (unsigned int)ret; + + domain->pgsize_bitmap = viommu->pgsize_bitmap; + domain->geometry = viommu->geometry; + +- ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, +- viommu->last_domain, GFP_KERNEL); +- if (ret >= 0) +- vdomain->id = (unsigned int)ret; ++ vdomain->map_flags = viommu->map_flags; ++ vdomain->viommu = viommu; + +- return ret > 0 ? 0 : ret; ++ return 0; + } + + static void viommu_domain_free(struct iommu_domain *domain) +diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c +index 6b566bba263b..ff7627b57772 100644 +--- a/drivers/irqchip/irq-mbigen.c ++++ b/drivers/irqchip/irq-mbigen.c +@@ -220,10 +220,16 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain, + return 0; + } + ++static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs) ++{ ++ platform_msi_domain_free(domain, virq, nr_irqs); ++} ++ + static const struct irq_domain_ops mbigen_domain_ops = { + .translate = mbigen_domain_translate, + .alloc = mbigen_irq_domain_alloc, +- .free = irq_domain_free_irqs_common, ++ .free = mbigen_irq_domain_free, + }; + + static int mbigen_of_create_domain(struct platform_device *pdev, +diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c +index 1fc40e8af75e..3363a6551a70 100644 +--- a/drivers/leds/led-class.c ++++ b/drivers/leds/led-class.c +@@ -376,7 +376,7 @@ int led_classdev_register_ext(struct device *parent, + + if (ret) + dev_warn(parent, "Led %s renamed to %s due to name collision", +- led_cdev->name, dev_name(led_cdev->dev)); ++ proposed_name, dev_name(led_cdev->dev)); + + if (led_cdev->flags & LED_BRIGHT_HW_CHANGED) { + ret = led_add_brightness_hw_changed(led_cdev); +diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c +index 21f05240682b..33b8216bac30 100644 +--- a/drivers/memory/tegra/tegra124-emc.c ++++ b/drivers/memory/tegra/tegra124-emc.c +@@ -1158,6 +1158,11 @@ static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc) + emc->debugfs.max_rate = emc->timings[i].rate; + } + ++ if (!emc->num_timings) { ++ emc->debugfs.min_rate = clk_get_rate(emc->clk); ++ emc->debugfs.max_rate = emc->debugfs.min_rate; ++ } ++ + err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate, + emc->debugfs.max_rate); + if (err < 0) { +diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c +index 8ae474d9bfb9..b16715e9515d 100644 +--- a/drivers/memory/tegra/tegra20-emc.c ++++ b/drivers/memory/tegra/tegra20-emc.c +@@ -628,6 +628,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc) + emc->debugfs.max_rate = emc->timings[i].rate; + } + ++ if (!emc->num_timings) { ++ emc->debugfs.min_rate = clk_get_rate(emc->clk); ++ emc->debugfs.max_rate = emc->debugfs.min_rate; ++ } ++ + err = 
clk_set_rate_range(emc->clk, emc->debugfs.min_rate, + emc->debugfs.max_rate); + if (err < 0) { +diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c +index e3efd9529506..b42bdb667e85 100644 +--- a/drivers/memory/tegra/tegra30-emc.c ++++ b/drivers/memory/tegra/tegra30-emc.c +@@ -1256,6 +1256,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc) + emc->debugfs.max_rate = emc->timings[i].rate; + } + ++ if (!emc->num_timings) { ++ emc->debugfs.min_rate = clk_get_rate(emc->clk); ++ emc->debugfs.max_rate = emc->debugfs.min_rate; ++ } ++ + err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate, + emc->debugfs.max_rate); + if (err < 0) { +diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c +index 39e611695053..32c2b912b58b 100644 +--- a/drivers/mfd/cros_ec_dev.c ++++ b/drivers/mfd/cros_ec_dev.c +@@ -211,7 +211,7 @@ static int ec_device_probe(struct platform_device *pdev) + * explicitly added on platforms that don't have the PD notifier ACPI + * device entry defined. + */ +- if (IS_ENABLED(CONFIG_OF)) { ++ if (IS_ENABLED(CONFIG_OF) && ec->ec_dev->dev->of_node) { + if (cros_ec_check_features(ec, EC_FEATURE_USB_PD)) { + retval = mfd_add_hotplug_devices(ec->dev, + cros_usbpd_notify_cells, +diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c +index 931e5c2481b5..b50ec7ecd10c 100644 +--- a/drivers/mtd/devices/phram.c ++++ b/drivers/mtd/devices/phram.c +@@ -243,22 +243,25 @@ static int phram_setup(const char *val) + + ret = parse_num64(&start, token[1]); + if (ret) { +- kfree(name); + parse_err("illegal start address\n"); ++ goto error; + } + + ret = parse_num64(&len, token[2]); + if (ret) { +- kfree(name); + parse_err("illegal device length\n"); ++ goto error; + } + + ret = register_device(name, start, len); +- if (!ret) +- pr_info("%s device: %#llx at %#llx\n", name, len, start); +- else +- kfree(name); ++ if (ret) ++ goto error; ++ ++ pr_info("%s device: %#llx at %#llx\n", name, len, start); ++ return 0; + ++error: ++ kfree(name); + return ret; + } + +diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c +index 1efc643c9871..9341a8a592e8 100644 +--- a/drivers/mtd/lpddr/lpddr_cmds.c ++++ b/drivers/mtd/lpddr/lpddr_cmds.c +@@ -68,7 +68,6 @@ struct mtd_info *lpddr_cmdset(struct map_info *map) + shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared), + GFP_KERNEL); + if (!shared) { +- kfree(lpddr); + kfree(mtd); + return NULL; + } +diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c +index f64e3b6605c6..47c63968fa45 100644 +--- a/drivers/mtd/nand/raw/nand_base.c ++++ b/drivers/mtd/nand/raw/nand_base.c +@@ -5907,6 +5907,8 @@ void nand_cleanup(struct nand_chip *chip) + chip->ecc.algo == NAND_ECC_BCH) + nand_bch_free((struct nand_bch_control *)chip->ecc.priv); + ++ nanddev_cleanup(&chip->base); ++ + /* Free bad block table memory */ + kfree(chip->bbt); + kfree(chip->data_buf); +diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c +index 5750c45019d8..8dda51bbdd11 100644 +--- a/drivers/mtd/nand/spi/core.c ++++ b/drivers/mtd/nand/spi/core.c +@@ -609,6 +609,7 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos) + .ooboffs = 0, + .ooblen = sizeof(marker), + .oobbuf.out = marker, ++ .mode = MTD_OPS_RAW, + }; + int ret; + +diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c +index 1962c8330daa..f9785027c096 100644 +--- a/drivers/net/dsa/bcm_sf2_cfp.c ++++ b/drivers/net/dsa/bcm_sf2_cfp.c +@@ -882,17 
+882,14 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, + fs->m_ext.data[1])) + return -EINVAL; + +- if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES) ++ if (fs->location != RX_CLS_LOC_ANY && ++ fs->location > bcm_sf2_cfp_rule_size(priv)) + return -EINVAL; + + if (fs->location != RX_CLS_LOC_ANY && + test_bit(fs->location, priv->cfp.used)) + return -EBUSY; + +- if (fs->location != RX_CLS_LOC_ANY && +- fs->location > bcm_sf2_cfp_rule_size(priv)) +- return -EINVAL; +- + ret = bcm_sf2_cfp_rule_cmp(priv, port, fs); + if (ret == 0) + return -EEXIST; +@@ -973,7 +970,7 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc) + struct cfp_rule *rule; + int ret; + +- if (loc >= CFP_NUM_RULES) ++ if (loc > bcm_sf2_cfp_rule_size(priv)) + return -EINVAL; + + /* Refuse deleting unused rules, and those that are not unique since +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +index d2cfa247abc8..9710cdecb63a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +@@ -1535,6 +1535,10 @@ static int mlx5e_set_fecparam(struct net_device *netdev, + int mode; + int err; + ++ if (bitmap_weight((unsigned long *)&fecparam->fec, ++ ETHTOOL_FEC_BASER_BIT + 1) > 1) ++ return -EOPNOTSUPP; ++ + for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) { + if (!(pplm_fec_2_ethtool[mode] & fecparam->fec)) + continue; +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c +index 061aada4748a..9b4ae5c36da6 100644 +--- a/drivers/net/macsec.c ++++ b/drivers/net/macsec.c +@@ -2398,6 +2398,9 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) + return PTR_ERR(dev); + macsec = macsec_priv(dev); + ++ if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) ++ return -EINVAL; ++ + offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); + if (macsec->offload == offload) + return 0; +diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c +index a8b515968569..09087c38fabd 100644 +--- a/drivers/nvdimm/bus.c ++++ b/drivers/nvdimm/bus.c +@@ -1042,8 +1042,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, + return -EFAULT; + } + +- if (!desc || (desc->out_num + desc->in_num == 0) || +- !test_bit(cmd, &cmd_mask)) ++ if (!desc || ++ (desc->out_num + desc->in_num == 0) || ++ cmd > ND_CMD_CALL || ++ !test_bit(cmd, &cmd_mask)) + return -ENOTTY; + + /* fail write commands (when read-only) */ +diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c +index c9219fddf44b..50bbe0edf538 100644 +--- a/drivers/of/overlay.c ++++ b/drivers/of/overlay.c +@@ -261,6 +261,8 @@ static struct property *dup_and_fixup_symbol_prop( + + of_property_set_flag(new_prop, OF_DYNAMIC); + ++ kfree(target_path); ++ + return new_prop; + + err_free_new_prop: +diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c +index 68b87587b2ef..7199aaafd304 100644 +--- a/drivers/of/unittest.c ++++ b/drivers/of/unittest.c +@@ -777,6 +777,10 @@ static void __init of_unittest_changeset(void) + unittest(!of_changeset_revert(&chgset), "revert failed\n"); + + of_changeset_destroy(&chgset); ++ ++ of_node_put(n1); ++ of_node_put(n2); ++ of_node_put(n21); + #endif + } + +@@ -1151,10 +1155,13 @@ static void __init of_unittest_platform_populate(void) + + of_platform_populate(np, match, NULL, &test_bus->dev); + for_each_child_of_node(np, child) { +- for_each_child_of_node(child, grandchild) +- 
unittest(of_find_device_by_node(grandchild), ++ for_each_child_of_node(child, grandchild) { ++ pdev = of_find_device_by_node(grandchild); ++ unittest(pdev, + "Could not create device for node '%pOFn'\n", + grandchild); ++ of_dev_put(pdev); ++ } + } + + of_platform_depopulate(&test_bus->dev); +@@ -2564,8 +2571,11 @@ static __init void of_unittest_overlay_high_level(void) + goto err_unlock; + } + if (__of_add_property(of_symbols, new_prop)) { ++ kfree(new_prop->name); ++ kfree(new_prop->value); ++ kfree(new_prop); + /* "name" auto-generated by unflatten */ +- if (!strcmp(new_prop->name, "name")) ++ if (!strcmp(prop->name, "name")) + continue; + unittest(0, "duplicate property '%s' in overlay_base node __symbols__", + prop->name); +diff --git a/drivers/phy/socionext/phy-uniphier-usb3ss.c b/drivers/phy/socionext/phy-uniphier-usb3ss.c +index ec231e40ef2a..a7577e316baf 100644 +--- a/drivers/phy/socionext/phy-uniphier-usb3ss.c ++++ b/drivers/phy/socionext/phy-uniphier-usb3ss.c +@@ -314,6 +314,10 @@ static const struct of_device_id uniphier_u3ssphy_match[] = { + .compatible = "socionext,uniphier-pro4-usb3-ssphy", + .data = &uniphier_pro4_data, + }, ++ { ++ .compatible = "socionext,uniphier-pro5-usb3-ssphy", ++ .data = &uniphier_pro4_data, ++ }, + { + .compatible = "socionext,uniphier-pxs2-usb3-ssphy", + .data = &uniphier_pxs2_data, +diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c +index 6fc8f2c3ac51..7ee43b2e0654 100644 +--- a/drivers/platform/chrome/cros_ec.c ++++ b/drivers/platform/chrome/cros_ec.c +@@ -138,6 +138,24 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event) + return ret; + } + ++static int cros_ec_ready_event(struct notifier_block *nb, ++ unsigned long queued_during_suspend, ++ void *_notify) ++{ ++ struct cros_ec_device *ec_dev = container_of(nb, struct cros_ec_device, ++ notifier_ready); ++ u32 host_event = cros_ec_get_host_event(ec_dev); ++ ++ if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_INTERFACE_READY)) { ++ mutex_lock(&ec_dev->lock); ++ cros_ec_query_all(ec_dev); ++ mutex_unlock(&ec_dev->lock); ++ return NOTIFY_OK; ++ } ++ ++ return NOTIFY_DONE; ++} ++ + /** + * cros_ec_register() - Register a new ChromeOS EC, using the provided info. + * @ec_dev: Device to register. +@@ -237,6 +255,18 @@ int cros_ec_register(struct cros_ec_device *ec_dev) + dev_dbg(ec_dev->dev, "Error %d clearing sleep event to ec", + err); + ++ if (ec_dev->mkbp_event_supported) { ++ /* ++ * Register the notifier for EC_HOST_EVENT_INTERFACE_READY ++ * event. 
++ */ ++ ec_dev->notifier_ready.notifier_call = cros_ec_ready_event; ++ err = blocking_notifier_chain_register(&ec_dev->event_notifier, ++ &ec_dev->notifier_ready); ++ if (err) ++ return err; ++ } ++ + dev_info(dev, "Chrome EC device registered\n"); + + return 0; +diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c +index 43d590250228..9c0e6e0fabdf 100644 +--- a/drivers/platform/x86/intel-hid.c ++++ b/drivers/platform/x86/intel-hid.c +@@ -19,8 +19,8 @@ MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Alex Hung"); + + static const struct acpi_device_id intel_hid_ids[] = { +- {"INT1051", 0}, + {"INT33D5", 0}, ++ {"INTC1051", 0}, + {"", 0}, + }; + +diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c +index e1bc4e6e6f30..f40fa0e63b6e 100644 +--- a/drivers/power/supply/axp288_fuel_gauge.c ++++ b/drivers/power/supply/axp288_fuel_gauge.c +@@ -706,14 +706,14 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = { + { + /* Intel Cherry Trail Compute Stick, Windows version */ + .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), ++ DMI_MATCH(DMI_SYS_VENDOR, "Intel"), + DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"), + }, + }, + { + /* Intel Cherry Trail Compute Stick, version without an OS */ + .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), ++ DMI_MATCH(DMI_SYS_VENDOR, "Intel"), + DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"), + }, + }, +diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c +index 195c18c2f426..664e50103eaa 100644 +--- a/drivers/power/supply/bq27xxx_battery.c ++++ b/drivers/power/supply/bq27xxx_battery.c +@@ -1885,7 +1885,10 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di) + + di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg); + if (IS_ERR(di->bat)) { +- dev_err(di->dev, "failed to register battery\n"); ++ if (PTR_ERR(di->bat) == -EPROBE_DEFER) ++ dev_dbg(di->dev, "failed to register battery, deferring probe\n"); ++ else ++ dev_err(di->dev, "failed to register battery\n"); + return PTR_ERR(di->bat); + } + +diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c +index 4743b16a8d84..1526402e126b 100644 +--- a/drivers/rtc/rtc-88pm860x.c ++++ b/drivers/rtc/rtc-88pm860x.c +@@ -336,6 +336,10 @@ static int pm860x_rtc_probe(struct platform_device *pdev) + info->dev = &pdev->dev; + dev_set_drvdata(&pdev->dev, info); + ++ info->rtc_dev = devm_rtc_allocate_device(&pdev->dev); ++ if (IS_ERR(info->rtc_dev)) ++ return PTR_ERR(info->rtc_dev); ++ + ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL, + rtc_update_handler, IRQF_ONESHOT, "rtc", + info); +@@ -377,13 +381,11 @@ static int pm860x_rtc_probe(struct platform_device *pdev) + } + } + +- info->rtc_dev = devm_rtc_device_register(&pdev->dev, "88pm860x-rtc", +- &pm860x_rtc_ops, THIS_MODULE); +- ret = PTR_ERR(info->rtc_dev); +- if (IS_ERR(info->rtc_dev)) { +- dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); ++ info->rtc_dev->ops = &pm860x_rtc_ops; ++ ++ ret = rtc_register_device(info->rtc_dev); ++ if (ret) + return ret; +- } + + /* + * enable internal XO instead of internal 3.25MHz clock since it can +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 4e6af592f018..9c0ee192f0f9 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -793,8 +793,10 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, + "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", + (int) cmnd[0], (int) hp->cmd_len)); + +- if (hp->dxfer_len >= SZ_256M) ++ if 
(hp->dxfer_len >= SZ_256M) { ++ sg_remove_request(sfp, srp); + return -EINVAL; ++ } + + k = sg_start_req(srp, cmnd); + if (k) { +diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c +index 98b9d9a902ae..90a8b2c0676f 100644 +--- a/drivers/soc/imx/gpc.c ++++ b/drivers/soc/imx/gpc.c +@@ -87,8 +87,8 @@ static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd) + static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd) + { + struct imx_pm_domain *pd = to_imx_pm_domain(genpd); +- int i, ret, sw, sw2iso; +- u32 val; ++ int i, ret; ++ u32 val, req; + + if (pd->supply) { + ret = regulator_enable(pd->supply); +@@ -107,17 +107,18 @@ static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd) + regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS, + 0x1, 0x1); + +- /* Read ISO and ISO2SW power up delays */ +- regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val); +- sw = val & 0x3f; +- sw2iso = (val >> 8) & 0x3f; +- + /* Request GPC to power up domain */ +- val = BIT(pd->cntr_pdn_bit + 1); +- regmap_update_bits(pd->regmap, GPC_CNTR, val, val); ++ req = BIT(pd->cntr_pdn_bit + 1); ++ regmap_update_bits(pd->regmap, GPC_CNTR, req, req); + +- /* Wait ISO + ISO2SW IPG clock cycles */ +- udelay(DIV_ROUND_UP(sw + sw2iso, pd->ipg_rate_mhz)); ++ /* Wait for the PGC to handle the request */ ++ ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req), ++ 1, 50); ++ if (ret) ++ pr_err("powerup request on domain %s timed out\n", genpd->name); ++ ++ /* Wait for reset to propagate through peripherals */ ++ usleep_range(5, 10); + + /* Disable reset clocks for all devices in the domain */ + for (i = 0; i < pd->num_clks; i++) +@@ -343,6 +344,7 @@ static const struct regmap_config imx_gpc_regmap_config = { + .rd_table = &access_table, + .wr_table = &access_table, + .max_register = 0x2ac, ++ .fast_io = true, + }; + + static struct generic_pm_domain *imx_gpc_onecell_domains[] = { +diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig +index 5a05db5438d6..5a0df0e54ce3 100644 +--- a/drivers/thermal/Kconfig ++++ b/drivers/thermal/Kconfig +@@ -265,6 +265,7 @@ config QORIQ_THERMAL + tristate "QorIQ Thermal Monitoring Unit" + depends on THERMAL_OF + depends on HAS_IOMEM ++ select REGMAP_MMIO + help + Support for Thermal Monitoring Unit (TMU) found on QorIQ platforms. + It supports one critical trip point and one passive trip point. The +diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c +index fe83d7a210d4..af55ac08e1bd 100644 +--- a/drivers/thermal/cpufreq_cooling.c ++++ b/drivers/thermal/cpufreq_cooling.c +@@ -431,6 +431,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state) + { + struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; ++ int ret; + + /* Request state should be less than max_level */ + if (WARN_ON(state > cpufreq_cdev->max_level)) +@@ -442,8 +443,9 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, + + cpufreq_cdev->cpufreq_state = state; + +- return freq_qos_update_request(&cpufreq_cdev->qos_req, +- get_state_freq(cpufreq_cdev, state)); ++ ret = freq_qos_update_request(&cpufreq_cdev->qos_req, ++ get_state_freq(cpufreq_cdev, state)); ++ return ret < 0 ? 
ret : 0; + } + + /* Bind cpufreq callbacks to thermal cooling device ops */ +diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c +index c8d57ee0a5bb..2cc276cdfcdb 100644 +--- a/drivers/thermal/qcom/tsens-common.c ++++ b/drivers/thermal/qcom/tsens-common.c +@@ -602,7 +602,7 @@ int __init init_common(struct tsens_priv *priv) + /* DT with separate SROT and TM address space */ + priv->tm_offset = 0; + res = platform_get_resource(op, IORESOURCE_MEM, 1); +- srot_base = devm_ioremap_resource(&op->dev, res); ++ srot_base = devm_ioremap_resource(dev, res); + if (IS_ERR(srot_base)) { + ret = PTR_ERR(srot_base); + goto err_put_device; +@@ -620,7 +620,7 @@ int __init init_common(struct tsens_priv *priv) + } + + res = platform_get_resource(op, IORESOURCE_MEM, 0); +- tm_base = devm_ioremap_resource(&op->dev, res); ++ tm_base = devm_ioremap_resource(dev, res); + if (IS_ERR(tm_base)) { + ret = PTR_ERR(tm_base); + goto err_put_device; +@@ -687,8 +687,6 @@ int __init init_common(struct tsens_priv *priv) + tsens_enable_irq(priv); + tsens_debug_init(op); + +- return 0; +- + err_put_device: + put_device(&op->dev); + return ret; +diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c +index 769e0a5d1dfc..3c6dd06ec5fb 100644 +--- a/drivers/tty/ehv_bytechan.c ++++ b/drivers/tty/ehv_bytechan.c +@@ -136,6 +136,21 @@ static int find_console_handle(void) + return 1; + } + ++static unsigned int local_ev_byte_channel_send(unsigned int handle, ++ unsigned int *count, ++ const char *p) ++{ ++ char buffer[EV_BYTE_CHANNEL_MAX_BYTES]; ++ unsigned int c = *count; ++ ++ if (c < sizeof(buffer)) { ++ memcpy(buffer, p, c); ++ memset(&buffer[c], 0, sizeof(buffer) - c); ++ p = buffer; ++ } ++ return ev_byte_channel_send(handle, count, p); ++} ++ + /*************************** EARLY CONSOLE DRIVER ***************************/ + + #ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC +@@ -154,7 +169,7 @@ static void byte_channel_spin_send(const char data) + + do { + count = 1; +- ret = ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE, ++ ret = local_ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE, + &count, &data); + } while (ret == EV_EAGAIN); + } +@@ -221,7 +236,7 @@ static int ehv_bc_console_byte_channel_send(unsigned int handle, const char *s, + while (count) { + len = min_t(unsigned int, count, EV_BYTE_CHANNEL_MAX_BYTES); + do { +- ret = ev_byte_channel_send(handle, &len, s); ++ ret = local_ev_byte_channel_send(handle, &len, s); + } while (ret == EV_EAGAIN); + count -= len; + s += len; +@@ -401,7 +416,7 @@ static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc) + CIRC_CNT_TO_END(bc->head, bc->tail, BUF_SIZE), + EV_BYTE_CHANNEL_MAX_BYTES); + +- ret = ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail); ++ ret = local_ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail); + + /* 'len' is valid only if the return code is 0 or EV_EAGAIN */ + if (!ret || (ret == EV_EAGAIN)) +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c +index d04554959ea7..30e73ec4ad5c 100644 +--- a/drivers/video/fbdev/core/fbmem.c ++++ b/drivers/video/fbdev/core/fbmem.c +@@ -663,20 +663,20 @@ int fb_prepare_logo(struct fb_info *info, int rotate) + fb_logo.depth = 1; + + +- if (fb_logo.depth > 4 && depth > 4) { +- switch (info->fix.visual) { +- case FB_VISUAL_TRUECOLOR: +- fb_logo.needs_truepalette = 1; +- break; +- case FB_VISUAL_DIRECTCOLOR: +- fb_logo.needs_directpalette = 1; +- fb_logo.needs_cmapreset = 1; +- break; +- case FB_VISUAL_PSEUDOCOLOR: +- 
fb_logo.needs_cmapreset = 1;
+- break;
+- }
+- }
++ if (fb_logo.depth > 4 && depth > 4) {
++ switch (info->fix.visual) {
++ case FB_VISUAL_TRUECOLOR:
++ fb_logo.needs_truepalette = 1;
++ break;
++ case FB_VISUAL_DIRECTCOLOR:
++ fb_logo.needs_directpalette = 1;
++ fb_logo.needs_cmapreset = 1;
++ break;
++ case FB_VISUAL_PSEUDOCOLOR:
++ fb_logo.needs_cmapreset = 1;
++ break;
++ }
++ }
+
+ height = fb_logo.logo->height;
+ if (fb_center_logo)
+@@ -1065,19 +1065,19 @@ fb_blank(struct fb_info *info, int blank)
+ struct fb_event event;
+ int ret = -EINVAL;
+
+- if (blank > FB_BLANK_POWERDOWN)
+- blank = FB_BLANK_POWERDOWN;
++ if (blank > FB_BLANK_POWERDOWN)
++ blank = FB_BLANK_POWERDOWN;
+
+ event.info = info;
+ event.data = &blank;
+
+ if (info->fbops->fb_blank)
+- ret = info->fbops->fb_blank(blank, info);
++ ret = info->fbops->fb_blank(blank, info);
+
+ if (!ret)
+ fb_notifier_call_chain(FB_EVENT_BLANK, &event);
+
+- return ret;
++ return ret;
+ }
+ EXPORT_SYMBOL(fb_blank);
+
+@@ -1115,7 +1115,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ break;
+ case FBIOGET_FSCREENINFO:
+ lock_fb_info(info);
+- fix = info->fix;
++ memcpy(&fix, &info->fix, sizeof(fix));
+ if (info->flags & FBINFO_HIDE_SMEM_START)
+ fix.smem_start = 0;
+ unlock_fb_info(info);
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 341458fd95ca..44375a22307b 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -14,6 +14,7 @@
+ #include <linux/slab.h>
+ #include <linux/module.h>
+ #include <linux/balloon_compaction.h>
++#include <linux/oom.h>
+ #include <linux/wait.h>
+ #include <linux/mm.h>
+ #include <linux/mount.h>
+@@ -27,7 +28,9 @@
+ */
+ #define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
+ #define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
+-#define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80
++/* Maximum number of (4k) pages to deflate on OOM notifications. */
++#define VIRTIO_BALLOON_OOM_NR_PAGES 256
++#define VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY 80
+
+ #define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
+ __GFP_NOMEMALLOC)
+@@ -112,8 +115,11 @@ struct virtio_balloon {
+ /* Memory statistics */
+ struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
+
+- /* To register a shrinker to shrink memory upon memory pressure */
++ /* Shrinker to return free pages - VIRTIO_BALLOON_F_FREE_PAGE_HINT */
+ struct shrinker shrinker;
++
++ /* OOM notifier to deflate on OOM - VIRTIO_BALLOON_F_DEFLATE_ON_OOM */
++ struct notifier_block oom_nb;
+ };
+
+ static struct virtio_device_id id_table[] = {
+@@ -788,50 +794,13 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb,
+ return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
+ }
+
+-static unsigned long leak_balloon_pages(struct virtio_balloon *vb,
+- unsigned long pages_to_free)
+-{
+- return leak_balloon(vb, pages_to_free * VIRTIO_BALLOON_PAGES_PER_PAGE) /
+- VIRTIO_BALLOON_PAGES_PER_PAGE;
+-}
+-
+-static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
+- unsigned long pages_to_free)
+-{
+- unsigned long pages_freed = 0;
+-
+- /*
+- * One invocation of leak_balloon can deflate at most
+- * VIRTIO_BALLOON_ARRAY_PFNS_MAX balloon pages, so we call it
+- * multiple times to deflate pages till reaching pages_to_free.
+- */ +- while (vb->num_pages && pages_freed < pages_to_free) +- pages_freed += leak_balloon_pages(vb, +- pages_to_free - pages_freed); +- +- update_balloon_size(vb); +- +- return pages_freed; +-} +- + static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker, + struct shrink_control *sc) + { +- unsigned long pages_to_free, pages_freed = 0; + struct virtio_balloon *vb = container_of(shrinker, + struct virtio_balloon, shrinker); + +- pages_to_free = sc->nr_to_scan; +- +- if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) +- pages_freed = shrink_free_pages(vb, pages_to_free); +- +- if (pages_freed >= pages_to_free) +- return pages_freed; +- +- pages_freed += shrink_balloon_pages(vb, pages_to_free - pages_freed); +- +- return pages_freed; ++ return shrink_free_pages(vb, sc->nr_to_scan); + } + + static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker, +@@ -839,26 +808,22 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker, + { + struct virtio_balloon *vb = container_of(shrinker, + struct virtio_balloon, shrinker); +- unsigned long count; +- +- count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE; +- count += vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES; + +- return count; ++ return vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES; + } + +-static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb) ++static int virtio_balloon_oom_notify(struct notifier_block *nb, ++ unsigned long dummy, void *parm) + { +- unregister_shrinker(&vb->shrinker); +-} ++ struct virtio_balloon *vb = container_of(nb, ++ struct virtio_balloon, oom_nb); ++ unsigned long *freed = parm; + +-static int virtio_balloon_register_shrinker(struct virtio_balloon *vb) +-{ +- vb->shrinker.scan_objects = virtio_balloon_shrinker_scan; +- vb->shrinker.count_objects = virtio_balloon_shrinker_count; +- vb->shrinker.seeks = DEFAULT_SEEKS; ++ *freed += leak_balloon(vb, VIRTIO_BALLOON_OOM_NR_PAGES) / ++ VIRTIO_BALLOON_PAGES_PER_PAGE; ++ update_balloon_size(vb); + +- return register_shrinker(&vb->shrinker); ++ return NOTIFY_OK; + } + + static int virtballoon_probe(struct virtio_device *vdev) +@@ -935,22 +900,35 @@ static int virtballoon_probe(struct virtio_device *vdev) + virtio_cwrite(vb->vdev, struct virtio_balloon_config, + poison_val, &poison_val); + } +- } +- /* +- * We continue to use VIRTIO_BALLOON_F_DEFLATE_ON_OOM to decide if a +- * shrinker needs to be registered to relieve memory pressure. +- */ +- if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) { +- err = virtio_balloon_register_shrinker(vb); ++ ++ /* ++ * We're allowed to reuse any free pages, even if they are ++ * still to be processed by the host. 
++ */ ++ vb->shrinker.scan_objects = virtio_balloon_shrinker_scan; ++ vb->shrinker.count_objects = virtio_balloon_shrinker_count; ++ vb->shrinker.seeks = DEFAULT_SEEKS; ++ err = register_shrinker(&vb->shrinker); + if (err) + goto out_del_balloon_wq; + } ++ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) { ++ vb->oom_nb.notifier_call = virtio_balloon_oom_notify; ++ vb->oom_nb.priority = VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY; ++ err = register_oom_notifier(&vb->oom_nb); ++ if (err < 0) ++ goto out_unregister_shrinker; ++ } ++ + virtio_device_ready(vdev); + + if (towards_target(vb)) + virtballoon_changed(vdev); + return 0; + ++out_unregister_shrinker: ++ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) ++ unregister_shrinker(&vb->shrinker); + out_del_balloon_wq: + if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) + destroy_workqueue(vb->balloon_wq); +@@ -989,8 +967,11 @@ static void virtballoon_remove(struct virtio_device *vdev) + { + struct virtio_balloon *vb = vdev->priv; + +- if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) +- virtio_balloon_unregister_shrinker(vb); ++ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) ++ unregister_oom_notifier(&vb->oom_nb); ++ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) ++ unregister_shrinker(&vb->shrinker); ++ + spin_lock_irq(&vb->stop_update_lock); + vb->stop_update = true; + spin_unlock_irq(&vb->stop_update_lock); +diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c +index 53e04926a7b2..190d26e2e75f 100644 +--- a/drivers/watchdog/sp805_wdt.c ++++ b/drivers/watchdog/sp805_wdt.c +@@ -137,10 +137,14 @@ wdt_restart(struct watchdog_device *wdd, unsigned long mode, void *cmd) + { + struct sp805_wdt *wdt = watchdog_get_drvdata(wdd); + ++ writel_relaxed(UNLOCK, wdt->base + WDTLOCK); + writel_relaxed(0, wdt->base + WDTCONTROL); + writel_relaxed(0, wdt->base + WDTLOAD); + writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL); + ++ /* Flush posted writes. 
*/ ++ readl_relaxed(wdt->base + WDTLOCK); ++ + return 0; + } + +diff --git a/fs/afs/dir.c b/fs/afs/dir.c +index 5c794f4b051a..d1e1caa23c8b 100644 +--- a/fs/afs/dir.c ++++ b/fs/afs/dir.c +@@ -1032,7 +1032,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) + struct dentry *parent; + struct inode *inode; + struct key *key; +- afs_dataversion_t dir_version; ++ afs_dataversion_t dir_version, invalid_before; + long de_version; + int ret; + +@@ -1084,8 +1084,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) + if (de_version == (long)dir_version) + goto out_valid_noupdate; + +- dir_version = dir->invalid_before; +- if (de_version - (long)dir_version >= 0) ++ invalid_before = dir->invalid_before; ++ if (de_version - (long)invalid_before >= 0) + goto out_valid; + + _debug("dir modified"); +@@ -1275,6 +1275,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) + struct afs_fs_cursor fc; + struct afs_vnode *dvnode = AFS_FS_I(dir); + struct key *key; ++ afs_dataversion_t data_version; + int ret; + + mode |= S_IFDIR; +@@ -1295,7 +1296,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) + + ret = -ERESTARTSYS; + if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { +- afs_dataversion_t data_version = dvnode->status.data_version + 1; ++ data_version = dvnode->status.data_version + 1; + + while (afs_select_fileserver(&fc)) { + fc.cb_break = afs_calc_vnode_cb_break(dvnode); +@@ -1316,10 +1317,14 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) + goto error_key; + } + +- if (ret == 0 && +- test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) +- afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid, +- afs_edit_dir_for_create); ++ if (ret == 0) { ++ down_write(&dvnode->validate_lock); ++ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && ++ dvnode->status.data_version == data_version) ++ afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid, ++ afs_edit_dir_for_create); ++ up_write(&dvnode->validate_lock); ++ } + + key_put(key); + kfree(scb); +@@ -1360,6 +1365,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) + struct afs_fs_cursor fc; + struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL; + struct key *key; ++ afs_dataversion_t data_version; + int ret; + + _enter("{%llx:%llu},{%pd}", +@@ -1391,7 +1397,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) + + ret = -ERESTARTSYS; + if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { +- afs_dataversion_t data_version = dvnode->status.data_version + 1; ++ data_version = dvnode->status.data_version + 1; + + while (afs_select_fileserver(&fc)) { + fc.cb_break = afs_calc_vnode_cb_break(dvnode); +@@ -1404,9 +1410,12 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) + ret = afs_end_vnode_operation(&fc); + if (ret == 0) { + afs_dir_remove_subdir(dentry); +- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) ++ down_write(&dvnode->validate_lock); ++ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && ++ dvnode->status.data_version == data_version) + afs_edit_dir_remove(dvnode, &dentry->d_name, + afs_edit_dir_for_rmdir); ++ up_write(&dvnode->validate_lock); + } + } + +@@ -1544,10 +1553,15 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) + ret = afs_end_vnode_operation(&fc); + if (ret == 0 && !(scb[1].have_status || scb[1].have_error)) + ret = afs_dir_remove_link(dvnode, dentry, key); +- if (ret == 0 && +- test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) +- 
afs_edit_dir_remove(dvnode, &dentry->d_name, +- afs_edit_dir_for_unlink); ++ ++ if (ret == 0) { ++ down_write(&dvnode->validate_lock); ++ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && ++ dvnode->status.data_version == data_version) ++ afs_edit_dir_remove(dvnode, &dentry->d_name, ++ afs_edit_dir_for_unlink); ++ up_write(&dvnode->validate_lock); ++ } + } + + if (need_rehash && ret < 0 && ret != -ENOENT) +@@ -1573,6 +1587,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode, + struct afs_status_cb *scb; + struct afs_vnode *dvnode = AFS_FS_I(dir); + struct key *key; ++ afs_dataversion_t data_version; + int ret; + + mode |= S_IFREG; +@@ -1597,7 +1612,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode, + + ret = -ERESTARTSYS; + if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { +- afs_dataversion_t data_version = dvnode->status.data_version + 1; ++ data_version = dvnode->status.data_version + 1; + + while (afs_select_fileserver(&fc)) { + fc.cb_break = afs_calc_vnode_cb_break(dvnode); +@@ -1618,9 +1633,12 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode, + goto error_key; + } + +- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) ++ down_write(&dvnode->validate_lock); ++ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && ++ dvnode->status.data_version == data_version) + afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid, + afs_edit_dir_for_create); ++ up_write(&dvnode->validate_lock); + + kfree(scb); + key_put(key); +@@ -1648,6 +1666,7 @@ static int afs_link(struct dentry *from, struct inode *dir, + struct afs_vnode *dvnode = AFS_FS_I(dir); + struct afs_vnode *vnode = AFS_FS_I(d_inode(from)); + struct key *key; ++ afs_dataversion_t data_version; + int ret; + + _enter("{%llx:%llu},{%llx:%llu},{%pd}", +@@ -1672,7 +1691,7 @@ static int afs_link(struct dentry *from, struct inode *dir, + + ret = -ERESTARTSYS; + if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { +- afs_dataversion_t data_version = dvnode->status.data_version + 1; ++ data_version = dvnode->status.data_version + 1; + + if (mutex_lock_interruptible_nested(&vnode->io_lock, 1) < 0) { + afs_end_vnode_operation(&fc); +@@ -1702,9 +1721,12 @@ static int afs_link(struct dentry *from, struct inode *dir, + goto error_key; + } + +- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) ++ down_write(&dvnode->validate_lock); ++ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && ++ dvnode->status.data_version == data_version) + afs_edit_dir_add(dvnode, &dentry->d_name, &vnode->fid, + afs_edit_dir_for_link); ++ up_write(&dvnode->validate_lock); + + key_put(key); + kfree(scb); +@@ -1732,6 +1754,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry, + struct afs_status_cb *scb; + struct afs_vnode *dvnode = AFS_FS_I(dir); + struct key *key; ++ afs_dataversion_t data_version; + int ret; + + _enter("{%llx:%llu},{%pd},%s", +@@ -1759,7 +1782,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry, + + ret = -ERESTARTSYS; + if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { +- afs_dataversion_t data_version = dvnode->status.data_version + 1; ++ data_version = dvnode->status.data_version + 1; + + while (afs_select_fileserver(&fc)) { + fc.cb_break = afs_calc_vnode_cb_break(dvnode); +@@ -1780,9 +1803,12 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry, + goto error_key; + } + +- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) ++ down_write(&dvnode->validate_lock); ++ if 
(test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && ++ dvnode->status.data_version == data_version) + afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid, + afs_edit_dir_for_symlink); ++ up_write(&dvnode->validate_lock); + + key_put(key); + kfree(scb); +@@ -1812,6 +1838,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, + struct dentry *tmp = NULL, *rehash = NULL; + struct inode *new_inode; + struct key *key; ++ afs_dataversion_t orig_data_version; ++ afs_dataversion_t new_data_version; + bool new_negative = d_is_negative(new_dentry); + int ret; + +@@ -1890,10 +1918,6 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, + + ret = -ERESTARTSYS; + if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) { +- afs_dataversion_t orig_data_version; +- afs_dataversion_t new_data_version; +- struct afs_status_cb *new_scb = &scb[1]; +- + orig_data_version = orig_dvnode->status.data_version + 1; + + if (orig_dvnode != new_dvnode) { +@@ -1904,7 +1928,6 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, + new_data_version = new_dvnode->status.data_version + 1; + } else { + new_data_version = orig_data_version; +- new_scb = &scb[0]; + } + + while (afs_select_fileserver(&fc)) { +@@ -1912,7 +1935,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, + fc.cb_break_2 = afs_calc_vnode_cb_break(new_dvnode); + afs_fs_rename(&fc, old_dentry->d_name.name, + new_dvnode, new_dentry->d_name.name, +- &scb[0], new_scb); ++ &scb[0], &scb[1]); + } + + afs_vnode_commit_status(&fc, orig_dvnode, fc.cb_break, +@@ -1930,18 +1953,25 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, + if (ret == 0) { + if (rehash) + d_rehash(rehash); +- if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags)) +- afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name, +- afs_edit_dir_for_rename_0); ++ down_write(&orig_dvnode->validate_lock); ++ if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) && ++ orig_dvnode->status.data_version == orig_data_version) ++ afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name, ++ afs_edit_dir_for_rename_0); ++ if (orig_dvnode != new_dvnode) { ++ up_write(&orig_dvnode->validate_lock); + +- if (!new_negative && +- test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags)) +- afs_edit_dir_remove(new_dvnode, &new_dentry->d_name, +- afs_edit_dir_for_rename_1); ++ down_write(&new_dvnode->validate_lock); ++ } ++ if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags) && ++ orig_dvnode->status.data_version == new_data_version) { ++ if (!new_negative) ++ afs_edit_dir_remove(new_dvnode, &new_dentry->d_name, ++ afs_edit_dir_for_rename_1); + +- if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags)) + afs_edit_dir_add(new_dvnode, &new_dentry->d_name, + &vnode->fid, afs_edit_dir_for_rename_2); ++ } + + new_inode = d_inode(new_dentry); + if (new_inode) { +@@ -1957,14 +1987,10 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, + * Note that if we ever implement RENAME_EXCHANGE, we'll have + * to update both dentries with opposing dir versions. 
+ */ +- if (new_dvnode != orig_dvnode) { +- afs_update_dentry_version(&fc, old_dentry, &scb[1]); +- afs_update_dentry_version(&fc, new_dentry, &scb[1]); +- } else { +- afs_update_dentry_version(&fc, old_dentry, &scb[0]); +- afs_update_dentry_version(&fc, new_dentry, &scb[0]); +- } ++ afs_update_dentry_version(&fc, old_dentry, &scb[1]); ++ afs_update_dentry_version(&fc, new_dentry, &scb[1]); + d_move(old_dentry, new_dentry); ++ up_write(&new_dvnode->validate_lock); + goto error_tmp; + } + +diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c +index 361088a5edb9..d94e2b7cddff 100644 +--- a/fs/afs/dir_silly.c ++++ b/fs/afs/dir_silly.c +@@ -21,6 +21,7 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode + { + struct afs_fs_cursor fc; + struct afs_status_cb *scb; ++ afs_dataversion_t dir_data_version; + int ret = -ERESTARTSYS; + + _enter("%pd,%pd", old, new); +@@ -31,7 +32,7 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode + + trace_afs_silly_rename(vnode, false); + if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { +- afs_dataversion_t dir_data_version = dvnode->status.data_version + 1; ++ dir_data_version = dvnode->status.data_version + 1; + + while (afs_select_fileserver(&fc)) { + fc.cb_break = afs_calc_vnode_cb_break(dvnode); +@@ -54,12 +55,15 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode + dvnode->silly_key = key_get(key); + } + +- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) ++ down_write(&dvnode->validate_lock); ++ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && ++ dvnode->status.data_version == dir_data_version) { + afs_edit_dir_remove(dvnode, &old->d_name, + afs_edit_dir_for_silly_0); +- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) + afs_edit_dir_add(dvnode, &new->d_name, + &vnode->fid, afs_edit_dir_for_silly_1); ++ } ++ up_write(&dvnode->validate_lock); + } + + kfree(scb); +@@ -181,10 +185,14 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode + clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); + } + } +- if (ret == 0 && +- test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) +- afs_edit_dir_remove(dvnode, &dentry->d_name, +- afs_edit_dir_for_unlink); ++ if (ret == 0) { ++ down_write(&dvnode->validate_lock); ++ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && ++ dvnode->status.data_version == dir_data_version) ++ afs_edit_dir_remove(dvnode, &dentry->d_name, ++ afs_edit_dir_for_unlink); ++ up_write(&dvnode->validate_lock); ++ } + } + + kfree(scb); +diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c +index 1f9c5d8e6fe5..68fc46634346 100644 +--- a/fs/afs/fsclient.c ++++ b/fs/afs/fsclient.c +@@ -65,6 +65,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, + bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus); + u64 data_version, size; + u32 type, abort_code; ++ int ret; + + abort_code = ntohl(xdr->abort_code); + +@@ -78,7 +79,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, + */ + status->abort_code = abort_code; + scb->have_error = true; +- return 0; ++ goto good; + } + + pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version)); +@@ -87,7 +88,8 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, + + if (abort_code != 0 && inline_error) { + status->abort_code = abort_code; +- return 0; ++ scb->have_error = true; ++ goto good; + } + + type = ntohl(xdr->type); +@@ -123,13 +125,16 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, + data_version |= 
(u64)ntohl(xdr->data_version_hi) << 32; + status->data_version = data_version; + scb->have_status = true; +- ++good: ++ ret = 0; ++advance: + *_bp = (const void *)*_bp + sizeof(*xdr); +- return 0; ++ return ret; + + bad: + xdr_dump_bad(*_bp); +- return afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); ++ ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); ++ goto advance; + } + + static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry) +@@ -981,16 +986,16 @@ static int afs_deliver_fs_rename(struct afs_call *call) + if (ret < 0) + return ret; + +- /* unmarshall the reply once we've received all of it */ ++ /* If the two dirs are the same, we have two copies of the same status ++ * report, so we just decode it twice. ++ */ + bp = call->buffer; + ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); + if (ret < 0) + return ret; +- if (call->out_dir_scb != call->out_scb) { +- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; +- } ++ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); ++ if (ret < 0) ++ return ret; + xdr_decode_AFSVolSync(&bp, call->out_volsync); + + _leave(" = 0 [done]"); +diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c +index a26126ac7bf1..83b6d67325f6 100644 +--- a/fs/afs/yfsclient.c ++++ b/fs/afs/yfsclient.c +@@ -186,13 +186,14 @@ static int xdr_decode_YFSFetchStatus(const __be32 **_bp, + const struct yfs_xdr_YFSFetchStatus *xdr = (const void *)*_bp; + struct afs_file_status *status = &scb->status; + u32 type; ++ int ret; + + status->abort_code = ntohl(xdr->abort_code); + if (status->abort_code != 0) { + if (status->abort_code == VNOVNODE) + status->nlink = 0; + scb->have_error = true; +- return 0; ++ goto good; + } + + type = ntohl(xdr->type); +@@ -220,13 +221,16 @@ static int xdr_decode_YFSFetchStatus(const __be32 **_bp, + status->size = xdr_to_u64(xdr->size); + status->data_version = xdr_to_u64(xdr->data_version); + scb->have_status = true; +- ++good: ++ ret = 0; ++advance: + *_bp += xdr_size(xdr); +- return 0; ++ return ret; + + bad: + xdr_dump_bad(*_bp); +- return afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); ++ ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); ++ goto advance; + } + + /* +@@ -1153,11 +1157,9 @@ static int yfs_deliver_fs_rename(struct afs_call *call) + ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); + if (ret < 0) + return ret; +- if (call->out_dir_scb != call->out_scb) { +- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); +- if (ret < 0) +- return ret; +- } ++ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); ++ if (ret < 0) ++ return ret; + + xdr_decode_YFSVolSync(&bp, call->out_volsync); + _leave(" = 0 [done]"); +diff --git a/fs/block_dev.c b/fs/block_dev.c +index 69bf2fb6f7cd..84fe0162ff13 100644 +--- a/fs/block_dev.c ++++ b/fs/block_dev.c +@@ -34,6 +34,7 @@ + #include <linux/task_io_accounting_ops.h> + #include <linux/falloc.h> + #include <linux/uaccess.h> ++#include <linux/suspend.h> + #include "internal.h" + + struct bdev_inode { +@@ -2001,7 +2002,8 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) + if (bdev_read_only(I_BDEV(bd_inode))) + return -EPERM; + +- if (IS_SWAPFILE(bd_inode)) ++ /* uswsusp needs write permission to the swap */ ++ if (IS_SWAPFILE(bd_inode) && !hibernation_available()) + return -ETXTBSY; + + if (!iov_iter_count(from)) +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c +index 7f09147872dc..c9a3bbc8c6af 100644 +--- 
a/fs/btrfs/block-group.c ++++ b/fs/btrfs/block-group.c +@@ -1987,6 +1987,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) + btrfs_release_path(path); + } + ++ rcu_read_lock(); + list_for_each_entry_rcu(space_info, &info->space_info, list) { + if (!(btrfs_get_alloc_profile(info, space_info->flags) & + (BTRFS_BLOCK_GROUP_RAID10 | +@@ -2007,6 +2008,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) + list) + inc_block_group_ro(cache, 1); + } ++ rcu_read_unlock(); + + btrfs_init_global_block_rsv(info); + ret = check_chunk_block_group_mappings(info); +diff --git a/fs/buffer.c b/fs/buffer.c +index b8d28370cfd7..a50d928af641 100644 +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -1377,6 +1377,17 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size) + } + EXPORT_SYMBOL(__breadahead); + ++void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size, ++ gfp_t gfp) ++{ ++ struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); ++ if (likely(bh)) { ++ ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); ++ brelse(bh); ++ } ++} ++EXPORT_SYMBOL(__breadahead_gfp); ++ + /** + * __bread_gfp() - reads a specified block and returns the bh + * @bdev: the block_device to read from +diff --git a/fs/ceph/file.c b/fs/ceph/file.c +index 5a478cd06e11..7f8c4e308301 100644 +--- a/fs/ceph/file.c ++++ b/fs/ceph/file.c +@@ -1944,6 +1944,71 @@ static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode, + return 0; + } + ++static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off, ++ struct ceph_inode_info *dst_ci, u64 *dst_off, ++ struct ceph_fs_client *fsc, ++ size_t len, unsigned int flags) ++{ ++ struct ceph_object_locator src_oloc, dst_oloc; ++ struct ceph_object_id src_oid, dst_oid; ++ size_t bytes = 0; ++ u64 src_objnum, src_objoff, dst_objnum, dst_objoff; ++ u32 src_objlen, dst_objlen; ++ u32 object_size = src_ci->i_layout.object_size; ++ int ret; ++ ++ src_oloc.pool = src_ci->i_layout.pool_id; ++ src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns); ++ dst_oloc.pool = dst_ci->i_layout.pool_id; ++ dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns); ++ ++ while (len >= object_size) { ++ ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off, ++ object_size, &src_objnum, ++ &src_objoff, &src_objlen); ++ ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off, ++ object_size, &dst_objnum, ++ &dst_objoff, &dst_objlen); ++ ceph_oid_init(&src_oid); ++ ceph_oid_printf(&src_oid, "%llx.%08llx", ++ src_ci->i_vino.ino, src_objnum); ++ ceph_oid_init(&dst_oid); ++ ceph_oid_printf(&dst_oid, "%llx.%08llx", ++ dst_ci->i_vino.ino, dst_objnum); ++ /* Do an object remote copy */ ++ ret = ceph_osdc_copy_from(&fsc->client->osdc, ++ src_ci->i_vino.snap, 0, ++ &src_oid, &src_oloc, ++ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL | ++ CEPH_OSD_OP_FLAG_FADVISE_NOCACHE, ++ &dst_oid, &dst_oloc, ++ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL | ++ CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, ++ dst_ci->i_truncate_seq, ++ dst_ci->i_truncate_size, ++ CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ); ++ if (ret) { ++ if (ret == -EOPNOTSUPP) { ++ fsc->have_copy_from2 = false; ++ pr_notice("OSDs don't support copy-from2; disabling copy offload\n"); ++ } ++ dout("ceph_osdc_copy_from returned %d\n", ret); ++ if (!bytes) ++ bytes = ret; ++ goto out; ++ } ++ len -= object_size; ++ bytes += object_size; ++ *src_off += object_size; ++ *dst_off += object_size; ++ } ++ ++out: ++ ceph_oloc_destroy(&src_oloc); ++ ceph_oloc_destroy(&dst_oloc); ++ return bytes; ++} ++ + 
static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off, + struct file *dst_file, loff_t dst_off, + size_t len, unsigned int flags) +@@ -1954,14 +2019,11 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off, + struct ceph_inode_info *dst_ci = ceph_inode(dst_inode); + struct ceph_cap_flush *prealloc_cf; + struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode); +- struct ceph_object_locator src_oloc, dst_oloc; +- struct ceph_object_id src_oid, dst_oid; +- loff_t endoff = 0, size; +- ssize_t ret = -EIO; ++ loff_t size; ++ ssize_t ret = -EIO, bytes; + u64 src_objnum, dst_objnum, src_objoff, dst_objoff; +- u32 src_objlen, dst_objlen, object_size; ++ u32 src_objlen, dst_objlen; + int src_got = 0, dst_got = 0, err, dirty; +- bool do_final_copy = false; + + if (src_inode->i_sb != dst_inode->i_sb) { + struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode); +@@ -2039,22 +2101,14 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off, + if (ret < 0) + goto out_caps; + +- size = i_size_read(dst_inode); +- endoff = dst_off + len; +- + /* Drop dst file cached pages */ + ret = invalidate_inode_pages2_range(dst_inode->i_mapping, + dst_off >> PAGE_SHIFT, +- endoff >> PAGE_SHIFT); ++ (dst_off + len) >> PAGE_SHIFT); + if (ret < 0) { + dout("Failed to invalidate inode pages (%zd)\n", ret); + ret = 0; /* XXX */ + } +- src_oloc.pool = src_ci->i_layout.pool_id; +- src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns); +- dst_oloc.pool = dst_ci->i_layout.pool_id; +- dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns); +- + ceph_calc_file_object_mapping(&src_ci->i_layout, src_off, + src_ci->i_layout.object_size, + &src_objnum, &src_objoff, &src_objlen); +@@ -2073,6 +2127,8 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off, + * starting at the src_off + */ + if (src_objoff) { ++ dout("Initial partial copy of %u bytes\n", src_objlen); ++ + /* + * we need to temporarily drop all caps as we'll be calling + * {read,write}_iter, which will get caps again. 
+@@ -2080,8 +2136,9 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off, + put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got); + ret = do_splice_direct(src_file, &src_off, dst_file, + &dst_off, src_objlen, flags); +- if (ret < 0) { +- dout("do_splice_direct returned %d\n", err); ++ /* Abort on short copies or on error */ ++ if (ret < src_objlen) { ++ dout("Failed partial copy (%zd)\n", ret); + goto out; + } + len -= ret; +@@ -2094,62 +2151,29 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off, + if (err < 0) + goto out_caps; + } +- object_size = src_ci->i_layout.object_size; +- while (len >= object_size) { +- ceph_calc_file_object_mapping(&src_ci->i_layout, src_off, +- object_size, &src_objnum, +- &src_objoff, &src_objlen); +- ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off, +- object_size, &dst_objnum, +- &dst_objoff, &dst_objlen); +- ceph_oid_init(&src_oid); +- ceph_oid_printf(&src_oid, "%llx.%08llx", +- src_ci->i_vino.ino, src_objnum); +- ceph_oid_init(&dst_oid); +- ceph_oid_printf(&dst_oid, "%llx.%08llx", +- dst_ci->i_vino.ino, dst_objnum); +- /* Do an object remote copy */ +- err = ceph_osdc_copy_from( +- &src_fsc->client->osdc, +- src_ci->i_vino.snap, 0, +- &src_oid, &src_oloc, +- CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL | +- CEPH_OSD_OP_FLAG_FADVISE_NOCACHE, +- &dst_oid, &dst_oloc, +- CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL | +- CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, +- dst_ci->i_truncate_seq, dst_ci->i_truncate_size, +- CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ); +- if (err) { +- if (err == -EOPNOTSUPP) { +- src_fsc->have_copy_from2 = false; +- pr_notice("OSDs don't support copy-from2; disabling copy offload\n"); +- } +- dout("ceph_osdc_copy_from returned %d\n", err); +- if (!ret) +- ret = err; +- goto out_caps; +- } +- len -= object_size; +- src_off += object_size; +- dst_off += object_size; +- ret += object_size; +- } + +- if (len) +- /* We still need one final local copy */ +- do_final_copy = true; ++ size = i_size_read(dst_inode); ++ bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off, ++ src_fsc, len, flags); ++ if (bytes <= 0) { ++ if (!ret) ++ ret = bytes; ++ goto out_caps; ++ } ++ dout("Copied %zu bytes out of %zu\n", bytes, len); ++ len -= bytes; ++ ret += bytes; + + file_update_time(dst_file); + inode_inc_iversion_raw(dst_inode); + +- if (endoff > size) { ++ if (dst_off > size) { + int caps_flags = 0; + + /* Let the MDS know about dst file size change */ +- if (ceph_quota_is_max_bytes_approaching(dst_inode, endoff)) ++ if (ceph_quota_is_max_bytes_approaching(dst_inode, dst_off)) + caps_flags |= CHECK_CAPS_NODELAY; +- if (ceph_inode_set_size(dst_inode, endoff)) ++ if (ceph_inode_set_size(dst_inode, dst_off)) + caps_flags |= CHECK_CAPS_AUTHONLY; + if (caps_flags) + ceph_check_caps(dst_ci, caps_flags, NULL); +@@ -2165,15 +2189,18 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off, + out_caps: + put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got); + +- if (do_final_copy) { +- err = do_splice_direct(src_file, &src_off, dst_file, +- &dst_off, len, flags); +- if (err < 0) { +- dout("do_splice_direct returned %d\n", err); +- goto out; +- } +- len -= err; +- ret += err; ++ /* ++ * Do the final manual copy if we still have some bytes left, unless ++ * there were errors in remote object copies (len >= object_size). 
++ */ ++ if (len && (len < src_ci->i_layout.object_size)) { ++ dout("Final partial copy of %zu bytes\n", len); ++ bytes = do_splice_direct(src_file, &src_off, dst_file, ++ &dst_off, len, flags); ++ if (bytes > 0) ++ ret += bytes; ++ else ++ dout("Failed partial copy (%zd)\n", bytes); + } + + out: +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c +index 0511aaf451d4..497afb0b9960 100644 +--- a/fs/cifs/smb2misc.c ++++ b/fs/cifs/smb2misc.c +@@ -766,6 +766,20 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid, + + cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count); + spin_lock(&cifs_tcp_ses_lock); ++ if (tcon->tc_count <= 0) { ++ struct TCP_Server_Info *server = NULL; ++ ++ WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative"); ++ spin_unlock(&cifs_tcp_ses_lock); ++ ++ if (tcon->ses) ++ server = tcon->ses->server; ++ ++ cifs_server_dbg(FYI, "tid=%u: tcon is closing, skipping async close retry of fid %llu %llu\n", ++ tcon->tid, persistent_fid, volatile_fid); ++ ++ return 0; ++ } + tcon->tc_count++; + spin_unlock(&cifs_tcp_ses_lock); + +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c +index cb3ee916f527..c97570eb2c18 100644 +--- a/fs/cifs/transport.c ++++ b/fs/cifs/transport.c +@@ -466,7 +466,7 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, + struct smb_rqst *rqst, int flags) + { + struct kvec iov; +- struct smb2_transform_hdr tr_hdr; ++ struct smb2_transform_hdr *tr_hdr; + struct smb_rqst cur_rqst[MAX_COMPOUND]; + int rc; + +@@ -476,28 +476,34 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, + if (num_rqst > MAX_COMPOUND - 1) + return -ENOMEM; + +- memset(&cur_rqst[0], 0, sizeof(cur_rqst)); +- memset(&iov, 0, sizeof(iov)); +- memset(&tr_hdr, 0, sizeof(tr_hdr)); +- +- iov.iov_base = &tr_hdr; +- iov.iov_len = sizeof(tr_hdr); +- cur_rqst[0].rq_iov = &iov; +- cur_rqst[0].rq_nvec = 1; +- + if (!server->ops->init_transform_rq) { + cifs_server_dbg(VFS, "Encryption requested but transform " + "callback is missing\n"); + return -EIO; + } + ++ tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS); ++ if (!tr_hdr) ++ return -ENOMEM; ++ ++ memset(&cur_rqst[0], 0, sizeof(cur_rqst)); ++ memset(&iov, 0, sizeof(iov)); ++ memset(tr_hdr, 0, sizeof(*tr_hdr)); ++ ++ iov.iov_base = tr_hdr; ++ iov.iov_len = sizeof(*tr_hdr); ++ cur_rqst[0].rq_iov = &iov; ++ cur_rqst[0].rq_nvec = 1; ++ + rc = server->ops->init_transform_rq(server, num_rqst + 1, + &cur_rqst[0], rqst); + if (rc) +- return rc; ++ goto out; + + rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]); + smb3_free_compound_rqst(num_rqst, &cur_rqst[1]); ++out: ++ kfree(tr_hdr); + return rc; + } + +diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c +index 0456bc990b5e..62acbe27d8bf 100644 +--- a/fs/ext2/xattr.c ++++ b/fs/ext2/xattr.c +@@ -56,6 +56,7 @@ + + #include <linux/buffer_head.h> + #include <linux/init.h> ++#include <linux/printk.h> + #include <linux/slab.h> + #include <linux/mbcache.h> + #include <linux/quotaops.h> +@@ -84,8 +85,8 @@ + printk("\n"); \ + } while (0) + #else +-# define ea_idebug(f...) +-# define ea_bdebug(f...) ++# define ea_idebug(inode, f...) no_printk(f) ++# define ea_bdebug(bh, f...) 
no_printk(f) + #endif + + static int ext2_xattr_set2(struct inode *, struct buffer_head *, +@@ -864,8 +865,7 @@ ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh) + true); + if (error) { + if (error == -EBUSY) { +- ea_bdebug(bh, "already in cache (%d cache entries)", +- atomic_read(&ext2_xattr_cache->c_entry_count)); ++ ea_bdebug(bh, "already in cache"); + error = 0; + } + } else +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index c5d05564cd29..37f65ad0d823 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -4348,7 +4348,7 @@ make_io: + if (end > table) + end = table; + while (b <= end) +- sb_breadahead(sb, b++); ++ sb_breadahead_unmovable(sb, b++); + } + + /* +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 4f0444f3cda3..16da3b3481a4 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -372,7 +372,8 @@ static void save_error_info(struct super_block *sb, const char *func, + unsigned int line) + { + __save_error_info(sb, func, line); +- ext4_commit_super(sb, 1); ++ if (!bdev_read_only(sb->s_bdev)) ++ ext4_commit_super(sb, 1); + } + + /* +@@ -4331,7 +4332,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + /* Pre-read the descriptors into the buffer cache */ + for (i = 0; i < db_count; i++) { + block = descriptor_loc(sb, logical_sb_block, i); +- sb_breadahead(sb, block); ++ sb_breadahead_unmovable(sb, block); + } + + for (i = 0; i < db_count; i++) { +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c +index 44e84ac5c941..79aaf06004f6 100644 +--- a/fs/f2fs/checkpoint.c ++++ b/fs/f2fs/checkpoint.c +@@ -1250,20 +1250,20 @@ static void unblock_operations(struct f2fs_sb_info *sbi) + f2fs_unlock_all(sbi); + } + +-void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi) ++void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type) + { + DEFINE_WAIT(wait); + + for (;;) { + prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); + +- if (!get_pages(sbi, F2FS_WB_CP_DATA)) ++ if (!get_pages(sbi, type)) + break; + + if (unlikely(f2fs_cp_error(sbi))) + break; + +- io_schedule_timeout(5*HZ); ++ io_schedule_timeout(HZ/50); + } + finish_wait(&sbi->cp_wait, &wait); + } +@@ -1301,10 +1301,14 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) + else + __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); + +- if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) || +- is_sbi_flag_set(sbi, SBI_IS_RESIZEFS)) ++ if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) + __set_ckpt_flags(ckpt, CP_FSCK_FLAG); + ++ if (is_sbi_flag_set(sbi, SBI_IS_RESIZEFS)) ++ __set_ckpt_flags(ckpt, CP_RESIZEFS_FLAG); ++ else ++ __clear_ckpt_flags(ckpt, CP_RESIZEFS_FLAG); ++ + if (is_sbi_flag_set(sbi, SBI_CP_DISABLED)) + __set_ckpt_flags(ckpt, CP_DISABLED_FLAG); + else +@@ -1384,8 +1388,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) + + /* Flush all the NAT/SIT pages */ + f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); +- f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) && +- !f2fs_cp_error(sbi)); + + /* + * modify checkpoint +@@ -1493,11 +1495,11 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) + + /* Here, we have one bio having CP pack except cp pack 2 page */ + f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); +- f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) && +- !f2fs_cp_error(sbi)); ++ /* Wait for all dirty meta pages to be submitted for IO */ ++ f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META); + + /* wait for previous submitted meta pages writeback */ +- 
f2fs_wait_on_all_pages_writeback(sbi); ++ f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); + + /* flush all device cache */ + err = f2fs_flush_device_cache(sbi); +@@ -1506,7 +1508,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) + + /* barrier and flush checkpoint cp pack 2 page if it can */ + commit_checkpoint(sbi, ckpt, start_blk); +- f2fs_wait_on_all_pages_writeback(sbi); ++ f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); + + /* + * invalidate intermediate page cache borrowed from meta inode which are +diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c +index d8a64be90a50..837e14b7ef52 100644 +--- a/fs/f2fs/compress.c ++++ b/fs/f2fs/compress.c +@@ -385,16 +385,22 @@ static int f2fs_compress_pages(struct compress_ctx *cc) + for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++) + cc->cbuf->reserved[i] = cpu_to_le32(0); + ++ nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE); ++ ++ /* zero out any unused part of the last page */ ++ memset(&cc->cbuf->cdata[cc->clen], 0, ++ (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE)); ++ + vunmap(cc->cbuf); + vunmap(cc->rbuf); + +- nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE); +- + for (i = nr_cpages; i < cc->nr_cpages; i++) { + f2fs_put_compressed_page(cc->cpages[i]); + cc->cpages[i] = NULL; + } + ++ cops->destroy_compress_ctx(cc); ++ + cc->nr_cpages = nr_cpages; + + trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx, +@@ -474,6 +480,8 @@ out_vunmap_cbuf: + out_vunmap_rbuf: + vunmap(dic->rbuf); + out_free_dic: ++ if (verity) ++ refcount_add(dic->nr_cpages - 1, &dic->ref); + if (!verity) + f2fs_decompress_end_io(dic->rpages, dic->cluster_size, + ret, false); +@@ -532,8 +540,7 @@ static bool __cluster_may_compress(struct compress_ctx *cc) + return true; + } + +-/* return # of compressed block addresses */ +-static int f2fs_compressed_blocks(struct compress_ctx *cc) ++static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr) + { + struct dnode_of_data dn; + int ret; +@@ -556,8 +563,13 @@ static int f2fs_compressed_blocks(struct compress_ctx *cc) + + blkaddr = datablock_addr(dn.inode, + dn.node_page, dn.ofs_in_node + i); +- if (blkaddr != NULL_ADDR) +- ret++; ++ if (compr) { ++ if (__is_valid_data_blkaddr(blkaddr)) ++ ret++; ++ } else { ++ if (blkaddr != NULL_ADDR) ++ ret++; ++ } + } + } + fail: +@@ -565,6 +577,18 @@ fail: + return ret; + } + ++/* return # of compressed blocks in compressed cluster */ ++static int f2fs_compressed_blocks(struct compress_ctx *cc) ++{ ++ return __f2fs_cluster_blocks(cc, true); ++} ++ ++/* return # of valid blocks in compressed cluster */ ++static int f2fs_cluster_blocks(struct compress_ctx *cc, bool compr) ++{ ++ return __f2fs_cluster_blocks(cc, false); ++} ++ + int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index) + { + struct compress_ctx cc = { +@@ -574,7 +598,7 @@ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index) + .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size, + }; + +- return f2fs_compressed_blocks(&cc); ++ return f2fs_cluster_blocks(&cc, false); + } + + static bool cluster_may_compress(struct compress_ctx *cc) +@@ -623,7 +647,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc, + bool prealloc; + + retry: +- ret = f2fs_compressed_blocks(cc); ++ ret = f2fs_cluster_blocks(cc, false); + if (ret <= 0) + return ret; + +@@ -772,7 +796,6 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, + .encrypted_page = NULL, + .compressed_page = NULL, + .submitted = 
false, +- .need_lock = LOCK_RETRY, + .io_type = io_type, + .io_wbc = wbc, + .encrypted = f2fs_encrypted_file(cc->inode), +@@ -785,9 +808,10 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, + loff_t psize; + int i, err; + +- set_new_dnode(&dn, cc->inode, NULL, NULL, 0); ++ if (!f2fs_trylock_op(sbi)) ++ return -EAGAIN; + +- f2fs_lock_op(sbi); ++ set_new_dnode(&dn, cc->inode, NULL, NULL, 0); + + err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE); + if (err) +@@ -845,7 +869,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, + + blkaddr = datablock_addr(dn.inode, dn.node_page, + dn.ofs_in_node); +- fio.page = cic->rpages[i]; ++ fio.page = cc->rpages[i]; + fio.old_blkaddr = blkaddr; + + /* cluster header */ +@@ -984,6 +1008,15 @@ retry_write: + unlock_page(cc->rpages[i]); + ret = 0; + } else if (ret == -EAGAIN) { ++ /* ++ * for quota file, just redirty left pages to ++ * avoid deadlock caused by cluster update race ++ * from foreground operation. ++ */ ++ if (IS_NOQUOTA(cc->inode)) { ++ err = 0; ++ goto out_err; ++ } + ret = 0; + cond_resched(); + congestion_wait(BLK_RW_ASYNC, HZ/50); +@@ -992,16 +1025,12 @@ retry_write: + goto retry_write; + } + err = ret; +- goto out_fail; ++ goto out_err; + } + + *submitted += _submitted; + } + return 0; +- +-out_fail: +- /* TODO: revoke partially updated block addresses */ +- BUG_ON(compr_blocks); + out_err: + for (++i; i < cc->cluster_size; i++) { + if (!cc->rpages[i]) +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c +index b27b72107911..34990866cfe9 100644 +--- a/fs/f2fs/data.c ++++ b/fs/f2fs/data.c +@@ -191,12 +191,37 @@ static void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size) + + static void f2fs_verify_bio(struct bio *bio) + { +- struct page *page = bio_first_page_all(bio); +- struct decompress_io_ctx *dic = +- (struct decompress_io_ctx *)page_private(page); ++ struct bio_vec *bv; ++ struct bvec_iter_all iter_all; ++ ++ bio_for_each_segment_all(bv, bio, iter_all) { ++ struct page *page = bv->bv_page; ++ struct decompress_io_ctx *dic; ++ ++ dic = (struct decompress_io_ctx *)page_private(page); ++ ++ if (dic) { ++ if (refcount_dec_not_one(&dic->ref)) ++ continue; ++ f2fs_verify_pages(dic->rpages, ++ dic->cluster_size); ++ f2fs_free_dic(dic); ++ continue; ++ } ++ ++ if (bio->bi_status || PageError(page)) ++ goto clear_uptodate; + +- f2fs_verify_pages(dic->rpages, dic->cluster_size); +- f2fs_free_dic(dic); ++ if (fsverity_verify_page(page)) { ++ SetPageUptodate(page); ++ goto unlock; ++ } ++clear_uptodate: ++ ClearPageUptodate(page); ++ ClearPageError(page); ++unlock: ++ unlock_page(page); ++ } + } + #endif + +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h +index 5355be6b6755..71801a1709f0 100644 +--- a/fs/f2fs/f2fs.h ++++ b/fs/f2fs/f2fs.h +@@ -100,6 +100,7 @@ extern const char *f2fs_fault_name[FAULT_MAX]; + #define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000 + #define F2FS_MOUNT_RESERVE_ROOT 0x01000000 + #define F2FS_MOUNT_DISABLE_CHECKPOINT 0x02000000 ++#define F2FS_MOUNT_NORECOVERY 0x04000000 + + #define F2FS_OPTION(sbi) ((sbi)->mount_opt) + #define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option) +@@ -675,6 +676,44 @@ enum { + MAX_GC_FAILURE + }; + ++/* used for f2fs_inode_info->flags */ ++enum { ++ FI_NEW_INODE, /* indicate newly allocated inode */ ++ FI_DIRTY_INODE, /* indicate inode is dirty or not */ ++ FI_AUTO_RECOVER, /* indicate inode is recoverable */ ++ FI_DIRTY_DIR, /* indicate directory has dirty pages */ ++ FI_INC_LINK, /* need to increment i_nlink */ ++ 
FI_ACL_MODE, /* indicate acl mode */ ++ FI_NO_ALLOC, /* should not allocate any blocks */ ++ FI_FREE_NID, /* free allocated nide */ ++ FI_NO_EXTENT, /* not to use the extent cache */ ++ FI_INLINE_XATTR, /* used for inline xattr */ ++ FI_INLINE_DATA, /* used for inline data*/ ++ FI_INLINE_DENTRY, /* used for inline dentry */ ++ FI_APPEND_WRITE, /* inode has appended data */ ++ FI_UPDATE_WRITE, /* inode has in-place-update data */ ++ FI_NEED_IPU, /* used for ipu per file */ ++ FI_ATOMIC_FILE, /* indicate atomic file */ ++ FI_ATOMIC_COMMIT, /* indicate the state of atomical committing */ ++ FI_VOLATILE_FILE, /* indicate volatile file */ ++ FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */ ++ FI_DROP_CACHE, /* drop dirty page cache */ ++ FI_DATA_EXIST, /* indicate data exists */ ++ FI_INLINE_DOTS, /* indicate inline dot dentries */ ++ FI_DO_DEFRAG, /* indicate defragment is running */ ++ FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */ ++ FI_NO_PREALLOC, /* indicate skipped preallocated blocks */ ++ FI_HOT_DATA, /* indicate file is hot */ ++ FI_EXTRA_ATTR, /* indicate file has extra attribute */ ++ FI_PROJ_INHERIT, /* indicate file inherits projectid */ ++ FI_PIN_FILE, /* indicate file should not be gced */ ++ FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */ ++ FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */ ++ FI_COMPRESSED_FILE, /* indicate file's data can be compressed */ ++ FI_MMAP_FILE, /* indicate file was mmapped */ ++ FI_MAX, /* max flag, never be used */ ++}; ++ + struct f2fs_inode_info { + struct inode vfs_inode; /* serve a vfs inode */ + unsigned long i_flags; /* keep an inode flags for ioctl */ +@@ -687,7 +726,7 @@ struct f2fs_inode_info { + umode_t i_acl_mode; /* keep file acl mode temporarily */ + + /* Use below internally in f2fs*/ +- unsigned long flags; /* use to pass per-file flags */ ++ unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* use to pass per-file flags */ + struct rw_semaphore i_sem; /* protect fi info */ + atomic_t dirty_pages; /* # of dirty pages */ + f2fs_hash_t chash; /* hash value of given file name */ +@@ -2497,43 +2536,6 @@ static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) + return flags & F2FS_OTHER_FLMASK; + } + +-/* used for f2fs_inode_info->flags */ +-enum { +- FI_NEW_INODE, /* indicate newly allocated inode */ +- FI_DIRTY_INODE, /* indicate inode is dirty or not */ +- FI_AUTO_RECOVER, /* indicate inode is recoverable */ +- FI_DIRTY_DIR, /* indicate directory has dirty pages */ +- FI_INC_LINK, /* need to increment i_nlink */ +- FI_ACL_MODE, /* indicate acl mode */ +- FI_NO_ALLOC, /* should not allocate any blocks */ +- FI_FREE_NID, /* free allocated nide */ +- FI_NO_EXTENT, /* not to use the extent cache */ +- FI_INLINE_XATTR, /* used for inline xattr */ +- FI_INLINE_DATA, /* used for inline data*/ +- FI_INLINE_DENTRY, /* used for inline dentry */ +- FI_APPEND_WRITE, /* inode has appended data */ +- FI_UPDATE_WRITE, /* inode has in-place-update data */ +- FI_NEED_IPU, /* used for ipu per file */ +- FI_ATOMIC_FILE, /* indicate atomic file */ +- FI_ATOMIC_COMMIT, /* indicate the state of atomical committing */ +- FI_VOLATILE_FILE, /* indicate volatile file */ +- FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */ +- FI_DROP_CACHE, /* drop dirty page cache */ +- FI_DATA_EXIST, /* indicate data exists */ +- FI_INLINE_DOTS, /* indicate inline dot dentries */ +- FI_DO_DEFRAG, /* indicate defragment is running */ +- FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */ +- 
FI_NO_PREALLOC, /* indicate skipped preallocated blocks */ +- FI_HOT_DATA, /* indicate file is hot */ +- FI_EXTRA_ATTR, /* indicate file has extra attribute */ +- FI_PROJ_INHERIT, /* indicate file inherits projectid */ +- FI_PIN_FILE, /* indicate file should not be gced */ +- FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */ +- FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */ +- FI_COMPRESSED_FILE, /* indicate file's data can be compressed */ +- FI_MMAP_FILE, /* indicate file was mmapped */ +-}; +- + static inline void __mark_inode_dirty_flag(struct inode *inode, + int flag, bool set) + { +@@ -2555,20 +2557,18 @@ static inline void __mark_inode_dirty_flag(struct inode *inode, + + static inline void set_inode_flag(struct inode *inode, int flag) + { +- if (!test_bit(flag, &F2FS_I(inode)->flags)) +- set_bit(flag, &F2FS_I(inode)->flags); ++ test_and_set_bit(flag, F2FS_I(inode)->flags); + __mark_inode_dirty_flag(inode, flag, true); + } + + static inline int is_inode_flag_set(struct inode *inode, int flag) + { +- return test_bit(flag, &F2FS_I(inode)->flags); ++ return test_bit(flag, F2FS_I(inode)->flags); + } + + static inline void clear_inode_flag(struct inode *inode, int flag) + { +- if (test_bit(flag, &F2FS_I(inode)->flags)) +- clear_bit(flag, &F2FS_I(inode)->flags); ++ test_and_clear_bit(flag, F2FS_I(inode)->flags); + __mark_inode_dirty_flag(inode, flag, false); + } + +@@ -2659,19 +2659,19 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) + struct f2fs_inode_info *fi = F2FS_I(inode); + + if (ri->i_inline & F2FS_INLINE_XATTR) +- set_bit(FI_INLINE_XATTR, &fi->flags); ++ set_bit(FI_INLINE_XATTR, fi->flags); + if (ri->i_inline & F2FS_INLINE_DATA) +- set_bit(FI_INLINE_DATA, &fi->flags); ++ set_bit(FI_INLINE_DATA, fi->flags); + if (ri->i_inline & F2FS_INLINE_DENTRY) +- set_bit(FI_INLINE_DENTRY, &fi->flags); ++ set_bit(FI_INLINE_DENTRY, fi->flags); + if (ri->i_inline & F2FS_DATA_EXIST) +- set_bit(FI_DATA_EXIST, &fi->flags); ++ set_bit(FI_DATA_EXIST, fi->flags); + if (ri->i_inline & F2FS_INLINE_DOTS) +- set_bit(FI_INLINE_DOTS, &fi->flags); ++ set_bit(FI_INLINE_DOTS, fi->flags); + if (ri->i_inline & F2FS_EXTRA_ATTR) +- set_bit(FI_EXTRA_ATTR, &fi->flags); ++ set_bit(FI_EXTRA_ATTR, fi->flags); + if (ri->i_inline & F2FS_PIN_FILE) +- set_bit(FI_PIN_FILE, &fi->flags); ++ set_bit(FI_PIN_FILE, fi->flags); + } + + static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) +@@ -3308,7 +3308,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi); + void f2fs_update_dirty_page(struct inode *inode, struct page *page); + void f2fs_remove_dirty_inode(struct inode *inode); + int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type); +-void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi); ++void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type); + int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc); + void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi); + int __init f2fs_create_checkpoint_caches(void); +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c +index 0d4da644df3b..a41c633ac6cf 100644 +--- a/fs/f2fs/file.c ++++ b/fs/f2fs/file.c +@@ -1787,12 +1787,15 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id) + static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask) + { + struct f2fs_inode_info *fi = F2FS_I(inode); ++ u32 masked_flags = fi->i_flags & mask; ++ ++ f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask)); + + /* Is it quota file? 
Do not allow user to mess with it */ + if (IS_NOQUOTA(inode)) + return -EPERM; + +- if ((iflags ^ fi->i_flags) & F2FS_CASEFOLD_FL) { ++ if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) { + if (!f2fs_sb_has_casefold(F2FS_I_SB(inode))) + return -EOPNOTSUPP; + if (!f2fs_empty_dir(inode)) +@@ -1806,9 +1809,9 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask) + return -EINVAL; + } + +- if ((iflags ^ fi->i_flags) & F2FS_COMPR_FL) { ++ if ((iflags ^ masked_flags) & F2FS_COMPR_FL) { + if (S_ISREG(inode->i_mode) && +- (fi->i_flags & F2FS_COMPR_FL || i_size_read(inode) || ++ (masked_flags & F2FS_COMPR_FL || i_size_read(inode) || + F2FS_HAS_BLOCKS(inode))) + return -EINVAL; + if (iflags & F2FS_NOCOMP_FL) +@@ -1825,8 +1828,8 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask) + set_compress_context(inode); + } + } +- if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) { +- if (fi->i_flags & F2FS_COMPR_FL) ++ if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) { ++ if (masked_flags & F2FS_COMPR_FL) + return -EINVAL; + } + +diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c +index db8725d473b5..3cced15efebc 100644 +--- a/fs/f2fs/gc.c ++++ b/fs/f2fs/gc.c +@@ -1018,8 +1018,8 @@ next_step: + * race condition along with SSR block allocation. + */ + if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) || +- get_valid_blocks(sbi, segno, false) == +- sbi->blocks_per_seg) ++ get_valid_blocks(sbi, segno, true) == ++ BLKS_PER_SEC(sbi)) + return submitted; + + if (check_valid_map(sbi, segno, off) == 0) +@@ -1434,12 +1434,19 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start, + static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs) + { + struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi); +- int section_count = le32_to_cpu(raw_sb->section_count); +- int segment_count = le32_to_cpu(raw_sb->segment_count); +- int segment_count_main = le32_to_cpu(raw_sb->segment_count_main); +- long long block_count = le64_to_cpu(raw_sb->block_count); ++ int section_count; ++ int segment_count; ++ int segment_count_main; ++ long long block_count; + int segs = secs * sbi->segs_per_sec; + ++ down_write(&sbi->sb_lock); ++ ++ section_count = le32_to_cpu(raw_sb->section_count); ++ segment_count = le32_to_cpu(raw_sb->segment_count); ++ segment_count_main = le32_to_cpu(raw_sb->segment_count_main); ++ block_count = le64_to_cpu(raw_sb->block_count); ++ + raw_sb->section_count = cpu_to_le32(section_count + secs); + raw_sb->segment_count = cpu_to_le32(segment_count + segs); + raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs); +@@ -1453,6 +1460,8 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs) + raw_sb->devs[last_dev].total_segments = + cpu_to_le32(dev_segs + segs); + } ++ ++ up_write(&sbi->sb_lock); + } + + static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs) +@@ -1570,11 +1579,17 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) + goto out; + } + ++ mutex_lock(&sbi->cp_mutex); + update_fs_metadata(sbi, -secs); + clear_sbi_flag(sbi, SBI_IS_RESIZEFS); ++ set_sbi_flag(sbi, SBI_IS_DIRTY); ++ mutex_unlock(&sbi->cp_mutex); ++ + err = f2fs_sync_fs(sbi->sb, 1); + if (err) { ++ mutex_lock(&sbi->cp_mutex); + update_fs_metadata(sbi, secs); ++ mutex_unlock(&sbi->cp_mutex); + update_sb_metadata(sbi, secs); + f2fs_commit_super(sbi, false); + } +diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c +index 78c3f1d70f1d..901e9f4ce12b 100644 +--- a/fs/f2fs/inode.c ++++ b/fs/f2fs/inode.c +@@ -345,7 +345,7 @@ static int 
do_read_inode(struct inode *inode) + fi->i_flags = le32_to_cpu(ri->i_flags); + if (S_ISREG(inode->i_mode)) + fi->i_flags &= ~F2FS_PROJINHERIT_FL; +- fi->flags = 0; ++ bitmap_zero(fi->flags, FI_MAX); + fi->i_advise = ri->i_advise; + fi->i_pino = le32_to_cpu(ri->i_pino); + fi->i_dir_level = ri->i_dir_level; +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c +index 9d02cdcdbb07..e58c4c628834 100644 +--- a/fs/f2fs/node.c ++++ b/fs/f2fs/node.c +@@ -1562,15 +1562,16 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted, + if (atomic && !test_opt(sbi, NOBARRIER)) + fio.op_flags |= REQ_PREFLUSH | REQ_FUA; + +- set_page_writeback(page); +- ClearPageError(page); +- ++ /* should add to global list before clearing PAGECACHE status */ + if (f2fs_in_warm_node_list(sbi, page)) { + seq = f2fs_add_fsync_node_entry(sbi, page); + if (seq_id) + *seq_id = seq; + } + ++ set_page_writeback(page); ++ ClearPageError(page); ++ + fio.old_blkaddr = ni.blk_addr; + f2fs_do_write_node_page(nid, &fio); + set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page)); +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c +index 65a7a432dfee..8deb0a260d92 100644 +--- a/fs/f2fs/super.c ++++ b/fs/f2fs/super.c +@@ -446,7 +446,7 @@ static int parse_options(struct super_block *sb, char *options) + break; + case Opt_norecovery: + /* this option mounts f2fs with ro */ +- set_opt(sbi, DISABLE_ROLL_FORWARD); ++ set_opt(sbi, NORECOVERY); + if (!f2fs_readonly(sb)) + return -EINVAL; + break; +@@ -1172,7 +1172,7 @@ static void f2fs_put_super(struct super_block *sb) + /* our cp_error case, we can wait for any writeback page */ + f2fs_flush_merged_writes(sbi); + +- f2fs_wait_on_all_pages_writeback(sbi); ++ f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); + + f2fs_bug_on(sbi, sbi->fsync_node_num); + +@@ -1446,6 +1446,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) + } + if (test_opt(sbi, DISABLE_ROLL_FORWARD)) + seq_puts(seq, ",disable_roll_forward"); ++ if (test_opt(sbi, NORECOVERY)) ++ seq_puts(seq, ",norecovery"); + if (test_opt(sbi, DISCARD)) + seq_puts(seq, ",discard"); + else +@@ -1927,6 +1929,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type, + int offset = off & (sb->s_blocksize - 1); + size_t towrite = len; + struct page *page; ++ void *fsdata = NULL; + char *kaddr; + int err = 0; + int tocopy; +@@ -1936,7 +1939,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type, + towrite); + retry: + err = a_ops->write_begin(NULL, mapping, off, tocopy, 0, +- &page, NULL); ++ &page, &fsdata); + if (unlikely(err)) { + if (err == -ENOMEM) { + congestion_wait(BLK_RW_ASYNC, HZ/50); +@@ -1952,7 +1955,7 @@ retry: + flush_dcache_page(page); + + a_ops->write_end(NULL, mapping, off, tocopy, tocopy, +- page, NULL); ++ page, fsdata); + offset = 0; + towrite -= tocopy; + off += tocopy; +@@ -3598,7 +3601,8 @@ try_onemore: + goto reset_checkpoint; + + /* recover fsynced data */ +- if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) { ++ if (!test_opt(sbi, DISABLE_ROLL_FORWARD) && ++ !test_opt(sbi, NORECOVERY)) { + /* + * mount should be failed, when device has readonly mode, and + * previous checkpoint was not done by clean system shutdown. 
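
(editorial illustration — not part of 1006_linux-5.6.7.patch)

The f2fs hunks above convert struct f2fs_inode_info's per-file flag word from a single `unsigned long` into an array sized with `BITS_TO_LONGS(FI_MAX)`, and switch the helpers to `bitmap_zero()`/`test_and_set_bit()`/`test_and_clear_bit()`, so the FI_* enum can keep growing past one machine word. A minimal standalone C sketch of the same pattern follows; the helper names and the `FI_MMAP_FILE = 70` value are illustrative only (chosen so the flag lands in the bitmap's second word on 64-bit builds), and the open-coded shift/index arithmetic stands in for the kernel's set_bit()/test_bit():

#include <limits.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG	 (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

enum {
	FI_NEW_INODE,		/* a couple of real flag names for flavour */
	FI_DIRTY_INODE,
	FI_MMAP_FILE = 70,	/* illustrative value, not the kernel's */
	FI_MAX			/* sentinel: sizes the bitmap, never set */
};

struct inode_info {
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* grows with FI_MAX */
};

static void set_flag(struct inode_info *fi, int flag)
{
	fi->flags[flag / BITS_PER_LONG] |= 1UL << (flag % BITS_PER_LONG);
}

static int test_flag(const struct inode_info *fi, int flag)
{
	return !!(fi->flags[flag / BITS_PER_LONG] &
		  (1UL << (flag % BITS_PER_LONG)));
}

int main(void)
{
	struct inode_info fi;

	memset(&fi, 0, sizeof(fi));	/* cf. bitmap_zero(fi->flags, FI_MAX) */
	set_flag(&fi, FI_MMAP_FILE);	/* works even when flag >= BITS_PER_LONG */
	printf("FI_MMAP_FILE set: %d\n", test_flag(&fi, FI_MMAP_FILE));
	return 0;
}

One design note grounded in the hunks themselves: set_inode_flag() and clear_inode_flag() now call test_and_set_bit()/test_and_clear_bit() rather than testing first and modifying second, folding the old two-step pair into a single atomic read-modify-write.
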
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c +index 08dd6a430234..60d911e293e6 100644 +--- a/fs/gfs2/log.c ++++ b/fs/gfs2/log.c +@@ -104,16 +104,22 @@ __acquires(&sdp->sd_ail_lock) + gfs2_assert(sdp, bd->bd_tr == tr); + + if (!buffer_busy(bh)) { +- if (!buffer_uptodate(bh) && +- !test_and_set_bit(SDF_AIL1_IO_ERROR, ++ if (buffer_uptodate(bh)) { ++ list_move(&bd->bd_ail_st_list, ++ &tr->tr_ail2_list); ++ continue; ++ } ++ if (!test_and_set_bit(SDF_AIL1_IO_ERROR, + &sdp->sd_flags)) { + gfs2_io_error_bh(sdp, bh); + *withdraw = true; + } +- list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list); +- continue; + } + ++ if (gfs2_withdrawn(sdp)) { ++ gfs2_remove_from_ail(bd); ++ continue; ++ } + if (!buffer_dirty(bh)) + continue; + if (gl == bd->bd_gl) +@@ -862,6 +868,8 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) + if (gfs2_ail1_empty(sdp)) + break; + } ++ if (gfs2_withdrawn(sdp)) ++ goto out; + atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */ + trace_gfs2_log_blocks(sdp, -1); + log_write_header(sdp, flags); +@@ -874,6 +882,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) + atomic_set(&sdp->sd_freeze_state, SFS_FROZEN); + } + ++out: + trace_gfs2_log_flush(sdp, 0, flags); + up_write(&sdp->sd_log_flush_lock); + +diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c +index cd4c6bc81cae..40d31024b72d 100644 +--- a/fs/nfs/callback_proc.c ++++ b/fs/nfs/callback_proc.c +@@ -128,6 +128,8 @@ static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp, + + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { + list_for_each_entry(lo, &server->layouts, plh_layouts) { ++ if (!pnfs_layout_is_valid(lo)) ++ continue; + if (stateid != NULL && + !nfs4_stateid_match_other(stateid, &lo->plh_stateid)) + continue; +diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c +index b768a0b42e82..ade2435551c8 100644 +--- a/fs/nfs/direct.c ++++ b/fs/nfs/direct.c +@@ -571,6 +571,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter) + l_ctx = nfs_get_lock_context(dreq->ctx); + if (IS_ERR(l_ctx)) { + result = PTR_ERR(l_ctx); ++ nfs_direct_req_release(dreq); + goto out_release; + } + dreq->l_ctx = l_ctx; +@@ -990,6 +991,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) + l_ctx = nfs_get_lock_context(dreq->ctx); + if (IS_ERR(l_ctx)) { + result = PTR_ERR(l_ctx); ++ nfs_direct_req_release(dreq); + goto out_release; + } + dreq->l_ctx = l_ctx; +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index 11bf15800ac9..a10fb87c6ac3 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -959,16 +959,16 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, + struct file *filp) + { + struct nfs_open_context *ctx; +- const struct cred *cred = get_current_cred(); + + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); +- if (!ctx) { +- put_cred(cred); ++ if (!ctx) + return ERR_PTR(-ENOMEM); +- } + nfs_sb_active(dentry->d_sb); + ctx->dentry = dget(dentry); +- ctx->cred = cred; ++ if (filp) ++ ctx->cred = get_cred(filp->f_cred); ++ else ++ ctx->cred = get_current_cred(); + ctx->ll_cred = NULL; + ctx->state = NULL; + ctx->mode = f_mode; +diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c +index 1297919e0fce..8e5d6223ddd3 100644 +--- a/fs/nfs/nfs4file.c ++++ b/fs/nfs/nfs4file.c +@@ -252,6 +252,9 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off, + if (remap_flags & ~REMAP_FILE_ADVISORY) + return -EINVAL; + ++ if (IS_SWAPFILE(dst_inode) || 
IS_SWAPFILE(src_inode)) ++ return -ETXTBSY; ++ + /* check alignment w.r.t. clone_blksize */ + ret = -EINVAL; + if (bs) { +diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c +index effaa4247b91..8d3278805602 100644 +--- a/fs/nfs/nfsroot.c ++++ b/fs/nfs/nfsroot.c +@@ -88,7 +88,7 @@ + #define NFS_ROOT "/tftpboot/%s" + + /* Default NFSROOT mount options. */ +-#define NFS_DEF_OPTIONS "vers=2,udp,rsize=4096,wsize=4096" ++#define NFS_DEF_OPTIONS "vers=2,tcp,rsize=4096,wsize=4096" + + /* Parameters passed from the kernel command line */ + static char nfs_root_parms[NFS_MAXPATHLEN + 1] __initdata = ""; +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c +index 8b7c525dbbf7..b736912098ee 100644 +--- a/fs/nfs/pagelist.c ++++ b/fs/nfs/pagelist.c +@@ -886,15 +886,6 @@ static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio, + pgio->pg_mirror_count = mirror_count; + } + +-/* +- * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1) +- */ +-void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio) +-{ +- pgio->pg_mirror_count = 1; +- pgio->pg_mirror_idx = 0; +-} +- + static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio) + { + pgio->pg_mirror_count = 1; +@@ -1320,6 +1311,14 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index) + } + } + ++/* ++ * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1) ++ */ ++void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio) ++{ ++ nfs_pageio_complete(pgio); ++} ++ + int __init nfs_init_nfspagecache(void) + { + nfs_page_cachep = kmem_cache_create("nfs_page", +diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c +index d8053bc96c4d..5a130409f173 100644 +--- a/fs/xfs/libxfs/xfs_alloc.c ++++ b/fs/xfs/libxfs/xfs_alloc.c +@@ -1515,7 +1515,7 @@ xfs_alloc_ag_vextent_lastblock( + * maxlen, go to the start of this block, and skip all those smaller + * than minlen. + */ +- if (len || args->alignment > 1) { ++ if (*len || args->alignment > 1) { + acur->cnt->bc_ptrs[0] = 1; + do { + error = xfs_alloc_get_rec(acur->cnt, bno, len, &i); +diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c +index bbfa6ba84dcd..fe8f60b59ec4 100644 +--- a/fs/xfs/xfs_attr_inactive.c ++++ b/fs/xfs/xfs_attr_inactive.c +@@ -145,8 +145,8 @@ xfs_attr3_node_inactive( + * Since this code is recursive (gasp!) we must protect ourselves. + */ + if (level > XFS_DA_NODE_MAXDEPTH) { +- xfs_trans_brelse(*trans, bp); /* no locks for later trans */ + xfs_buf_corruption_error(bp); ++ xfs_trans_brelse(*trans, bp); /* no locks for later trans */ + return -EFSCORRUPTED; + } + +diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c +index 0d3b640cf1cc..871ec22c9aee 100644 +--- a/fs/xfs/xfs_dir2_readdir.c ++++ b/fs/xfs/xfs_dir2_readdir.c +@@ -147,7 +147,7 @@ xfs_dir2_block_getdents( + xfs_off_t cook; + struct xfs_da_geometry *geo = args->geo; + int lock_mode; +- unsigned int offset; ++ unsigned int offset, next_offset; + unsigned int end; + + /* +@@ -173,9 +173,10 @@ xfs_dir2_block_getdents( + * Loop over the data portion of the block. + * Each object is a real entry (dep) or an unused one (dup). 
+ */ +- offset = geo->data_entry_offset; + end = xfs_dir3_data_end_offset(geo, bp->b_addr); +- while (offset < end) { ++ for (offset = geo->data_entry_offset; ++ offset < end; ++ offset = next_offset) { + struct xfs_dir2_data_unused *dup = bp->b_addr + offset; + struct xfs_dir2_data_entry *dep = bp->b_addr + offset; + uint8_t filetype; +@@ -184,14 +185,15 @@ xfs_dir2_block_getdents( + * Unused, skip it. + */ + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { +- offset += be16_to_cpu(dup->length); ++ next_offset = offset + be16_to_cpu(dup->length); + continue; + } + + /* + * Bump pointer for the next iteration. + */ +- offset += xfs_dir2_data_entsize(dp->i_mount, dep->namelen); ++ next_offset = offset + ++ xfs_dir2_data_entsize(dp->i_mount, dep->namelen); + + /* + * The entry is before the desired starting point, skip it. +diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c +index f6006d94a581..796ff37d5bb5 100644 +--- a/fs/xfs/xfs_log.c ++++ b/fs/xfs/xfs_log.c +@@ -605,18 +605,23 @@ xfs_log_release_iclog( + struct xlog *log = mp->m_log; + bool sync; + +- if (iclog->ic_state == XLOG_STATE_IOERROR) { +- xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR); +- return -EIO; +- } ++ if (iclog->ic_state == XLOG_STATE_IOERROR) ++ goto error; + + if (atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) { ++ if (iclog->ic_state == XLOG_STATE_IOERROR) { ++ spin_unlock(&log->l_icloglock); ++ goto error; ++ } + sync = __xlog_state_release_iclog(log, iclog); + spin_unlock(&log->l_icloglock); + if (sync) + xlog_sync(log, iclog); + } + return 0; ++error: ++ xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR); ++ return -EIO; + } + + /* +diff --git a/include/acpi/processor.h b/include/acpi/processor.h +index 47805172e73d..683e124ad517 100644 +--- a/include/acpi/processor.h ++++ b/include/acpi/processor.h +@@ -297,6 +297,14 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx + } + #endif + ++static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg, ++ bool direct) ++{ ++ if (direct || (is_percpu_thread() && cpu == smp_processor_id())) ++ return fn(arg); ++ return work_on_cpu(cpu, fn, arg); ++} ++ + /* in processor_perflib.c */ + + #ifdef CONFIG_CPU_FREQ +diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h +index b3f1082cc435..1c4fd950f091 100644 +--- a/include/asm-generic/mshyperv.h ++++ b/include/asm-generic/mshyperv.h +@@ -163,7 +163,7 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, + return nr_bank; + } + +-void hyperv_report_panic(struct pt_regs *regs, long err); ++void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die); + void hyperv_report_panic_msg(phys_addr_t pa, size_t size); + bool hv_is_hyperv_initialized(void); + bool hv_is_hibernation_supported(void); +diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h +index f6a7ba4dccd4..3fee04f81439 100644 +--- a/include/keys/big_key-type.h ++++ b/include/keys/big_key-type.h +@@ -17,6 +17,6 @@ extern void big_key_free_preparse(struct key_preparsed_payload *prep); + extern void big_key_revoke(struct key *key); + extern void big_key_destroy(struct key *key); + extern void big_key_describe(const struct key *big_key, struct seq_file *m); +-extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen); ++extern long big_key_read(const struct key *key, char *buffer, size_t buflen); + + #endif /* _KEYS_BIG_KEY_TYPE_H */ +diff --git a/include/keys/user-type.h b/include/keys/user-type.h +index d5e73266a81a..be61fcddc02a 100644 +--- 
a/include/keys/user-type.h ++++ b/include/keys/user-type.h +@@ -41,8 +41,7 @@ extern int user_update(struct key *key, struct key_preparsed_payload *prep); + extern void user_revoke(struct key *key); + extern void user_destroy(struct key *key); + extern void user_describe(const struct key *user, struct seq_file *m); +-extern long user_read(const struct key *key, +- char __user *buffer, size_t buflen); ++extern long user_read(const struct key *key, char *buffer, size_t buflen); + + static inline const struct user_key_payload *user_key_payload_rcu(const struct key *key) + { +diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h +index 7b73ef7f902d..b56cc825f64d 100644 +--- a/include/linux/buffer_head.h ++++ b/include/linux/buffer_head.h +@@ -189,6 +189,8 @@ struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block, + void __brelse(struct buffer_head *); + void __bforget(struct buffer_head *); + void __breadahead(struct block_device *, sector_t block, unsigned int size); ++void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size, ++ gfp_t gfp); + struct buffer_head *__bread_gfp(struct block_device *, + sector_t block, unsigned size, gfp_t gfp); + void invalidate_bh_lrus(void); +@@ -319,6 +321,12 @@ sb_breadahead(struct super_block *sb, sector_t block) + __breadahead(sb->s_bdev, block, sb->s_blocksize); + } + ++static inline void ++sb_breadahead_unmovable(struct super_block *sb, sector_t block) ++{ ++ __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0); ++} ++ + static inline struct buffer_head * + sb_getblk(struct super_block *sb, sector_t block) + { +diff --git a/include/linux/compiler.h b/include/linux/compiler.h +index 5e88e7e33abe..034b0a644efc 100644 +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -347,7 +347,7 @@ static inline void *offset_to_ptr(const int *off) + * compiler has support to do so. 
+ */ + #define compiletime_assert(condition, msg) \ +- _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) ++ _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) + + #define compiletime_assert_atomic_type(t) \ + compiletime_assert(__native_word(t), \ +diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h +index ac3f4888b3df..3c383ddd92dd 100644 +--- a/include/linux/f2fs_fs.h ++++ b/include/linux/f2fs_fs.h +@@ -125,6 +125,7 @@ struct f2fs_super_block { + /* + * For checkpoint + */ ++#define CP_RESIZEFS_FLAG 0x00004000 + #define CP_DISABLED_QUICK_FLAG 0x00002000 + #define CP_DISABLED_FLAG 0x00001000 + #define CP_QUOTA_NEED_FSCK_FLAG 0x00000800 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h +index 1e897e4168ac..dafb3d70ff81 100644 +--- a/include/linux/hugetlb.h ++++ b/include/linux/hugetlb.h +@@ -390,7 +390,10 @@ static inline bool is_file_hugepages(struct file *file) + return is_file_shm_hugepages(file); + } + +- ++static inline struct hstate *hstate_inode(struct inode *i) ++{ ++ return HUGETLBFS_SB(i->i_sb)->hstate; ++} + #else /* !CONFIG_HUGETLBFS */ + + #define is_file_hugepages(file) false +@@ -402,6 +405,10 @@ hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, + return ERR_PTR(-ENOSYS); + } + ++static inline struct hstate *hstate_inode(struct inode *i) ++{ ++ return NULL; ++} + #endif /* !CONFIG_HUGETLBFS */ + + #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA +@@ -472,11 +479,6 @@ extern unsigned int default_hstate_idx; + + #define default_hstate (hstates[default_hstate_idx]) + +-static inline struct hstate *hstate_inode(struct inode *i) +-{ +- return HUGETLBFS_SB(i->i_sb)->hstate; +-} +- + static inline struct hstate *hstate_file(struct file *f) + { + return hstate_inode(file_inode(f)); +@@ -729,11 +731,6 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma) + return NULL; + } + +-static inline struct hstate *hstate_inode(struct inode *i) +-{ +- return NULL; +-} +- + static inline struct hstate *page_hstate(struct page *page) + { + return NULL; +diff --git a/include/linux/key-type.h b/include/linux/key-type.h +index 4ded94bcf274..2ab2d6d6aeab 100644 +--- a/include/linux/key-type.h ++++ b/include/linux/key-type.h +@@ -127,7 +127,7 @@ struct key_type { + * much is copied into the buffer + * - shouldn't do the copy if the buffer is NULL + */ +- long (*read)(const struct key *key, char __user *buffer, size_t buflen); ++ long (*read)(const struct key *key, char *buffer, size_t buflen); + + /* handle request_key() for this type instead of invoking + * /sbin/request-key (optional) +diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h +index 4f052496cdfd..0a4f54dd4737 100644 +--- a/include/linux/percpu_counter.h ++++ b/include/linux/percpu_counter.h +@@ -78,9 +78,9 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc) + */ + static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) + { +- s64 ret = fbc->count; ++ /* Prevent reloads of fbc->count */ ++ s64 ret = READ_ONCE(fbc->count); + +- barrier(); /* Prevent reloads of fbc->count */ + if (ret >= 0) + return ret; + return 0; +diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h +index ba5914770191..383243326676 100644 +--- a/include/linux/platform_data/cros_ec_proto.h ++++ b/include/linux/platform_data/cros_ec_proto.h +@@ -125,6 +125,9 @@ struct cros_ec_command { + * @host_event_wake_mask: Mask of host events that cause wake from suspend. 
+ * @last_event_time: exact time from the hard irq when we got notified of + * a new event. ++ * @notifier_ready: The notifier_block to let the kernel re-query EC ++ * communication protocol when the EC sends ++ * EC_HOST_EVENT_INTERFACE_READY. + * @ec: The platform_device used by the mfd driver to interface with the + * main EC. + * @pd: The platform_device used by the mfd driver to interface with the +@@ -166,6 +169,7 @@ struct cros_ec_device { + u32 host_event_wake_mask; + u32 last_resume_result; + ktime_t last_event_time; ++ struct notifier_block notifier_ready; + + /* The platform devices used by the mfd driver */ + struct platform_device *ec; +diff --git a/include/linux/swapops.h b/include/linux/swapops.h +index 877fd239b6ff..3208a520d0be 100644 +--- a/include/linux/swapops.h ++++ b/include/linux/swapops.h +@@ -348,7 +348,8 @@ static inline void num_poisoned_pages_inc(void) + } + #endif + +-#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) ++#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \ ++ defined(CONFIG_DEVICE_PRIVATE) + static inline int non_swap_entry(swp_entry_t entry) + { + return swp_type(entry) >= MAX_SWAPFILES; +diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h +index b04c29270973..1ce3be63add1 100644 +--- a/include/trace/bpf_probe.h ++++ b/include/trace/bpf_probe.h +@@ -75,13 +75,17 @@ static inline void bpf_test_probe_##call(void) \ + check_trace_callback_type_##call(__bpf_trace_##template); \ + } \ + typedef void (*btf_trace_##call)(void *__data, proto); \ +-static struct bpf_raw_event_map __used \ +- __attribute__((section("__bpf_raw_tp_map"))) \ +-__bpf_trace_tp_map_##call = { \ +- .tp = &__tracepoint_##call, \ +- .bpf_func = (void *)(btf_trace_##call)__bpf_trace_##template, \ +- .num_args = COUNT_ARGS(args), \ +- .writable_size = size, \ ++static union { \ ++ struct bpf_raw_event_map event; \ ++ btf_trace_##call handler; \ ++} __bpf_trace_tp_map_##call __used \ ++__attribute__((section("__bpf_raw_tp_map"))) = { \ ++ .event = { \ ++ .tp = &__tracepoint_##call, \ ++ .bpf_func = __bpf_trace_##template, \ ++ .num_args = COUNT_ARGS(args), \ ++ .writable_size = size, \ ++ }, \ + }; + + #define FIRST(x, ...) 
x +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c +index 966b7b34cde0..3b92aea18ae7 100644 +--- a/kernel/bpf/syscall.c ++++ b/kernel/bpf/syscall.c +@@ -592,9 +592,7 @@ static void bpf_map_mmap_open(struct vm_area_struct *vma) + { + struct bpf_map *map = vma->vm_file->private_data; + +- bpf_map_inc_with_uref(map); +- +- if (vma->vm_flags & VM_WRITE) { ++ if (vma->vm_flags & VM_MAYWRITE) { + mutex_lock(&map->freeze_mutex); + map->writecnt++; + mutex_unlock(&map->freeze_mutex); +@@ -606,13 +604,11 @@ static void bpf_map_mmap_close(struct vm_area_struct *vma) + { + struct bpf_map *map = vma->vm_file->private_data; + +- if (vma->vm_flags & VM_WRITE) { ++ if (vma->vm_flags & VM_MAYWRITE) { + mutex_lock(&map->freeze_mutex); + map->writecnt--; + mutex_unlock(&map->freeze_mutex); + } +- +- bpf_map_put_with_uref(map); + } + + static const struct vm_operations_struct bpf_map_default_vmops = { +@@ -641,14 +637,16 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma) + /* set default open/close callbacks */ + vma->vm_ops = &bpf_map_default_vmops; + vma->vm_private_data = map; ++ vma->vm_flags &= ~VM_MAYEXEC; ++ if (!(vma->vm_flags & VM_WRITE)) ++ /* disallow re-mapping with PROT_WRITE */ ++ vma->vm_flags &= ~VM_MAYWRITE; + + err = map->ops->map_mmap(map, vma); + if (err) + goto out; + +- bpf_map_inc_with_uref(map); +- +- if (vma->vm_flags & VM_WRITE) ++ if (vma->vm_flags & VM_MAYWRITE) + map->writecnt++; + out: + mutex_unlock(&map->freeze_mutex); +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 595b39eee642..e5d12c54b552 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -227,8 +227,7 @@ struct bpf_call_arg_meta { + bool pkt_access; + int regno; + int access_size; +- s64 msize_smax_value; +- u64 msize_umax_value; ++ u64 msize_max_value; + int ref_obj_id; + int func_id; + u32 btf_id; +@@ -3568,8 +3567,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, + /* remember the mem_size which may be used later + * to refine return values. + */ +- meta->msize_smax_value = reg->smax_value; +- meta->msize_umax_value = reg->umax_value; ++ meta->msize_max_value = reg->umax_value; + + /* The register is SCALAR_VALUE; the access check + * happens using its boundaries. +@@ -4095,21 +4093,44 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) + return 0; + } + +-static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, +- int func_id, +- struct bpf_call_arg_meta *meta) ++static int do_refine_retval_range(struct bpf_verifier_env *env, ++ struct bpf_reg_state *regs, int ret_type, ++ int func_id, struct bpf_call_arg_meta *meta) + { + struct bpf_reg_state *ret_reg = ®s[BPF_REG_0]; ++ struct bpf_reg_state tmp_reg = *ret_reg; ++ bool ret; + + if (ret_type != RET_INTEGER || + (func_id != BPF_FUNC_get_stack && + func_id != BPF_FUNC_probe_read_str)) +- return; ++ return 0; ++ ++ /* Error case where ret is in interval [S32MIN, -1]. */ ++ ret_reg->smin_value = S32_MIN; ++ ret_reg->smax_value = -1; + +- ret_reg->smax_value = meta->msize_smax_value; +- ret_reg->umax_value = meta->msize_umax_value; + __reg_deduce_bounds(ret_reg); + __reg_bound_offset(ret_reg); ++ __update_reg_bounds(ret_reg); ++ ++ ret = push_stack(env, env->insn_idx + 1, env->insn_idx, false); ++ if (!ret) ++ return -EFAULT; ++ ++ *ret_reg = tmp_reg; ++ ++ /* Success case where ret is in range [0, msize_max_value]. 
*/ ++ ret_reg->smin_value = 0; ++ ret_reg->smax_value = meta->msize_max_value; ++ ret_reg->umin_value = ret_reg->smin_value; ++ ret_reg->umax_value = ret_reg->smax_value; ++ ++ __reg_deduce_bounds(ret_reg); ++ __reg_bound_offset(ret_reg); ++ __update_reg_bounds(ret_reg); ++ ++ return 0; + } + + static int +@@ -4377,7 +4398,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn + regs[BPF_REG_0].ref_obj_id = id; + } + +- do_refine_retval_range(regs, fn->ret_type, func_id, &meta); ++ err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta); ++ if (err) ++ return err; + + err = check_map_func_compatibility(env, meta.map_ptr, func_id); + if (err) +diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c +index 551b0eb7028a..2a0c4985f38e 100644 +--- a/kernel/dma/coherent.c ++++ b/kernel/dma/coherent.c +@@ -134,7 +134,7 @@ static void *__dma_alloc_from_coherent(struct device *dev, + + spin_lock_irqsave(&mem->spinlock, flags); + +- if (unlikely(size > (mem->size << PAGE_SHIFT))) ++ if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT))) + goto err; + + pageno = bitmap_find_free_region(mem->bitmap, mem->size, order); +@@ -144,8 +144,9 @@ static void *__dma_alloc_from_coherent(struct device *dev, + /* + * Memory was found in the coherent area. + */ +- *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT); +- ret = mem->virt_base + (pageno << PAGE_SHIFT); ++ *dma_handle = dma_get_device_base(dev, mem) + ++ ((dma_addr_t)pageno << PAGE_SHIFT); ++ ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT); + spin_unlock_irqrestore(&mem->spinlock, flags); + memset(ret, 0, size); + return ret; +@@ -194,7 +195,7 @@ static int __dma_release_from_coherent(struct dma_coherent_mem *mem, + int order, void *vaddr) + { + if (mem && vaddr >= mem->virt_base && vaddr < +- (mem->virt_base + (mem->size << PAGE_SHIFT))) { ++ (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) { + int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; + unsigned long flags; + +@@ -238,10 +239,10 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem, + struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) + { + if (mem && vaddr >= mem->virt_base && vaddr + size <= +- (mem->virt_base + (mem->size << PAGE_SHIFT))) { ++ (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) { + unsigned long off = vma->vm_pgoff; + int start = (vaddr - mem->virt_base) >> PAGE_SHIFT; +- int user_count = vma_pages(vma); ++ unsigned long user_count = vma_pages(vma); + int count = PAGE_ALIGN(size) >> PAGE_SHIFT; + + *ret = -ENXIO; +diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c +index 2031ed1ad7fa..9e1777c81f55 100644 +--- a/kernel/dma/debug.c ++++ b/kernel/dma/debug.c +@@ -137,9 +137,12 @@ static const char *const maperr2str[] = { + [MAP_ERR_CHECKED] = "dma map error checked", + }; + +-static const char *type2name[5] = { "single", "page", +- "scather-gather", "coherent", +- "resource" }; ++static const char *type2name[] = { ++ [dma_debug_single] = "single", ++ [dma_debug_sg] = "scather-gather", ++ [dma_debug_coherent] = "coherent", ++ [dma_debug_resource] = "resource", ++}; + + static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", + "DMA_FROM_DEVICE", "DMA_NONE" }; +diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c +index 99475a66c94f..687c1d83dc20 100644 +--- a/kernel/locking/locktorture.c ++++ b/kernel/locking/locktorture.c +@@ -696,10 +696,10 @@ static void __torture_print_stats(char *page, + if (statp[i].n_lock_fail) 
+ fail = true; + sum += statp[i].n_lock_acquired; +- if (max < statp[i].n_lock_fail) +- max = statp[i].n_lock_fail; +- if (min > statp[i].n_lock_fail) +- min = statp[i].n_lock_fail; ++ if (max < statp[i].n_lock_acquired) ++ max = statp[i].n_lock_acquired; ++ if (min > statp[i].n_lock_acquired) ++ min = statp[i].n_lock_acquired; + } + page += sprintf(page, + "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n", +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug +index 69def4a9df00..ab9af2e052ca 100644 +--- a/lib/Kconfig.debug ++++ b/lib/Kconfig.debug +@@ -241,6 +241,8 @@ config DEBUG_INFO_DWARF4 + config DEBUG_INFO_BTF + bool "Generate BTF typeinfo" + depends on DEBUG_INFO ++ depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED ++ depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST + help + Generate deduplicated BTF type information from DWARF debug info. + Turning this on expects presence of pahole tool, which will convert +diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c +index 3e1a90669006..ad53eb31d40f 100644 +--- a/net/dns_resolver/dns_key.c ++++ b/net/dns_resolver/dns_key.c +@@ -302,7 +302,7 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m) + * - the key's semaphore is read-locked + */ + static long dns_resolver_read(const struct key *key, +- char __user *buffer, size_t buflen) ++ char *buffer, size_t buflen) + { + int err = PTR_ERR(key->payload.data[dns_key_error]); + +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index d11f1a74d43c..68ec31c4ae65 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -3950,7 +3950,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, + NFT_SET_INTERVAL | NFT_SET_TIMEOUT | + NFT_SET_MAP | NFT_SET_EVAL | + NFT_SET_OBJECT)) +- return -EINVAL; ++ return -EOPNOTSUPP; + /* Only one of these operations is supported */ + if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) == + (NFT_SET_MAP | NFT_SET_OBJECT)) +@@ -3988,7 +3988,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, + objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE])); + if (objtype == NFT_OBJECT_UNSPEC || + objtype > NFT_OBJECT_MAX) +- return -EINVAL; ++ return -EOPNOTSUPP; + } else if (flags & NFT_SET_OBJECT) + return -EINVAL; + else +diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c +index 8617fc16a1ed..46d976969ca3 100644 +--- a/net/netfilter/nft_set_rbtree.c ++++ b/net/netfilter/nft_set_rbtree.c +@@ -218,27 +218,26 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, + + /* Detect overlaps as we descend the tree. Set the flag in these cases: + * +- * a1. |__ _ _? >|__ _ _ (insert start after existing start) +- * a2. _ _ __>| ?_ _ __| (insert end before existing end) +- * a3. _ _ ___| ?_ _ _>| (insert end after existing end) +- * a4. >|__ _ _ _ _ __| (insert start before existing end) ++ * a1. _ _ __>| ?_ _ __| (insert end before existing end) ++ * a2. _ _ ___| ?_ _ _>| (insert end after existing end) ++ * a3. _ _ ___? >|_ _ __| (insert start before existing end) + * + * and clear it later on, as we eventually reach the points indicated by + * '?' above, in the cases described below. We'll always meet these + * later, locally, due to tree ordering, and overlaps for the intervals + * that are the closest together are always evaluated last. + * +- * b1. |__ _ _! >|__ _ _ (insert start after existing end) +- * b2. _ _ __>| !_ _ __| (insert end before existing start) +- * b3. 
!_____>| (insert end after existing start) ++ * b1. _ _ __>| !_ _ __| (insert end before existing start) ++ * b2. _ _ ___| !_ _ _>| (insert end after existing start) ++ * b3. _ _ ___! >|_ _ __| (insert start after existing end) + * +- * Case a4. resolves to b1.: ++ * Case a3. resolves to b3.: + * - if the inserted start element is the leftmost, because the '0' + * element in the tree serves as end element + * - otherwise, if an existing end is found. Note that end elements are + * always inserted after corresponding start elements. + * +- * For a new, rightmost pair of elements, we'll hit cases b1. and b3., ++ * For a new, rightmost pair of elements, we'll hit cases b3. and b2., + * in that order. + * + * The flag is also cleared in two special cases: +@@ -262,9 +261,9 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, + p = &parent->rb_left; + + if (nft_rbtree_interval_start(new)) { +- overlap = nft_rbtree_interval_start(rbe) && +- nft_set_elem_active(&rbe->ext, +- genmask); ++ if (nft_rbtree_interval_end(rbe) && ++ nft_set_elem_active(&rbe->ext, genmask)) ++ overlap = false; + } else { + overlap = nft_rbtree_interval_end(rbe) && + nft_set_elem_active(&rbe->ext, +diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c +index 6c3f35fac42d..0c98313dd7a8 100644 +--- a/net/rxrpc/key.c ++++ b/net/rxrpc/key.c +@@ -31,7 +31,7 @@ static void rxrpc_free_preparse_s(struct key_preparsed_payload *); + static void rxrpc_destroy(struct key *); + static void rxrpc_destroy_s(struct key *); + static void rxrpc_describe(const struct key *, struct seq_file *); +-static long rxrpc_read(const struct key *, char __user *, size_t); ++static long rxrpc_read(const struct key *, char *, size_t); + + /* + * rxrpc defined keys take an arbitrary string as the description and an +@@ -1042,12 +1042,12 @@ EXPORT_SYMBOL(rxrpc_get_null_key); + * - this returns the result in XDR form + */ + static long rxrpc_read(const struct key *key, +- char __user *buffer, size_t buflen) ++ char *buffer, size_t buflen) + { + const struct rxrpc_key_token *token; + const struct krb5_principal *princ; + size_t size; +- __be32 __user *xdr, *oldxdr; ++ __be32 *xdr, *oldxdr; + u32 cnlen, toksize, ntoks, tok, zero; + u16 toksizes[AFSTOKEN_MAX]; + int loop; +@@ -1124,30 +1124,25 @@ static long rxrpc_read(const struct key *key, + if (!buffer || buflen < size) + return size; + +- xdr = (__be32 __user *) buffer; ++ xdr = (__be32 *)buffer; + zero = 0; + #define ENCODE(x) \ + do { \ +- __be32 y = htonl(x); \ +- if (put_user(y, xdr++) < 0) \ +- goto fault; \ ++ *xdr++ = htonl(x); \ + } while(0) + #define ENCODE_DATA(l, s) \ + do { \ + u32 _l = (l); \ + ENCODE(l); \ +- if (copy_to_user(xdr, (s), _l) != 0) \ +- goto fault; \ +- if (_l & 3 && \ +- copy_to_user((u8 __user *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \ +- goto fault; \ ++ memcpy(xdr, (s), _l); \ ++ if (_l & 3) \ ++ memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ + xdr += (_l + 3) >> 2; \ + } while(0) + #define ENCODE64(x) \ + do { \ + __be64 y = cpu_to_be64(x); \ +- if (copy_to_user(xdr, &y, 8) != 0) \ +- goto fault; \ ++ memcpy(xdr, &y, 8); \ + xdr += 8 >> 2; \ + } while(0) + #define ENCODE_STR(s) \ +@@ -1238,8 +1233,4 @@ static long rxrpc_read(const struct key *key, + ASSERTCMP((char __user *) xdr - buffer, ==, size); + _leave(" = %zu", size); + return size; +- +-fault: +- _leave(" = -EFAULT"); +- return -EFAULT; + } +diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c +index 24ca861815b1..2dc740acb3bf 100644 +--- 
a/net/sunrpc/auth_gss/auth_gss.c ++++ b/net/sunrpc/auth_gss/auth_gss.c +@@ -20,6 +20,7 @@ + #include <linux/sunrpc/clnt.h> + #include <linux/sunrpc/auth.h> + #include <linux/sunrpc/auth_gss.h> ++#include <linux/sunrpc/gss_krb5.h> + #include <linux/sunrpc/svcauth_gss.h> + #include <linux/sunrpc/gss_err.h> + #include <linux/workqueue.h> +@@ -1050,7 +1051,7 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt) + goto err_put_mech; + auth = &gss_auth->rpc_auth; + auth->au_cslack = GSS_CRED_SLACK >> 2; +- auth->au_rslack = GSS_VERF_SLACK >> 2; ++ auth->au_rslack = GSS_KRB5_MAX_SLACK_NEEDED >> 2; + auth->au_verfsize = GSS_VERF_SLACK >> 2; + auth->au_ralign = GSS_VERF_SLACK >> 2; + auth->au_flags = 0; +@@ -1934,35 +1935,69 @@ gss_unwrap_resp_auth(struct rpc_cred *cred) + return 0; + } + ++/* ++ * RFC 2203, Section 5.3.2.2 ++ * ++ * struct rpc_gss_integ_data { ++ * opaque databody_integ<>; ++ * opaque checksum<>; ++ * }; ++ * ++ * struct rpc_gss_data_t { ++ * unsigned int seq_num; ++ * proc_req_arg_t arg; ++ * }; ++ */ + static int + gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred, + struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp, + struct xdr_stream *xdr) + { +- struct xdr_buf integ_buf, *rcv_buf = &rqstp->rq_rcv_buf; +- u32 data_offset, mic_offset, integ_len, maj_stat; ++ struct xdr_buf gss_data, *rcv_buf = &rqstp->rq_rcv_buf; + struct rpc_auth *auth = cred->cr_auth; ++ u32 len, offset, seqno, maj_stat; + struct xdr_netobj mic; +- __be32 *p; ++ int ret; + +- p = xdr_inline_decode(xdr, 2 * sizeof(*p)); +- if (unlikely(!p)) ++ ret = -EIO; ++ mic.data = NULL; ++ ++ /* opaque databody_integ<>; */ ++ if (xdr_stream_decode_u32(xdr, &len)) + goto unwrap_failed; +- integ_len = be32_to_cpup(p++); +- if (integ_len & 3) ++ if (len & 3) + goto unwrap_failed; +- data_offset = (u8 *)(p) - (u8 *)rcv_buf->head[0].iov_base; +- mic_offset = integ_len + data_offset; +- if (mic_offset > rcv_buf->len) ++ offset = rcv_buf->len - xdr_stream_remaining(xdr); ++ if (xdr_stream_decode_u32(xdr, &seqno)) + goto unwrap_failed; +- if (be32_to_cpup(p) != rqstp->rq_seqno) ++ if (seqno != rqstp->rq_seqno) + goto bad_seqno; ++ if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len)) ++ goto unwrap_failed; + +- if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset, integ_len)) ++ /* ++ * The xdr_stream now points to the beginning of the ++ * upper layer payload, to be passed below to ++ * rpcauth_unwrap_resp_decode(). The checksum, which ++ * follows the upper layer payload in @rcv_buf, is ++ * located and parsed without updating the xdr_stream. 
++ */ ++ ++ /* opaque checksum<>; */ ++ offset += len; ++ if (xdr_decode_word(rcv_buf, offset, &len)) ++ goto unwrap_failed; ++ offset += sizeof(__be32); ++ if (offset + len > rcv_buf->len) + goto unwrap_failed; +- if (xdr_buf_read_mic(rcv_buf, &mic, mic_offset)) ++ mic.len = len; ++ mic.data = kmalloc(len, GFP_NOFS); ++ if (!mic.data) ++ goto unwrap_failed; ++ if (read_bytes_from_xdr_buf(rcv_buf, offset, mic.data, mic.len)) + goto unwrap_failed; +- maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); ++ ++ maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &gss_data, &mic); + if (maj_stat == GSS_S_CONTEXT_EXPIRED) + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); + if (maj_stat != GSS_S_COMPLETE) +@@ -1970,16 +2005,21 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred, + + auth->au_rslack = auth->au_verfsize + 2 + 1 + XDR_QUADLEN(mic.len); + auth->au_ralign = auth->au_verfsize + 2; +- return 0; ++ ret = 0; ++ ++out: ++ kfree(mic.data); ++ return ret; ++ + unwrap_failed: + trace_rpcgss_unwrap_failed(task); +- return -EIO; ++ goto out; + bad_seqno: +- trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(p)); +- return -EIO; ++ trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno); ++ goto out; + bad_mic: + trace_rpcgss_verify_mic(task, maj_stat); +- return -EIO; ++ goto out; + } + + static int +diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c +index fa7bb5e060d0..ed7a6060f73c 100644 +--- a/net/xdp/xdp_umem.c ++++ b/net/xdp/xdp_umem.c +@@ -343,7 +343,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) + u32 chunk_size = mr->chunk_size, headroom = mr->headroom; + unsigned int chunks, chunks_per_page; + u64 addr = mr->addr, size = mr->len; +- int size_chk, err; ++ int err; + + if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) { + /* Strictly speaking we could support this, if: +@@ -382,8 +382,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) + return -EINVAL; + } + +- size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM; +- if (size_chk < 0) ++ if (headroom >= chunk_size - XDP_PACKET_HEADROOM) + return -EINVAL; + + umem->address = (unsigned long)addr; +diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c +index 356f90e4522b..c350108aa38d 100644 +--- a/net/xdp/xsk.c ++++ b/net/xdp/xsk.c +@@ -131,8 +131,9 @@ static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf, + u64 page_start = addr & ~(PAGE_SIZE - 1); + u64 first_len = PAGE_SIZE - (addr - page_start); + +- memcpy(to_buf, from_buf, first_len + metalen); +- memcpy(next_pg_addr, from_buf + first_len, len - first_len); ++ memcpy(to_buf, from_buf, first_len); ++ memcpy(next_pg_addr, from_buf + first_len, ++ len + metalen - first_len); + + return; + } +diff --git a/security/keys/big_key.c b/security/keys/big_key.c +index 001abe530a0d..82008f900930 100644 +--- a/security/keys/big_key.c ++++ b/security/keys/big_key.c +@@ -352,7 +352,7 @@ void big_key_describe(const struct key *key, struct seq_file *m) + * read the key data + * - the key's semaphore is read-locked + */ +-long big_key_read(const struct key *key, char __user *buffer, size_t buflen) ++long big_key_read(const struct key *key, char *buffer, size_t buflen) + { + size_t datalen = (size_t)key->payload.data[big_key_len]; + long ret; +@@ -391,9 +391,8 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen) + + ret = datalen; + +- /* copy decrypted data to user */ +- if (copy_to_user(buffer, buf->virt, datalen) != 0) +- ret = -EFAULT; ++ /* copy out 
decrypted data */ ++ memcpy(buffer, buf->virt, datalen); + + err_fput: + fput(file); +@@ -401,9 +400,7 @@ error: + big_key_free_buffer(buf); + } else { + ret = datalen; +- if (copy_to_user(buffer, key->payload.data[big_key_data], +- datalen) != 0) +- ret = -EFAULT; ++ memcpy(buffer, key->payload.data[big_key_data], datalen); + } + + return ret; +diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c +index 60720f58cbe0..f6797ba44bf7 100644 +--- a/security/keys/encrypted-keys/encrypted.c ++++ b/security/keys/encrypted-keys/encrypted.c +@@ -902,14 +902,14 @@ out: + } + + /* +- * encrypted_read - format and copy the encrypted data to userspace ++ * encrypted_read - format and copy out the encrypted data + * + * The resulting datablob format is: + * <master-key name> <decrypted data length> <encrypted iv> <encrypted data> + * + * On success, return to userspace the encrypted key datablob size. + */ +-static long encrypted_read(const struct key *key, char __user *buffer, ++static long encrypted_read(const struct key *key, char *buffer, + size_t buflen) + { + struct encrypted_key_payload *epayload; +@@ -957,8 +957,7 @@ static long encrypted_read(const struct key *key, char __user *buffer, + key_put(mkey); + memzero_explicit(derived_key, sizeof(derived_key)); + +- if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0) +- ret = -EFAULT; ++ memcpy(buffer, ascii_buf, asciiblob_len); + kzfree(ascii_buf); + + return asciiblob_len; +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c +index d1a3dea58dee..106e16f9006b 100644 +--- a/security/keys/keyctl.c ++++ b/security/keys/keyctl.c +@@ -797,6 +797,21 @@ error: + return ret; + } + ++/* ++ * Call the read method ++ */ ++static long __keyctl_read_key(struct key *key, char *buffer, size_t buflen) ++{ ++ long ret; ++ ++ down_read(&key->sem); ++ ret = key_validate(key); ++ if (ret == 0) ++ ret = key->type->read(key, buffer, buflen); ++ up_read(&key->sem); ++ return ret; ++} ++ + /* + * Read a key's payload. + * +@@ -812,26 +827,27 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) + struct key *key; + key_ref_t key_ref; + long ret; ++ char *key_data; + + /* find the key first */ + key_ref = lookup_user_key(keyid, 0, 0); + if (IS_ERR(key_ref)) { + ret = -ENOKEY; +- goto error; ++ goto out; + } + + key = key_ref_to_ptr(key_ref); + + ret = key_read_state(key); + if (ret < 0) +- goto error2; /* Negatively instantiated */ ++ goto key_put_out; /* Negatively instantiated */ + + /* see if we can read it directly */ + ret = key_permission(key_ref, KEY_NEED_READ); + if (ret == 0) + goto can_read_key; + if (ret != -EACCES) +- goto error2; ++ goto key_put_out; + + /* we can't; see if it's searchable from this process's keyrings + * - we automatically take account of the fact that it may be +@@ -839,26 +855,51 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) + */ + if (!is_key_possessed(key_ref)) { + ret = -EACCES; +- goto error2; ++ goto key_put_out; + } + + /* the key is probably readable - now try to read it */ + can_read_key: +- ret = -EOPNOTSUPP; +- if (key->type->read) { +- /* Read the data with the semaphore held (since we might sleep) +- * to protect against the key being updated or revoked. 
+- */ +- down_read(&key->sem); +- ret = key_validate(key); +- if (ret == 0) +- ret = key->type->read(key, buffer, buflen); +- up_read(&key->sem); ++ if (!key->type->read) { ++ ret = -EOPNOTSUPP; ++ goto key_put_out; + } + +-error2: ++ if (!buffer || !buflen) { ++ /* Get the key length from the read method */ ++ ret = __keyctl_read_key(key, NULL, 0); ++ goto key_put_out; ++ } ++ ++ /* ++ * Read the data with the semaphore held (since we might sleep) ++ * to protect against the key being updated or revoked. ++ * ++ * Allocating a temporary buffer to hold the keys before ++ * transferring them to user buffer to avoid potential ++ * deadlock involving page fault and mmap_sem. ++ */ ++ key_data = kmalloc(buflen, GFP_KERNEL); ++ ++ if (!key_data) { ++ ret = -ENOMEM; ++ goto key_put_out; ++ } ++ ret = __keyctl_read_key(key, key_data, buflen); ++ ++ /* ++ * Read methods will just return the required length without ++ * any copying if the provided length isn't large enough. ++ */ ++ if (ret > 0 && ret <= buflen) { ++ if (copy_to_user(buffer, key_data, ret)) ++ ret = -EFAULT; ++ } ++ kzfree(key_data); ++ ++key_put_out: + key_put(key); +-error: ++out: + return ret; + } + +diff --git a/security/keys/keyring.c b/security/keys/keyring.c +index febf36c6ddc5..5ca620d31cd3 100644 +--- a/security/keys/keyring.c ++++ b/security/keys/keyring.c +@@ -459,7 +459,6 @@ static int keyring_read_iterator(const void *object, void *data) + { + struct keyring_read_iterator_context *ctx = data; + const struct key *key = keyring_ptr_to_key(object); +- int ret; + + kenter("{%s,%d},,{%zu/%zu}", + key->type->name, key->serial, ctx->count, ctx->buflen); +@@ -467,10 +466,7 @@ static int keyring_read_iterator(const void *object, void *data) + if (ctx->count >= ctx->buflen) + return 1; + +- ret = put_user(key->serial, ctx->buffer); +- if (ret < 0) +- return ret; +- ctx->buffer++; ++ *ctx->buffer++ = key->serial; + ctx->count += sizeof(key->serial); + return 0; + } +diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c +index ecba39c93fd9..41e9735006d0 100644 +--- a/security/keys/request_key_auth.c ++++ b/security/keys/request_key_auth.c +@@ -22,7 +22,7 @@ static int request_key_auth_instantiate(struct key *, + static void request_key_auth_describe(const struct key *, struct seq_file *); + static void request_key_auth_revoke(struct key *); + static void request_key_auth_destroy(struct key *); +-static long request_key_auth_read(const struct key *, char __user *, size_t); ++static long request_key_auth_read(const struct key *, char *, size_t); + + /* + * The request-key authorisation key type definition. 
+@@ -80,7 +80,7 @@ static void request_key_auth_describe(const struct key *key, + * - the key's semaphore is read-locked + */ + static long request_key_auth_read(const struct key *key, +- char __user *buffer, size_t buflen) ++ char *buffer, size_t buflen) + { + struct request_key_auth *rka = dereference_key_locked(key); + size_t datalen; +@@ -97,8 +97,7 @@ static long request_key_auth_read(const struct key *key, + if (buflen > datalen) + buflen = datalen; + +- if (copy_to_user(buffer, rka->callout_info, buflen) != 0) +- ret = -EFAULT; ++ memcpy(buffer, rka->callout_info, buflen); + } + + return ret; +diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c +index d2c5ec1e040b..8001ab07e63b 100644 +--- a/security/keys/trusted-keys/trusted_tpm1.c ++++ b/security/keys/trusted-keys/trusted_tpm1.c +@@ -1130,11 +1130,10 @@ out: + * trusted_read - copy the sealed blob data to userspace in hex. + * On success, return to userspace the trusted key datablob size. + */ +-static long trusted_read(const struct key *key, char __user *buffer, ++static long trusted_read(const struct key *key, char *buffer, + size_t buflen) + { + const struct trusted_key_payload *p; +- char *ascii_buf; + char *bufp; + int i; + +@@ -1143,18 +1142,9 @@ static long trusted_read(const struct key *key, char __user *buffer, + return -EINVAL; + + if (buffer && buflen >= 2 * p->blob_len) { +- ascii_buf = kmalloc_array(2, p->blob_len, GFP_KERNEL); +- if (!ascii_buf) +- return -ENOMEM; +- +- bufp = ascii_buf; ++ bufp = buffer; + for (i = 0; i < p->blob_len; i++) + bufp = hex_byte_pack(bufp, p->blob[i]); +- if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) { +- kzfree(ascii_buf); +- return -EFAULT; +- } +- kzfree(ascii_buf); + } + return 2 * p->blob_len; + } +diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c +index 6f12de4ce549..07d4287e9084 100644 +--- a/security/keys/user_defined.c ++++ b/security/keys/user_defined.c +@@ -168,7 +168,7 @@ EXPORT_SYMBOL_GPL(user_describe); + * read the key data + * - the key's semaphore is read-locked + */ +-long user_read(const struct key *key, char __user *buffer, size_t buflen) ++long user_read(const struct key *key, char *buffer, size_t buflen) + { + const struct user_key_payload *upayload; + long ret; +@@ -181,8 +181,7 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen) + if (buflen > upayload->datalen) + buflen = upayload->datalen; + +- if (copy_to_user(buffer, upayload->data, buflen) != 0) +- ret = -EFAULT; ++ memcpy(buffer, upayload->data, buflen); + } + + return ret; +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index bd093593f8fb..f41d8b7864c1 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -1071,6 +1071,8 @@ static int azx_freeze_noirq(struct device *dev) + struct azx *chip = card->private_data; + struct pci_dev *pci = to_pci_dev(dev); + ++ if (!azx_is_pm_ready(card)) ++ return 0; + if (chip->driver_type == AZX_DRIVER_SKL) + pci_set_power_state(pci, PCI_D3hot); + +@@ -1083,6 +1085,8 @@ static int azx_thaw_noirq(struct device *dev) + struct azx *chip = card->private_data; + struct pci_dev *pci = to_pci_dev(dev); + ++ if (!azx_is_pm_ready(card)) ++ return 0; + if (chip->driver_type == AZX_DRIVER_SKL) + pci_set_power_state(pci, PCI_D0); + +@@ -2027,24 +2031,15 @@ static void azx_firmware_cb(const struct firmware *fw, void *context) + { + struct snd_card *card = context; + struct azx *chip = card->private_data; +- struct pci_dev *pci = chip->pci; 
+ +- if (!fw) { +- dev_err(card->dev, "Cannot load firmware, aborting\n"); +- goto error; +- } +- +- chip->fw = fw; ++ if (fw) ++ chip->fw = fw; ++ else ++ dev_err(card->dev, "Cannot load firmware, continue without patching\n"); + if (!chip->disabled) { + /* continue probing */ +- if (azx_probe_continue(chip)) +- goto error; ++ azx_probe_continue(chip); + } +- return; /* OK */ +- +- error: +- snd_card_free(card); +- pci_set_drvdata(pci, NULL); + } + #endif + +diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c +index 431bd25c6cdb..6d47345a310b 100644 +--- a/tools/lib/bpf/netlink.c ++++ b/tools/lib/bpf/netlink.c +@@ -289,7 +289,7 @@ int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info, + + static __u32 get_xdp_id(struct xdp_link_info *info, __u32 flags) + { +- if (info->attach_mode != XDP_ATTACHED_MULTI) ++ if (info->attach_mode != XDP_ATTACHED_MULTI && !flags) + return info->prog_id; + if (flags & XDP_FLAGS_DRV_MODE) + return info->drv_prog_id; +diff --git a/tools/objtool/check.c b/tools/objtool/check.c +index 4768d91c6d68..2b765bbbef92 100644 +--- a/tools/objtool/check.c ++++ b/tools/objtool/check.c +@@ -1011,10 +1011,7 @@ static struct rela *find_jump_table(struct objtool_file *file, + * it. + */ + for (; +- &insn->list != &file->insn_list && +- insn->sec == func->sec && +- insn->offset >= func->offset; +- ++ &insn->list != &file->insn_list && insn->func && insn->func->pfunc == func; + insn = insn->first_jump_src ?: list_prev_entry(insn, list)) { + + if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) +diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c +index eba9a970703b..925722217edf 100644 +--- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c ++++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c +@@ -82,6 +82,7 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size) + void test_get_stack_raw_tp(void) + { + const char *file = "./test_get_stack_rawtp.o"; ++ const char *file_err = "./test_get_stack_rawtp_err.o"; + const char *prog_name = "raw_tracepoint/sys_enter"; + int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP; + struct perf_buffer_opts pb_opts = {}; +@@ -93,6 +94,10 @@ void test_get_stack_raw_tp(void) + struct bpf_map *map; + cpu_set_t cpu_set; + ++ err = bpf_prog_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); ++ if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno)) ++ return; ++ + err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) + return; +diff --git a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c +new file mode 100644 +index 000000000000..8941a41c2a55 +--- /dev/null ++++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c +@@ -0,0 +1,26 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++#include <linux/bpf.h> ++#include <bpf/bpf_helpers.h> ++ ++#define MAX_STACK_RAWTP 10 ++ ++SEC("raw_tracepoint/sys_enter") ++int bpf_prog2(void *ctx) ++{ ++ __u64 stack[MAX_STACK_RAWTP]; ++ int error; ++ ++ /* set all the flags which should return -EINVAL */ ++ error = bpf_get_stack(ctx, stack, 0, -1); ++ if (error < 0) ++ goto loop; ++ ++ return error; ++loop: ++ while (1) { ++ error++; ++ } ++} ++ ++char _license[] SEC("license") = "GPL"; +diff --git a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c 
b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c +index f24d50f09dbe..371926771db5 100644 +--- a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c ++++ b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c +@@ -9,17 +9,17 @@ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), +- BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)), ++ BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), +- BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)), ++ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2), + BPF_MOV64_IMM(BPF_REG_4, 256), + BPF_EMIT_CALL(BPF_FUNC_get_stack), + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32), +- BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16), ++ BPF_JMP_REG(BPF_JSLT, BPF_REG_8, BPF_REG_1, 16), + BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8), +@@ -29,7 +29,7 @@ + BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), +- BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)), ++ BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)/2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), |