-rw-r--r--  0000_README              |    4
-rw-r--r--  1009_linux-5.17.10.patch | 8138
2 files changed, 8142 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index ad4b906b..fcdbf704 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch: 1008_linux-5.17.9.patch
From: http://www.kernel.org
Desc: Linux 5.17.9
+Patch: 1009_linux-5.17.10.patch
+From: http://www.kernel.org
+Desc: Linux 5.17.10
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1009_linux-5.17.10.patch b/1009_linux-5.17.10.patch
new file mode 100644
index 00000000..ff23964b
--- /dev/null
+++ b/1009_linux-5.17.10.patch
@@ -0,0 +1,8138 @@
+diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
+index ea281dd755171..29b136849d300 100644
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -189,6 +189,9 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Qualcomm Tech. | Kryo4xx Silver | N/A | ARM64_ERRATUM_1024718 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| Qualcomm Tech. | Kryo4xx Gold | N/A | ARM64_ERRATUM_1286807 |
+++----------------+-----------------+-----------------+-----------------------------+
++
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Fujitsu | A64FX | E#010001 | FUJITSU_ERRATUM_010001 |
+ +----------------+-----------------+-----------------+-----------------------------+
+diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
+index 57b68d6c7c70d..eb6e2f2dc9eb0 100644
+--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
++++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
+@@ -58,7 +58,7 @@ patternProperties:
+ $ref: "/schemas/types.yaml#/definitions/string"
+ enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2,
+ ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1, EMMCG4,
+- EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP,
++ EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP,
+ GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
+ GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1, I2C10,
+ I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5,
+diff --git a/Makefile b/Makefile
+index aba139bbd1c70..318597a4147e0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 17
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Superb Owl
+
+diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
+index e4775bbceecc6..ac07c240419a2 100644
+--- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
++++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
+@@ -117,11 +117,6 @@
+ groups = "FWSPID";
+ };
+
+- pinctrl_fwqspid_default: fwqspid_default {
+- function = "FWSPID";
+- groups = "FWQSPID";
+- };
+-
+ pinctrl_fwspiwp_default: fwspiwp_default {
+ function = "FWSPIWP";
+ groups = "FWSPIWP";
+@@ -653,12 +648,12 @@
+ };
+
+ pinctrl_qspi1_default: qspi1_default {
+- function = "QSPI1";
++ function = "SPI1";
+ groups = "QSPI1";
+ };
+
+ pinctrl_qspi2_default: qspi2_default {
+- function = "QSPI2";
++ function = "SPI2";
+ groups = "QSPI2";
+ };
+
+diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi
+index c32e87fad4dc9..aac55b3aeded4 100644
+--- a/arch/arm/boot/dts/aspeed-g6.dtsi
++++ b/arch/arm/boot/dts/aspeed-g6.dtsi
+@@ -389,6 +389,16 @@
+ reg = <0x1e6f2000 0x1000>;
+ };
+
++ video: video@1e700000 {
++ compatible = "aspeed,ast2600-video-engine";
++ reg = <0x1e700000 0x1000>;
++ clocks = <&syscon ASPEED_CLK_GATE_VCLK>,
++ <&syscon ASPEED_CLK_GATE_ECLK>;
++ clock-names = "vclk", "eclk";
++ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
+ gpio0: gpio@1e780000 {
+ #gpio-cells = <2>;
+ gpio-controller;
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index ee3f7a599181e..4bbd92d41031f 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -1040,7 +1040,7 @@ vector_bhb_loop8_\name:
+
+ @ bhb workaround
+ mov r0, #8
+-3: b . + 4
++3: W(b) . + 4
+ subs r0, r0, #1
+ bne 3b
+ dsb
+diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
+index 75e905508f279..f0c390e9d3cee 100644
+--- a/arch/arm/kernel/stacktrace.c
++++ b/arch/arm/kernel/stacktrace.c
+@@ -54,17 +54,17 @@ int notrace unwind_frame(struct stackframe *frame)
+ return -EINVAL;
+
+ frame->sp = frame->fp;
+- frame->fp = *(unsigned long *)(fp);
+- frame->pc = *(unsigned long *)(fp + 4);
++ frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
++ frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4));
+ #else
+ /* check current frame pointer is within bounds */
+ if (fp < low + 12 || fp > high - 4)
+ return -EINVAL;
+
+ /* restore the registers from the stack frame */
+- frame->fp = *(unsigned long *)(fp - 12);
+- frame->sp = *(unsigned long *)(fp - 8);
+- frame->pc = *(unsigned long *)(fp - 4);
++ frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12));
++ frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8));
++ frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4));
+ #endif
+ #ifdef CONFIG_KRETPROBES
+ if (is_kretprobe_trampoline(frame->pc))
+diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
+index 06dbfb968182d..fb9f3eb6bf483 100644
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -288,6 +288,7 @@ void cpu_v7_ca15_ibe(void)
+ {
+ if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
+ cpu_v7_spectre_v2_init();
++ cpu_v7_spectre_bhb_init();
+ }
+
+ void cpu_v7_bugs_init(void)
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+index fb99cc2827c76..7ab3627cc347d 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+@@ -622,6 +622,10 @@
+ status = "okay";
+ };
+
++&rxmacro {
++ status = "okay";
++};
++
+ &slpi {
+ status = "okay";
+ firmware-name = "qcom/sm8250/slpi.mbn";
+@@ -773,6 +777,8 @@
+ };
+
+ &swr1 {
++ status = "okay";
++
+ wcd_rx: wcd9380-rx@0,4 {
+ compatible = "sdw20217010d00";
+ reg = <0 4>;
+@@ -781,6 +787,8 @@
+ };
+
+ &swr2 {
++ status = "okay";
++
+ wcd_tx: wcd9380-tx@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+@@ -819,6 +827,10 @@
+ };
+ };
+
++&txmacro {
++ status = "okay";
++};
++
+ &uart12 {
+ status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index a92230bec1ddb..bd212f6c351f1 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -2150,6 +2150,7 @@
+ pinctrl-0 = <&rx_swr_active>;
+ compatible = "qcom,sm8250-lpass-rx-macro";
+ reg = <0 0x3200000 0 0x1000>;
++ status = "disabled";
+
+ clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+@@ -2168,6 +2169,7 @@
+ swr1: soundwire-controller@3210000 {
+ reg = <0 0x3210000 0 0x2000>;
+ compatible = "qcom,soundwire-v1.5.1";
++ status = "disabled";
+ interrupts = <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rxmacro>;
+ clock-names = "iface";
+@@ -2195,6 +2197,7 @@
+ pinctrl-0 = <&tx_swr_active>;
+ compatible = "qcom,sm8250-lpass-tx-macro";
+ reg = <0 0x3220000 0 0x1000>;
++ status = "disabled";
+
+ clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+@@ -2218,6 +2221,7 @@
+ compatible = "qcom,soundwire-v1.5.1";
+ interrupts-extended = <&intc GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "core";
++ status = "disabled";
+
+ clocks = <&txmacro>;
+ clock-names = "iface";
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 146fa2e76834d..10c865e311a05 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -208,6 +208,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
+ #ifdef CONFIG_ARM64_ERRATUM_1286807
+ {
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
++ /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
++ ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
+ },
+ #endif
+ {},
+diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
+index f418ebc65f950..8a25b9df430ea 100644
+--- a/arch/arm64/kernel/mte.c
++++ b/arch/arm64/kernel/mte.c
+@@ -76,6 +76,9 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
+ mte_sync_page_tags(page, old_pte, check_swap,
+ pte_is_tagged);
+ }
++
++ /* ensure the tags are visible before the PTE is set */
++ smp_wmb();
+ }
+
+ int memcmp_pages(struct page *page1, struct page *page2)
+diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
+index 75fed4460407d..57c7c211f8c71 100644
+--- a/arch/arm64/kernel/paravirt.c
++++ b/arch/arm64/kernel/paravirt.c
+@@ -35,7 +35,7 @@ static u64 native_steal_clock(int cpu)
+ DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
+
+ struct pv_time_stolen_time_region {
+- struct pvclock_vcpu_stolen_time *kaddr;
++ struct pvclock_vcpu_stolen_time __rcu *kaddr;
+ };
+
+ static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region);
+@@ -52,7 +52,9 @@ early_param("no-steal-acc", parse_no_stealacc);
+ /* return stolen time in ns by asking the hypervisor */
+ static u64 para_steal_clock(int cpu)
+ {
++ struct pvclock_vcpu_stolen_time *kaddr = NULL;
+ struct pv_time_stolen_time_region *reg;
++ u64 ret = 0;
+
+ reg = per_cpu_ptr(&stolen_time_region, cpu);
+
+@@ -61,28 +63,37 @@ static u64 para_steal_clock(int cpu)
+ * online notification callback runs. Until the callback
+ * has run we just return zero.
+ */
+- if (!reg->kaddr)
++ rcu_read_lock();
++ kaddr = rcu_dereference(reg->kaddr);
++ if (!kaddr) {
++ rcu_read_unlock();
+ return 0;
++ }
+
+- return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
++ ret = le64_to_cpu(READ_ONCE(kaddr->stolen_time));
++ rcu_read_unlock();
++ return ret;
+ }
+
+ static int stolen_time_cpu_down_prepare(unsigned int cpu)
+ {
++ struct pvclock_vcpu_stolen_time *kaddr = NULL;
+ struct pv_time_stolen_time_region *reg;
+
+ reg = this_cpu_ptr(&stolen_time_region);
+ if (!reg->kaddr)
+ return 0;
+
+- memunmap(reg->kaddr);
+- memset(reg, 0, sizeof(*reg));
++ kaddr = rcu_replace_pointer(reg->kaddr, NULL, true);
++ synchronize_rcu();
++ memunmap(kaddr);
+
+ return 0;
+ }
+
+ static int stolen_time_cpu_online(unsigned int cpu)
+ {
++ struct pvclock_vcpu_stolen_time *kaddr = NULL;
+ struct pv_time_stolen_time_region *reg;
+ struct arm_smccc_res res;
+
+@@ -93,17 +104,19 @@ static int stolen_time_cpu_online(unsigned int cpu)
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
+ return -EINVAL;
+
+- reg->kaddr = memremap(res.a0,
++ kaddr = memremap(res.a0,
+ sizeof(struct pvclock_vcpu_stolen_time),
+ MEMREMAP_WB);
+
++ rcu_assign_pointer(reg->kaddr, kaddr);
++
+ if (!reg->kaddr) {
+ pr_warn("Failed to map stolen time data structure\n");
+ return -ENOMEM;
+ }
+
+- if (le32_to_cpu(reg->kaddr->revision) != 0 ||
+- le32_to_cpu(reg->kaddr->attributes) != 0) {
++ if (le32_to_cpu(kaddr->revision) != 0 ||
++ le32_to_cpu(kaddr->attributes) != 0) {
+ pr_warn_once("Unexpected revision or attributes in stolen time data\n");
+ return -ENXIO;
+ }
+diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
+index f0a3df9e18a32..413f899e4ac63 100644
+--- a/arch/arm64/kernel/relocate_kernel.S
++++ b/arch/arm64/kernel/relocate_kernel.S
+@@ -37,6 +37,15 @@
+ * safe memory that has been set up to be preserved during the copy operation.
+ */
+ SYM_CODE_START(arm64_relocate_new_kernel)
++ /*
++ * The kimage structure isn't allocated specially and may be clobbered
++ * during relocation. We must load any values we need from it prior to
++ * any relocation occurring.
++ */
++ ldr x28, [x0, #KIMAGE_START]
++ ldr x27, [x0, #KIMAGE_ARCH_EL2_VECTORS]
++ ldr x26, [x0, #KIMAGE_ARCH_DTB_MEM]
++
+ /* Setup the list loop variables. */
+ ldr x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */
+ ldr x17, [x0, #KIMAGE_ARCH_TTBR1] /* x17 = linear map copy */
+@@ -72,21 +81,20 @@ SYM_CODE_START(arm64_relocate_new_kernel)
+ ic iallu
+ dsb nsh
+ isb
+- ldr x4, [x0, #KIMAGE_START] /* relocation start */
+- ldr x1, [x0, #KIMAGE_ARCH_EL2_VECTORS] /* relocation start */
+- ldr x0, [x0, #KIMAGE_ARCH_DTB_MEM] /* dtb address */
+ turn_off_mmu x12, x13
+
+ /* Start new image. */
+- cbz x1, .Lel1
+- mov x1, x4 /* relocation start */
+- mov x2, x0 /* dtb address */
++ cbz x27, .Lel1
++ mov x1, x28 /* kernel entry point */
++ mov x2, x26 /* dtb address */
+ mov x3, xzr
+ mov x4, xzr
+ mov x0, #HVC_SOFT_RESTART
+ hvc #0 /* Jumps from el2 */
+ .Lel1:
++ mov x0, x26 /* dtb address */
++ mov x1, xzr
+ mov x2, xzr
+ mov x3, xzr
+- br x4 /* Jumps from el1 */
++ br x28 /* Jumps from el1 */
+ SYM_CODE_END(arm64_relocate_new_kernel)
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 4dc2fba316fff..2ae664a3930fc 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1080,8 +1080,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
+ val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
+ val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
+- if (irqchip_in_kernel(vcpu->kvm) &&
+- vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
++ if (kvm_vgic_global_state.type == VGIC_V3) {
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
+ val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
+ }
+diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
+index 64726c670ca64..5204fc6d6d502 100644
+--- a/arch/mips/lantiq/falcon/sysctrl.c
++++ b/arch/mips/lantiq/falcon/sysctrl.c
+@@ -167,6 +167,8 @@ static inline void clkdev_add_sys(const char *dev, unsigned int module,
+ {
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
++ if (!clk)
++ return;
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = NULL;
+ clk->cl.clk = clk;
+diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c
+index 3d5683e75cf1e..200fe9ff641d6 100644
+--- a/arch/mips/lantiq/xway/gptu.c
++++ b/arch/mips/lantiq/xway/gptu.c
+@@ -122,6 +122,8 @@ static inline void clkdev_add_gptu(struct device *dev, const char *con,
+ {
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
++ if (!clk)
++ return;
+ clk->cl.dev_id = dev_name(dev);
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
+index 917fac1636b71..084f6caba5f23 100644
+--- a/arch/mips/lantiq/xway/sysctrl.c
++++ b/arch/mips/lantiq/xway/sysctrl.c
+@@ -315,6 +315,8 @@ static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate,
+ {
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
++ if (!clk)
++ return;
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+@@ -338,6 +340,8 @@ static void clkdev_add_cgu(const char *dev, const char *con,
+ {
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
++ if (!clk)
++ return;
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+@@ -356,24 +360,28 @@ static void clkdev_add_pci(void)
+ struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ /* main pci clock */
+- clk->cl.dev_id = "17000000.pci";
+- clk->cl.con_id = NULL;
+- clk->cl.clk = clk;
+- clk->rate = CLOCK_33M;
+- clk->rates = valid_pci_rates;
+- clk->enable = pci_enable;
+- clk->disable = pmu_disable;
+- clk->module = 0;
+- clk->bits = PMU_PCI;
+- clkdev_add(&clk->cl);
++ if (clk) {
++ clk->cl.dev_id = "17000000.pci";
++ clk->cl.con_id = NULL;
++ clk->cl.clk = clk;
++ clk->rate = CLOCK_33M;
++ clk->rates = valid_pci_rates;
++ clk->enable = pci_enable;
++ clk->disable = pmu_disable;
++ clk->module = 0;
++ clk->bits = PMU_PCI;
++ clkdev_add(&clk->cl);
++ }
+
+ /* use internal/external bus clock */
+- clk_ext->cl.dev_id = "17000000.pci";
+- clk_ext->cl.con_id = "external";
+- clk_ext->cl.clk = clk_ext;
+- clk_ext->enable = pci_ext_enable;
+- clk_ext->disable = pci_ext_disable;
+- clkdev_add(&clk_ext->cl);
++ if (clk_ext) {
++ clk_ext->cl.dev_id = "17000000.pci";
++ clk_ext->cl.con_id = "external";
++ clk_ext->cl.clk = clk_ext;
++ clk_ext->enable = pci_ext_enable;
++ clk_ext->disable = pci_ext_disable;
++ clkdev_add(&clk_ext->cl);
++ }
+ }
+
+ /* xway socs can generate clocks on gpio pins */
+@@ -393,9 +401,15 @@ static void clkdev_add_clkout(void)
+ char *name;
+
+ name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
++ if (!name)
++ continue;
+ sprintf(name, "clkout%d", i);
+
+ clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
++ if (!clk) {
++ kfree(name);
++ continue;
++ }
+ clk->cl.dev_id = "1f103000.cgu";
+ clk->cl.con_id = name;
+ clk->cl.clk = clk;
+diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+index 3eef52b1a59b5..fd93fdadd28ca 100644
+--- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
++++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+@@ -167,7 +167,7 @@
+ clocks = <&prci PRCI_CLK_TLCLK>;
+ status = "disabled";
+ };
+- dma: dma@3000000 {
++ dma: dma-controller@3000000 {
+ compatible = "sifive,fu540-c000-pdma";
+ reg = <0x0 0x3000000 0x0 0x8000>;
+ interrupt-parent = <&plic0>;
+diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
+index 2b780786fc689..ead721965b9f9 100644
+--- a/arch/s390/kernel/traps.c
++++ b/arch/s390/kernel/traps.c
+@@ -142,10 +142,10 @@ static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc)
+ do_trap(regs, SIGFPE, si_code, "floating point exception");
+ }
+
+-static void translation_exception(struct pt_regs *regs)
++static void translation_specification_exception(struct pt_regs *regs)
+ {
+ /* May never happen. */
+- panic("Translation exception");
++ panic("Translation-Specification Exception");
+ }
+
+ static void illegal_op(struct pt_regs *regs)
+@@ -374,7 +374,7 @@ static void (*pgm_check_table[128])(struct pt_regs *regs) = {
+ [0x0f] = hfp_divide_exception,
+ [0x10] = do_dat_exception,
+ [0x11] = do_dat_exception,
+- [0x12] = translation_exception,
++ [0x12] = translation_specification_exception,
+ [0x13] = special_op_exception,
+ [0x14] = default_trap_handler,
+ [0x15] = operand_exception,
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 792f8e0f21789..5bcd9228db5fa 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -69,6 +69,7 @@ struct zpci_dev *get_zdev_by_fid(u32 fid)
+ list_for_each_entry(tmp, &zpci_list, entry) {
+ if (tmp->fid == fid) {
+ zdev = tmp;
++ zpci_zdev_get(zdev);
+ break;
+ }
+ }
+diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h
+index e359d2686178b..ecef3a9e16c00 100644
+--- a/arch/s390/pci/pci_bus.h
++++ b/arch/s390/pci/pci_bus.h
+@@ -19,7 +19,8 @@ void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error);
+ void zpci_release_device(struct kref *kref);
+ static inline void zpci_zdev_put(struct zpci_dev *zdev)
+ {
+- kref_put(&zdev->kref, zpci_release_device);
++ if (zdev)
++ kref_put(&zdev->kref, zpci_release_device);
+ }
+
+ static inline void zpci_zdev_get(struct zpci_dev *zdev)
+diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
+index be077b39da336..5011d27461fd3 100644
+--- a/arch/s390/pci/pci_clp.c
++++ b/arch/s390/pci/pci_clp.c
+@@ -22,6 +22,8 @@
+ #include <asm/clp.h>
+ #include <uapi/asm/clp.h>
+
++#include "pci_bus.h"
++
+ bool zpci_unique_uid;
+
+ void update_uid_checking(bool new)
+@@ -403,8 +405,11 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
+ return;
+
+ zdev = get_zdev_by_fid(entry->fid);
+- if (!zdev)
+- zpci_create_device(entry->fid, entry->fh, entry->config_state);
++ if (zdev) {
++ zpci_zdev_put(zdev);
++ return;
++ }
++ zpci_create_device(entry->fid, entry->fh, entry->config_state);
+ }
+
+ int clp_scan_pci_devices(void)
+diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
+index 2e3e5b2789257..ea9db5cea64e3 100644
+--- a/arch/s390/pci/pci_event.c
++++ b/arch/s390/pci/pci_event.c
+@@ -269,7 +269,7 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
+ pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
+
+ if (!pdev)
+- return;
++ goto no_pdev;
+
+ switch (ccdf->pec) {
+ case 0x003a: /* Service Action or Error Recovery Successful */
+@@ -286,6 +286,8 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
+ break;
+ }
+ pci_dev_put(pdev);
++no_pdev:
++ zpci_zdev_put(zdev);
+ }
+
+ void zpci_event_error(void *data)
+@@ -314,6 +316,7 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
+ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ {
+ struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
++ bool existing_zdev = !!zdev;
+ enum zpci_state state;
+
+ zpci_dbg(3, "avl fid:%x, fh:%x, pec:%x\n",
+@@ -378,6 +381,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ default:
+ break;
+ }
++ if (existing_zdev)
++ zpci_zdev_put(zdev);
+ }
+
+ void zpci_event_availability(void *data)
+diff --git a/arch/x86/crypto/chacha-avx512vl-x86_64.S b/arch/x86/crypto/chacha-avx512vl-x86_64.S
+index 946f74dd6fbaa..259383e1ad440 100644
+--- a/arch/x86/crypto/chacha-avx512vl-x86_64.S
++++ b/arch/x86/crypto/chacha-avx512vl-x86_64.S
+@@ -172,7 +172,7 @@ SYM_FUNC_START(chacha_2block_xor_avx512vl)
+ # xor remaining bytes from partial register into output
+ mov %rcx,%rax
+ and $0xf,%rcx
+- jz .Ldone8
++ jz .Ldone2
+ mov %rax,%r9
+ and $~0xf,%r9
+
+@@ -438,7 +438,7 @@ SYM_FUNC_START(chacha_4block_xor_avx512vl)
+ # xor remaining bytes from partial register into output
+ mov %rcx,%rax
+ and $0xf,%rcx
+- jz .Ldone8
++ jz .Ldone4
+ mov %rax,%r9
+ and $~0xf,%r9
+
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index e7cd16e1e0a0b..32333dfc85b6a 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -5611,6 +5611,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
+ {
+ struct kvm_mmu_page *sp, *node;
+ int nr_zapped, batch = 0;
++ bool unstable;
+
+ restart:
+ list_for_each_entry_safe_reverse(sp, node,
+@@ -5642,11 +5643,12 @@ restart:
+ goto restart;
+ }
+
+- if (__kvm_mmu_prepare_zap_page(kvm, sp,
+- &kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
+- batch += nr_zapped;
++ unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
++ &kvm->arch.zapped_obsolete_pages, &nr_zapped);
++ batch += nr_zapped;
++
++ if (unstable)
+ goto restart;
+- }
+ }
+
+ /*
+diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
+index eca39f56c2315..0604bc29f0b8c 100644
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -171,9 +171,12 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
+ return true;
+ }
+
+-static int cmp_u64(const void *a, const void *b)
++static int cmp_u64(const void *pa, const void *pb)
+ {
+- return *(__u64 *)a - *(__u64 *)b;
++ u64 a = *(u64 *)pa;
++ u64 b = *(u64 *)pb;
++
++ return (a > b) - (a < b);
+ }
+
+ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+diff --git a/arch/x86/um/shared/sysdep/syscalls_64.h b/arch/x86/um/shared/sysdep/syscalls_64.h
+index 48d6cd12f8a5e..b6b997225841c 100644
+--- a/arch/x86/um/shared/sysdep/syscalls_64.h
++++ b/arch/x86/um/shared/sysdep/syscalls_64.h
+@@ -10,13 +10,12 @@
+ #include <linux/msg.h>
+ #include <linux/shm.h>
+
+-typedef long syscall_handler_t(void);
++typedef long syscall_handler_t(long, long, long, long, long, long);
+
+ extern syscall_handler_t *sys_call_table[];
+
+ #define EXECUTE_SYSCALL(syscall, regs) \
+- (((long (*)(long, long, long, long, long, long)) \
+- (*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
++ (((*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
+ UPT_SYSCALL_ARG2(&regs->regs), \
+ UPT_SYSCALL_ARG3(&regs->regs), \
+ UPT_SYSCALL_ARG4(&regs->regs), \
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index 3ed5eaf3446a2..6ed602b2f80a5 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -742,6 +742,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+
+ if (at_head) {
+ list_add(&rq->queuelist, &per_prio->dispatch);
++ rq->fifo_time = jiffies;
+ } else {
+ deadline_add_rq_rb(per_prio, rq);
+
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index 478ba959362ce..416f4f48f69b0 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -171,7 +171,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
+ unsigned int set_size)
+ {
+ struct drbd_request *r;
+- struct drbd_request *req = NULL;
++ struct drbd_request *req = NULL, *tmp = NULL;
+ int expect_epoch = 0;
+ int expect_size = 0;
+
+@@ -225,8 +225,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
+ * to catch requests being barrier-acked "unexpectedly".
+ * It usually should find the same req again, or some READ preceding it. */
+ list_for_each_entry(req, &connection->transfer_log, tl_requests)
+- if (req->epoch == expect_epoch)
++ if (req->epoch == expect_epoch) {
++ tmp = req;
+ break;
++ }
++ req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
+ list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
+ if (req->epoch != expect_epoch)
+ break;
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index a29cc2928be47..37b53d6779e41 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -509,8 +509,8 @@ static unsigned long fdc_busy;
+ static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
+ static DECLARE_WAIT_QUEUE_HEAD(command_done);
+
+-/* Errors during formatting are counted here. */
+-static int format_errors;
++/* errors encountered on the current (or last) request */
++static int floppy_errors;
+
+ /* Format request descriptor. */
+ static struct format_descr format_req;
+@@ -530,7 +530,6 @@ static struct format_descr format_req;
+ static char *floppy_track_buffer;
+ static int max_buffer_sectors;
+
+-static int *errors;
+ typedef void (*done_f)(int);
+ static const struct cont_t {
+ void (*interrupt)(void);
+@@ -1455,7 +1454,7 @@ static int interpret_errors(void)
+ if (drive_params[current_drive].flags & FTD_MSG)
+ DPRINT("Over/Underrun - retrying\n");
+ bad = 0;
+- } else if (*errors >= drive_params[current_drive].max_errors.reporting) {
++ } else if (floppy_errors >= drive_params[current_drive].max_errors.reporting) {
+ print_errors();
+ }
+ if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC)
+@@ -2095,7 +2094,7 @@ static void bad_flp_intr(void)
+ if (!next_valid_format(current_drive))
+ return;
+ }
+- err_count = ++(*errors);
++ err_count = ++floppy_errors;
+ INFBOUND(write_errors[current_drive].badness, err_count);
+ if (err_count > drive_params[current_drive].max_errors.abort)
+ cont->done(0);
+@@ -2241,9 +2240,8 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
+ return -EINVAL;
+ }
+ format_req = *tmp_format_req;
+- format_errors = 0;
+ cont = &format_cont;
+- errors = &format_errors;
++ floppy_errors = 0;
+ ret = wait_til_done(redo_format, true);
+ if (ret == -EINTR)
+ return -EINTR;
+@@ -2761,10 +2759,11 @@ static int set_next_request(void)
+ current_req = list_first_entry_or_null(&floppy_reqs, struct request,
+ queuelist);
+ if (current_req) {
+- current_req->error_count = 0;
++ floppy_errors = 0;
+ list_del_init(&current_req->queuelist);
++ return 1;
+ }
+- return current_req != NULL;
++ return 0;
+ }
+
+ /* Starts or continues processing request. Will automatically unlock the
+@@ -2823,7 +2822,6 @@ do_request:
+ _floppy = floppy_type + drive_params[current_drive].autodetect[drive_state[current_drive].probed_format];
+ } else
+ probing = 0;
+- errors = &(current_req->error_count);
+ tmp = make_raw_rw_request();
+ if (tmp < 2) {
+ request_done(tmp);
+diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
+index 23cc8297ec4c0..d429ba52a7190 100644
+--- a/drivers/clk/at91/clk-generated.c
++++ b/drivers/clk/at91/clk-generated.c
+@@ -117,6 +117,10 @@ static void clk_generated_best_diff(struct clk_rate_request *req,
+ tmp_rate = parent_rate;
+ else
+ tmp_rate = parent_rate / div;
++
++ if (tmp_rate < req->min_rate || tmp_rate > req->max_rate)
++ return;
++
+ tmp_diff = abs(req->rate - tmp_rate);
+
+ if (*best_diff < 0 || *best_diff >= tmp_diff) {
+diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
+index 11f30fd48c141..031b5f701a0a3 100644
+--- a/drivers/crypto/qcom-rng.c
++++ b/drivers/crypto/qcom-rng.c
+@@ -65,6 +65,7 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
+ } else {
+ /* copy only remaining bytes */
+ memcpy(data, &val, max - currsize);
++ break;
+ }
+ } while (currsize < max);
+
+diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
+index be1bf39a317de..90a920e7f6642 100644
+--- a/drivers/crypto/stm32/stm32-crc32.c
++++ b/drivers/crypto/stm32/stm32-crc32.c
+@@ -384,8 +384,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
+ struct stm32_crc *crc = platform_get_drvdata(pdev);
+ int ret = pm_runtime_get_sync(crc->dev);
+
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put_noidle(crc->dev);
+ return ret;
++ }
+
+ spin_lock(&crc_list.lock);
+ list_del(&crc->list);
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index a6fc96e426687..0ad5039e49b63 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -407,6 +407,7 @@ static inline int is_dma_buf_file(struct file *file)
+
+ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
+ {
++ static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
+ struct file *file;
+ struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
+
+@@ -416,6 +417,13 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
+ inode->i_size = dmabuf->size;
+ inode_set_bytes(inode, dmabuf->size);
+
++ /*
++ * The ->i_ino acquired from get_next_ino() is not unique thus
++ * not suitable for using it as dentry name by dmabuf stats.
++ * Override ->i_ino with the unique and dmabuffs specific
++ * value.
++ */
++ inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
+ file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
+ flags, &dma_buf_fops);
+ if (IS_ERR(file))
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index a2c8dd329b31b..2db19cd640a43 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -707,6 +707,9 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ unsigned long flags;
+ unsigned int on, off;
+
++ if (state->polarity != PWM_POLARITY_NORMAL)
++ return -EINVAL;
++
+ val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle;
+ do_div(val, NSEC_PER_SEC);
+ if (val > UINT_MAX + 1ULL)
+diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
+index 20780c35da1b4..23cddb265a0dc 100644
+--- a/drivers/gpio/gpio-vf610.c
++++ b/drivers/gpio/gpio-vf610.c
+@@ -125,9 +125,13 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+ {
+ struct vf610_gpio_port *port = gpiochip_get_data(chip);
+ unsigned long mask = BIT(gpio);
++ u32 val;
+
+- if (port->sdata && port->sdata->have_paddr)
+- vf610_gpio_writel(mask, port->gpio_base + GPIO_PDDR);
++ if (port->sdata && port->sdata->have_paddr) {
++ val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
++ val |= mask;
++ vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
++ }
+
+ vf610_gpio_set(chip, gpio, value);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 9a53a4de2bb7c..e8e8a74026159 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1422,9 +1422,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
+
+ #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
+ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
++bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
+ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+ #else
+ static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
++static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
+ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+ #endif
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 0e12315fa0cb8..98ac53ee6bb55 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -1045,6 +1045,20 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
+ (pm_suspend_target_state == PM_SUSPEND_MEM);
+ }
+
++/**
++ * amdgpu_acpi_should_gpu_reset
++ *
++ * @adev: amdgpu_device_pointer
++ *
++ * returns true if should reset GPU, false if not
++ */
++bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
++{
++ if (adev->flags & AMD_IS_APU)
++ return false;
++ return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
++}
++
+ /**
+ * amdgpu_acpi_is_s0ix_active
+ *
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index f09aeff513ee9..e2c422a6825c7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2289,7 +2289,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+- if (!adev->in_s0ix)
++ if (amdgpu_acpi_should_gpu_reset(adev))
+ return amdgpu_asic_reset(adev);
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+index d7559e5a99ce8..e708f07fe75af 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+@@ -153,9 +153,4 @@ void dcn31_hw_sequencer_construct(struct dc *dc)
+ dc->hwss.init_hw = dcn20_fpga_init_hw;
+ dc->hwseq->funcs.init_pipes = NULL;
+ }
+- if (dc->debug.disable_z10) {
+- /*hw not support z10 or sw disable it*/
+- dc->hwss.z10_restore = NULL;
+- dc->hwss.z10_save_init = NULL;
+- }
+ }
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 8b3822142fed8..a1e5bb78f433c 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -4852,6 +4852,7 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
+
+ mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
+ drm_edid_get_monitor_name(mst_edid, name, namelen);
++ kfree(mst_edid);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
+index a69b28d65a9ba..f005c793cadf6 100644
+--- a/drivers/gpu/drm/i915/display/intel_dmc.c
++++ b/drivers/gpu/drm/i915/display/intel_dmc.c
+@@ -367,6 +367,44 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
+ }
+ }
+
++static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
++ const u32 *mmioaddr, u32 mmio_count,
++ int header_ver, u8 dmc_id)
++{
++ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
++ u32 start_range, end_range;
++ int i;
++
++ if (dmc_id >= DMC_FW_MAX) {
++ drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
++ return false;
++ }
++
++ if (header_ver == 1) {
++ start_range = DMC_MMIO_START_RANGE;
++ end_range = DMC_MMIO_END_RANGE;
++ } else if (dmc_id == DMC_FW_MAIN) {
++ start_range = TGL_MAIN_MMIO_START;
++ end_range = TGL_MAIN_MMIO_END;
++ } else if (DISPLAY_VER(i915) >= 13) {
++ start_range = ADLP_PIPE_MMIO_START;
++ end_range = ADLP_PIPE_MMIO_END;
++ } else if (DISPLAY_VER(i915) >= 12) {
++ start_range = TGL_PIPE_MMIO_START(dmc_id);
++ end_range = TGL_PIPE_MMIO_END(dmc_id);
++ } else {
++ drm_warn(&i915->drm, "Unknown mmio range for sanity check");
++ return false;
++ }
++
++ for (i = 0; i < mmio_count; i++) {
++ if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
++ return false;
++ }
++
++ return true;
++}
++
+ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
+ const struct intel_dmc_header_base *dmc_header,
+ size_t rem_size, u8 dmc_id)
+@@ -436,6 +474,12 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
+ return 0;
+ }
+
++ if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
++ dmc_header->header_ver, dmc_id)) {
++ drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n");
++ return 0;
++ }
++
+ for (i = 0; i < mmio_count; i++) {
+ dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
+ dmc_info->mmiodata[i] = mmiodata[i];
+diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
+index df10b6898987a..4a2662838cd8d 100644
+--- a/drivers/gpu/drm/i915/display/intel_opregion.c
++++ b/drivers/gpu/drm/i915/display/intel_opregion.c
+@@ -375,21 +375,6 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+ return -EINVAL;
+ }
+
+- /*
+- * The port numbering and mapping here is bizarre. The now-obsolete
+- * swsci spec supports ports numbered [0..4]. Port E is handled as a
+- * special case, but port F and beyond are not. The functionality is
+- * supposed to be obsolete for new platforms. Just bail out if the port
+- * number is out of bounds after mapping.
+- */
+- if (port > 4) {
+- drm_dbg_kms(&dev_priv->drm,
+- "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
+- intel_encoder->base.base.id, intel_encoder->base.name,
+- port_name(intel_encoder->port), port);
+- return -EINVAL;
+- }
+-
+ if (!enable)
+ parm |= 4 << 8;
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
+index 7be0002d9d707..f577582ddd9fe 100644
+--- a/drivers/gpu/drm/i915/gt/intel_reset.c
++++ b/drivers/gpu/drm/i915/gt/intel_reset.c
+@@ -791,7 +791,7 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
+ __intel_engine_reset(engine, stalled_mask & engine->mask);
+ local_bh_enable();
+
+- intel_uc_reset(&gt->uc, true);
++ intel_uc_reset(&gt->uc, ALL_ENGINES);
+
+ intel_ggtt_restore_fences(gt->ggtt);
+
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+index 3aabe164c3291..e1fb8e1da128c 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+@@ -417,7 +417,7 @@ int intel_guc_global_policies_update(struct intel_guc *guc);
+ void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);
+
+ void intel_guc_submission_reset_prepare(struct intel_guc *guc);
+-void intel_guc_submission_reset(struct intel_guc *guc, bool stalled);
++void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled);
+ void intel_guc_submission_reset_finish(struct intel_guc *guc);
+ void intel_guc_submission_cancel_requests(struct intel_guc *guc);
+
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index 154ad726e266a..1e51a365833bb 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -1603,9 +1603,9 @@ __unwind_incomplete_requests(struct intel_context *ce)
+ spin_unlock_irqrestore(&sched_engine->lock, flags);
+ }
+
+-static void __guc_reset_context(struct intel_context *ce, bool stalled)
++static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled)
+ {
+- bool local_stalled;
++ bool guilty;
+ struct i915_request *rq;
+ unsigned long flags;
+ u32 head;
+@@ -1647,7 +1647,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
+ if (!intel_context_is_pinned(ce))
+ goto next_context;
+
+- local_stalled = false;
++ guilty = false;
+ rq = intel_context_find_active_request(ce);
+ if (!rq) {
+ head = ce->ring->tail;
+@@ -1655,14 +1655,14 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
+ }
+
+ if (i915_request_started(rq))
+- local_stalled = true;
++ guilty = stalled & ce->engine->mask;
+
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ head = intel_ring_wrap(ce->ring, rq->head);
+
+- __i915_request_reset(rq, local_stalled && stalled);
++ __i915_request_reset(rq, guilty);
+ out_replay:
+- guc_reset_state(ce, head, local_stalled && stalled);
++ guc_reset_state(ce, head, guilty);
+ next_context:
+ if (i != number_children)
+ ce = list_next_entry(ce, parallel.child_link);
+@@ -1673,7 +1673,7 @@ out_put:
+ intel_context_put(parent);
+ }
+
+-void intel_guc_submission_reset(struct intel_guc *guc, bool stalled)
++void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
+ {
+ struct intel_context *ce;
+ unsigned long index;
+@@ -4042,7 +4042,7 @@ static void guc_context_replay(struct intel_context *ce)
+ {
+ struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
+
+- __guc_reset_context(ce, true);
++ __guc_reset_context(ce, ce->engine->mask);
+ tasklet_hi_schedule(&sched_engine->tasklet);
+ }
+
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+index 09ed29df67bc9..cbfb5a01cc1da 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+@@ -592,7 +592,7 @@ sanitize:
+ __uc_sanitize(uc);
+ }
+
+-void intel_uc_reset(struct intel_uc *uc, bool stalled)
++void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
+ {
+ struct intel_guc *guc = &uc->guc;
+
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+index 866b462821c00..a8f38c2c60e23 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
++++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+@@ -42,7 +42,7 @@ void intel_uc_driver_late_release(struct intel_uc *uc);
+ void intel_uc_driver_remove(struct intel_uc *uc);
+ void intel_uc_init_mmio(struct intel_uc *uc);
+ void intel_uc_reset_prepare(struct intel_uc *uc);
+-void intel_uc_reset(struct intel_uc *uc, bool stalled);
++void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled);
+ void intel_uc_reset_finish(struct intel_uc *uc);
+ void intel_uc_cancel_requests(struct intel_uc *uc);
+ void intel_uc_suspend(struct intel_uc *uc);
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 4b8fee1be8ae5..f4ab46ae62143 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -7938,6 +7938,22 @@ enum {
+ /* MMIO address range for DMC program (0x80000 - 0x82FFF) */
+ #define DMC_MMIO_START_RANGE 0x80000
+ #define DMC_MMIO_END_RANGE 0x8FFFF
++#define DMC_V1_MMIO_START_RANGE 0x80000
++#define TGL_MAIN_MMIO_START 0x8F000
++#define TGL_MAIN_MMIO_END 0x8FFFF
++#define _TGL_PIPEA_MMIO_START 0x92000
++#define _TGL_PIPEA_MMIO_END 0x93FFF
++#define _TGL_PIPEB_MMIO_START 0x96000
++#define _TGL_PIPEB_MMIO_END 0x97FFF
++#define ADLP_PIPE_MMIO_START 0x5F000
++#define ADLP_PIPE_MMIO_END 0x5FFFF
++
++#define TGL_PIPE_MMIO_START(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
++ _TGL_PIPEB_MMIO_START)
++
++#define TGL_PIPE_MMIO_END(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
++ _TGL_PIPEB_MMIO_END)
++
+ #define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030)
+ #define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C)
+ #define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038)
+diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
+index 45fe4a7fe0c03..901f0fb04fee4 100644
+--- a/drivers/i2c/busses/i2c-mt7621.c
++++ b/drivers/i2c/busses/i2c-mt7621.c
+@@ -304,7 +304,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)
+
+ if (i2c->bus_freq == 0) {
+ dev_warn(i2c->dev, "clock-frequency 0 not supported\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto err_disable_clk;
+ }
+
+ adap = &i2c->adap;
+@@ -322,10 +323,15 @@ static int mtk_i2c_probe(struct platform_device *pdev)
+
+ ret = i2c_add_adapter(adap);
+ if (ret < 0)
+- return ret;
++ goto err_disable_clk;
+
+ dev_info(&pdev->dev, "clock %u kHz\n", i2c->bus_freq / 1000);
+
++ return 0;
++
++err_disable_clk:
++ clk_disable_unprepare(i2c->clk);
++
+ return ret;
+ }
+
+diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
+index 8c1b31ed0c429..ac8e7d60672a1 100644
+--- a/drivers/i2c/busses/i2c-piix4.c
++++ b/drivers/i2c/busses/i2c-piix4.c
+@@ -77,6 +77,7 @@
+
+ /* SB800 constants */
+ #define SB800_PIIX4_SMB_IDX 0xcd6
++#define SB800_PIIX4_SMB_MAP_SIZE 2
+
+ #define KERNCZ_IMC_IDX 0x3e
+ #define KERNCZ_IMC_DATA 0x3f
+@@ -97,6 +98,9 @@
+ #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
+ #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
+
++#define SB800_PIIX4_FCH_PM_ADDR 0xFED80300
++#define SB800_PIIX4_FCH_PM_SIZE 8
++
+ /* insmod parameters */
+
+ /* If force is set to anything different from 0, we forcibly enable the
+@@ -155,6 +159,12 @@ static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
+ };
+ static const char *piix4_aux_port_name_sb800 = " port 1";
+
++struct sb800_mmio_cfg {
++ void __iomem *addr;
++ struct resource *res;
++ bool use_mmio;
++};
++
+ struct i2c_piix4_adapdata {
+ unsigned short smba;
+
+@@ -162,8 +172,75 @@ struct i2c_piix4_adapdata {
+ bool sb800_main;
+ bool notify_imc;
+ u8 port; /* Port number, shifted */
++ struct sb800_mmio_cfg mmio_cfg;
+ };
+
++static int piix4_sb800_region_request(struct device *dev,
++ struct sb800_mmio_cfg *mmio_cfg)
++{
++ if (mmio_cfg->use_mmio) {
++ struct resource *res;
++ void __iomem *addr;
++
++ res = request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR,
++ SB800_PIIX4_FCH_PM_SIZE,
++ "sb800_piix4_smb");
++ if (!res) {
++ dev_err(dev,
++ "SMBus base address memory region 0x%x already in use.\n",
++ SB800_PIIX4_FCH_PM_ADDR);
++ return -EBUSY;
++ }
++
++ addr = ioremap(SB800_PIIX4_FCH_PM_ADDR,
++ SB800_PIIX4_FCH_PM_SIZE);
++ if (!addr) {
++ release_resource(res);
++ dev_err(dev, "SMBus base address mapping failed.\n");
++ return -ENOMEM;
++ }
++
++ mmio_cfg->res = res;
++ mmio_cfg->addr = addr;
++
++ return 0;
++ }
++
++ if (!request_muxed_region(SB800_PIIX4_SMB_IDX, SB800_PIIX4_SMB_MAP_SIZE,
++ "sb800_piix4_smb")) {
++ dev_err(dev,
++ "SMBus base address index region 0x%x already in use.\n",
++ SB800_PIIX4_SMB_IDX);
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
++static void piix4_sb800_region_release(struct device *dev,
++ struct sb800_mmio_cfg *mmio_cfg)
++{
++ if (mmio_cfg->use_mmio) {
++ iounmap(mmio_cfg->addr);
++ release_resource(mmio_cfg->res);
++ return;
++ }
++
++ release_region(SB800_PIIX4_SMB_IDX, SB800_PIIX4_SMB_MAP_SIZE);
++}
++
++static bool piix4_sb800_use_mmio(struct pci_dev *PIIX4_dev)
++{
++ /*
++ * cd6h/cd7h port I/O accesses can be disabled on AMD processors
++ * w/ SMBus PCI revision ID 0x51 or greater. MMIO is supported on
++ * the same processors and is the recommended access method.
++ */
++ return (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
++ PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
++ PIIX4_dev->revision >= 0x51);
++}
++
+ static int piix4_setup(struct pci_dev *PIIX4_dev,
+ const struct pci_device_id *id)
+ {
+@@ -263,12 +340,61 @@ static int piix4_setup(struct pci_dev *PIIX4_dev,
+ return piix4_smba;
+ }
+
++static int piix4_setup_sb800_smba(struct pci_dev *PIIX4_dev,
++ u8 smb_en,
++ u8 aux,
++ u8 *smb_en_status,
++ unsigned short *piix4_smba)
++{
++ struct sb800_mmio_cfg mmio_cfg;
++ u8 smba_en_lo;
++ u8 smba_en_hi;
++ int retval;
++
++ mmio_cfg.use_mmio = piix4_sb800_use_mmio(PIIX4_dev);
++ retval = piix4_sb800_region_request(&PIIX4_dev->dev, &mmio_cfg);
++ if (retval)
++ return retval;
++
++ if (mmio_cfg.use_mmio) {
++ smba_en_lo = ioread8(mmio_cfg.addr);
++ smba_en_hi = ioread8(mmio_cfg.addr + 1);
++ } else {
++ outb_p(smb_en, SB800_PIIX4_SMB_IDX);
++ smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
++ outb_p(smb_en + 1, SB800_PIIX4_SMB_IDX);
++ smba_en_hi = inb_p(SB800_PIIX4_SMB_IDX + 1);
++ }
++
++ piix4_sb800_region_release(&PIIX4_dev->dev, &mmio_cfg);
++
++ if (!smb_en) {
++ *smb_en_status = smba_en_lo & 0x10;
++ *piix4_smba = smba_en_hi << 8;
++ if (aux)
++ *piix4_smba |= 0x20;
++ } else {
++ *smb_en_status = smba_en_lo & 0x01;
++ *piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
++ }
++
++ if (!*smb_en_status) {
++ dev_err(&PIIX4_dev->dev,
++ "SMBus Host Controller not enabled!\n");
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
+ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ const struct pci_device_id *id, u8 aux)
+ {
+ unsigned short piix4_smba;
+- u8 smba_en_lo, smba_en_hi, smb_en, smb_en_status, port_sel;
++ u8 smb_en, smb_en_status, port_sel;
+ u8 i2ccfg, i2ccfg_offset = 0x10;
++ struct sb800_mmio_cfg mmio_cfg;
++ int retval;
+
+ /* SB800 and later SMBus does not support forcing address */
+ if (force || force_addr) {
+@@ -290,35 +416,11 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ else
+ smb_en = (aux) ? 0x28 : 0x2c;
+
+- if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2, "sb800_piix4_smb")) {
+- dev_err(&PIIX4_dev->dev,
+- "SMB base address index region 0x%x already in use.\n",
+- SB800_PIIX4_SMB_IDX);
+- return -EBUSY;
+- }
+-
+- outb_p(smb_en, SB800_PIIX4_SMB_IDX);
+- smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
+- outb_p(smb_en + 1, SB800_PIIX4_SMB_IDX);
+- smba_en_hi = inb_p(SB800_PIIX4_SMB_IDX + 1);
++ retval = piix4_setup_sb800_smba(PIIX4_dev, smb_en, aux, &smb_en_status,
++ &piix4_smba);
+
+- release_region(SB800_PIIX4_SMB_IDX, 2);
+-
+- if (!smb_en) {
+- smb_en_status = smba_en_lo & 0x10;
+- piix4_smba = smba_en_hi << 8;
+- if (aux)
+- piix4_smba |= 0x20;
+- } else {
+- smb_en_status = smba_en_lo & 0x01;
+- piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
+- }
+-
+- if (!smb_en_status) {
+- dev_err(&PIIX4_dev->dev,
+- "SMBus Host Controller not enabled!\n");
+- return -ENODEV;
+- }
++ if (retval)
++ return retval;
+
+ if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
+ return -ENODEV;
+@@ -371,10 +473,11 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
+ }
+ } else {
+- if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2,
+- "sb800_piix4_smb")) {
++ mmio_cfg.use_mmio = piix4_sb800_use_mmio(PIIX4_dev);
++ retval = piix4_sb800_region_request(&PIIX4_dev->dev, &mmio_cfg);
++ if (retval) {
+ release_region(piix4_smba, SMBIOSIZE);
+- return -EBUSY;
++ return retval;
+ }
+
+ outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
+@@ -384,7 +487,7 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ SB800_PIIX4_PORT_IDX;
+ piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+ piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
+- release_region(SB800_PIIX4_SMB_IDX, 2);
++ piix4_sb800_region_release(&PIIX4_dev->dev, &mmio_cfg);
+ }
+
+ dev_info(&PIIX4_dev->dev,
+@@ -662,6 +765,29 @@ static void piix4_imc_wakeup(void)
+ release_region(KERNCZ_IMC_IDX, 2);
+ }
+
++static int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg)
++{
++ u8 smba_en_lo, val;
++
++ if (mmio_cfg->use_mmio) {
++ smba_en_lo = ioread8(mmio_cfg->addr + piix4_port_sel_sb800);
++ val = (smba_en_lo & ~piix4_port_mask_sb800) | port;
++ if (smba_en_lo != val)
++ iowrite8(val, mmio_cfg->addr + piix4_port_sel_sb800);
++
++ return (smba_en_lo & piix4_port_mask_sb800);
++ }
++
++ outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
++ smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
++
++ val = (smba_en_lo & ~piix4_port_mask_sb800) | port;
++ if (smba_en_lo != val)
++ outb_p(val, SB800_PIIX4_SMB_IDX + 1);
++
++ return (smba_en_lo & piix4_port_mask_sb800);
++}
++
+ /*
+ * Handles access to multiple SMBus ports on the SB800.
+ * The port is selected by bits 2:1 of the smb_en register (0x2c).
+@@ -678,12 +804,12 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
+ unsigned short piix4_smba = adapdata->smba;
+ int retries = MAX_TIMEOUT;
+ int smbslvcnt;
+- u8 smba_en_lo;
+- u8 port;
++ u8 prev_port;
+ int retval;
+
+- if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2, "sb800_piix4_smb"))
+- return -EBUSY;
++ retval = piix4_sb800_region_request(&adap->dev, &adapdata->mmio_cfg);
++ if (retval)
++ return retval;
+
+ /* Request the SMBUS semaphore, avoid conflicts with the IMC */
+ smbslvcnt = inb_p(SMBSLVCNT);
+@@ -738,18 +864,12 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
+ }
+ }
+
+- outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
+- smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
+-
+- port = adapdata->port;
+- if ((smba_en_lo & piix4_port_mask_sb800) != port)
+- outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port,
+- SB800_PIIX4_SMB_IDX + 1);
++ prev_port = piix4_sb800_port_sel(adapdata->port, &adapdata->mmio_cfg);
+
+ retval = piix4_access(adap, addr, flags, read_write,
+ command, size, data);
+
+- outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
++ piix4_sb800_port_sel(prev_port, &adapdata->mmio_cfg);
+
+ /* Release the semaphore */
+ outb_p(smbslvcnt | 0x20, SMBSLVCNT);
+@@ -758,7 +878,7 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
+ piix4_imc_wakeup();
+
+ release:
+- release_region(SB800_PIIX4_SMB_IDX, 2);
++ piix4_sb800_region_release(&adap->dev, &adapdata->mmio_cfg);
+ return retval;
+ }
+
+@@ -836,6 +956,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
+ return -ENOMEM;
+ }
+
++ adapdata->mmio_cfg.use_mmio = piix4_sb800_use_mmio(dev);
+ adapdata->smba = smba;
+ adapdata->sb800_main = sb800_main;
+ adapdata->port = port << piix4_port_shift_sb800;
+diff --git a/drivers/input/input.c b/drivers/input/input.c
+index ccaeb24263854..ba246fabc6c17 100644
+--- a/drivers/input/input.c
++++ b/drivers/input/input.c
+@@ -47,6 +47,17 @@ static DEFINE_MUTEX(input_mutex);
+
+ static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
+
++static const unsigned int input_max_code[EV_CNT] = {
++ [EV_KEY] = KEY_MAX,
++ [EV_REL] = REL_MAX,
++ [EV_ABS] = ABS_MAX,
++ [EV_MSC] = MSC_MAX,
++ [EV_SW] = SW_MAX,
++ [EV_LED] = LED_MAX,
++ [EV_SND] = SND_MAX,
++ [EV_FF] = FF_MAX,
++};
++
+ static inline int is_event_supported(unsigned int code,
+ unsigned long *bm, unsigned int max)
+ {
+@@ -2074,6 +2085,14 @@ EXPORT_SYMBOL(input_get_timestamp);
+ */
+ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
+ {
++ if (type < EV_CNT && input_max_code[type] &&
++ code > input_max_code[type]) {
++ pr_err("%s: invalid code %u for type %u\n", __func__, code,
++ type);
++ dump_stack();
++ return;
++ }
++
+ switch (type) {
+ case EV_KEY:
+ __set_bit(code, dev->keybit);
+diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
+index 2bd407d86bae5..3a48262fb3d35 100644
+--- a/drivers/input/touchscreen/ili210x.c
++++ b/drivers/input/touchscreen/ili210x.c
+@@ -951,9 +951,9 @@ static int ili210x_i2c_probe(struct i2c_client *client,
+ if (error)
+ return error;
+
+- usleep_range(50, 100);
++ usleep_range(12000, 15000);
+ gpiod_set_value_cansleep(reset_gpio, 0);
+- msleep(100);
++ msleep(160);
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
+index bc11203c9cf78..72e0b767e1ba4 100644
+--- a/drivers/input/touchscreen/stmfts.c
++++ b/drivers/input/touchscreen/stmfts.c
+@@ -339,11 +339,11 @@ static int stmfts_input_open(struct input_dev *dev)
+
+ err = pm_runtime_get_sync(&sdata->client->dev);
+ if (err < 0)
+- return err;
++ goto out;
+
+ err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON);
+ if (err)
+- return err;
++ goto out;
+
+ mutex_lock(&sdata->mutex);
+ sdata->running = true;
+@@ -366,7 +366,9 @@ static int stmfts_input_open(struct input_dev *dev)
+ "failed to enable touchkey\n");
+ }
+
+- return 0;
++out:
++ pm_runtime_put_noidle(&sdata->client->dev);
++ return err;
+ }
+
+ static void stmfts_input_close(struct input_dev *dev)
+diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
+index 180d7e9d3400a..81c55bfd6e0c2 100644
+--- a/drivers/mmc/core/mmc_ops.c
++++ b/drivers/mmc/core/mmc_ops.c
+@@ -21,7 +21,7 @@
+
+ #define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
+ #define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */
+-#define MMC_OP_COND_PERIOD_US (1 * 1000) /* 1ms */
++#define MMC_OP_COND_PERIOD_US (4 * 1000) /* 4ms */
+ #define MMC_OP_COND_TIMEOUT_MS 1000 /* 1s */
+
+ static const u8 tuning_blk_pattern_4bit[] = {
+diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
+index b56a54d6c5a9c..8f184a852a0a7 100644
+--- a/drivers/net/can/m_can/m_can_pci.c
++++ b/drivers/net/can/m_can/m_can_pci.c
+@@ -18,14 +18,9 @@
+
+ #define M_CAN_PCI_MMIO_BAR 0
+
++#define M_CAN_CLOCK_FREQ_EHL 200000000
+ #define CTL_CSR_INT_CTL_OFFSET 0x508
+
+-struct m_can_pci_config {
+- const struct can_bittiming_const *bit_timing;
+- const struct can_bittiming_const *data_timing;
+- unsigned int clock_freq;
+-};
+-
+ struct m_can_pci_priv {
+ struct m_can_classdev cdev;
+
+@@ -89,40 +84,9 @@ static struct m_can_ops m_can_pci_ops = {
+ .read_fifo = iomap_read_fifo,
+ };
+
+-static const struct can_bittiming_const m_can_bittiming_const_ehl = {
+- .name = KBUILD_MODNAME,
+- .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
+- .tseg1_max = 64,
+- .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+- .tseg2_max = 128,
+- .sjw_max = 128,
+- .brp_min = 1,
+- .brp_max = 512,
+- .brp_inc = 1,
+-};
+-
+-static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
+- .name = KBUILD_MODNAME,
+- .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
+- .tseg1_max = 16,
+- .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+- .tseg2_max = 8,
+- .sjw_max = 4,
+- .brp_min = 1,
+- .brp_max = 32,
+- .brp_inc = 1,
+-};
+-
+-static const struct m_can_pci_config m_can_pci_ehl = {
+- .bit_timing = &m_can_bittiming_const_ehl,
+- .data_timing = &m_can_data_bittiming_const_ehl,
+- .clock_freq = 200000000,
+-};
+-
+ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+ {
+ struct device *dev = &pci->dev;
+- const struct m_can_pci_config *cfg;
+ struct m_can_classdev *mcan_class;
+ struct m_can_pci_priv *priv;
+ void __iomem *base;
+@@ -150,8 +114,6 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+ if (!mcan_class)
+ return -ENOMEM;
+
+- cfg = (const struct m_can_pci_config *)id->driver_data;
+-
+ priv = cdev_to_priv(mcan_class);
+
+ priv->base = base;
+@@ -163,9 +125,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+ mcan_class->dev = &pci->dev;
+ mcan_class->net->irq = pci_irq_vector(pci, 0);
+ mcan_class->pm_clock_support = 1;
+- mcan_class->bit_timing = cfg->bit_timing;
+- mcan_class->data_timing = cfg->data_timing;
+- mcan_class->can.clock.freq = cfg->clock_freq;
++ mcan_class->can.clock.freq = id->driver_data;
+ mcan_class->ops = &m_can_pci_ops;
+
+ pci_set_drvdata(pci, mcan_class);
+@@ -218,8 +178,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
+ m_can_pci_suspend, m_can_pci_resume);
+
+ static const struct pci_device_id m_can_pci_id_table[] = {
+- { PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
+- { PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
++ { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
++ { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
+ { } /* Terminating Entry */
+ };
+ MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);
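The m_can_pci revert goes back to keeping the EHL clock frequency directly in the PCI table's driver_data instead of behind a per-variant config struct. A small sketch of that scalar-in-driver_data idiom; the types and lookup below are illustrative, not the real PCI core API:

#include <stdint.h>
#include <stdio.h>

struct dev_id {
        uint16_t  vendor;
        uint16_t  device;
        uintptr_t driver_data;          /* scalar payload, no extra struct */
};

#define CLOCK_FREQ_EHL 200000000UL

static const struct dev_id id_table[] = {
        { 0x8086, 0x4bc1, CLOCK_FREQ_EHL },
        { 0x8086, 0x4bc2, CLOCK_FREQ_EHL },
        { 0 }                           /* terminating entry */
};

static const struct dev_id *match(uint16_t vendor, uint16_t device)
{
        for (const struct dev_id *id = id_table; id->vendor; id++)
                if (id->vendor == vendor && id->device == device)
                        return id;
        return NULL;
}

int main(void)
{
        const struct dev_id *id = match(0x8086, 0x4bc2);

        if (id)
                printf("clock = %lu Hz\n", (unsigned long)id->driver_data);
        return 0;
}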
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 77e76c9efd32f..8201ce7adb777 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -346,7 +346,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ int budget)
+ {
+ struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
+- bool is_rsc_completed = true;
+ int err = 0;
+
+ for (; (self->sw_head != self->hw_head) && budget;
+@@ -364,12 +363,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ continue;
+
+ if (!buff->is_eop) {
++ unsigned int frag_cnt = 0U;
+ buff_ = buff;
+ do {
++ bool is_rsc_completed = true;
++
+ if (buff_->next >= self->size) {
+ err = -EIO;
+ goto err_exit;
+ }
++
++ frag_cnt++;
+ next_ = buff_->next,
+ buff_ = &self->buff_ring[next_];
+ is_rsc_completed =
+@@ -377,18 +381,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ next_,
+ self->hw_head);
+
+- if (unlikely(!is_rsc_completed))
+- break;
++ if (unlikely(!is_rsc_completed) ||
++ frag_cnt > MAX_SKB_FRAGS) {
++ err = 0;
++ goto err_exit;
++ }
+
+ buff->is_error |= buff_->is_error;
+ buff->is_cso_err |= buff_->is_cso_err;
+
+ } while (!buff_->is_eop);
+
+- if (!is_rsc_completed) {
+- err = 0;
+- goto err_exit;
+- }
+ if (buff->is_error ||
+ (buff->is_lro && buff->is_cso_err)) {
+ buff_ = buff;
+@@ -446,7 +449,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ ALIGN(hdr_len, sizeof(long)));
+
+ if (buff->len - hdr_len > 0) {
+- skb_add_rx_frag(skb, 0, buff->rxdata.page,
++ skb_add_rx_frag(skb, i++, buff->rxdata.page,
+ buff->rxdata.pg_off + hdr_len,
+ buff->len - hdr_len,
+ AQ_CFG_RX_FRAME_MAX);
+@@ -455,7 +458,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+
+ if (!buff->is_eop) {
+ buff_ = buff;
+- i = 1U;
+ do {
+ next_ = buff_->next;
+ buff_ = &self->buff_ring[next_];
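The aq_ring change bounds the RSC fragment walk so a corrupted descriptor chain can no longer run past MAX_SKB_FRAGS. A condensed sketch of that guarded-walk pattern, using made-up ring types and limits:

#include <stdio.h>

#define RING_SIZE 8
#define MAX_FRAGS 4

struct desc {
        unsigned int next;
        int is_eop;                     /* end of packet */
};

/* Returns the fragment count, or -1 if the chain is invalid or too long. */
static int walk_chain(const struct desc *ring, unsigned int head)
{
        unsigned int idx = head;
        int frags = 0;

        while (!ring[idx].is_eop) {
                if (ring[idx].next >= RING_SIZE)
                        return -1;      /* index out of range */
                if (++frags > MAX_FRAGS)
                        return -1;      /* chain longer than we can store */
                idx = ring[idx].next;
        }
        return frags;
}

int main(void)
{
        /* 0 -> 1 -> 2 (EOP); descriptor 5 points at itself and never ends. */
        struct desc ring[RING_SIZE] = {
                [0] = { .next = 1 }, [1] = { .next = 2 }, [2] = { .is_eop = 1 },
                [5] = { .next = 5 },
        };

        printf("good chain: %d\n", walk_chain(ring, 0));  /* 2 */
        printf("bad chain:  %d\n", walk_chain(ring, 5));  /* -1 */
        return 0;
}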
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+index d875ce3ec759b..15ede7285fb5d 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+@@ -889,6 +889,13 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
+ err = -ENXIO;
+ goto err_exit;
+ }
++
++ /* Validate that the new hw_head_ is reasonable. */
++ if (hw_head_ >= ring->size) {
++ err = -ENXIO;
++ goto err_exit;
++ }
++
+ ring->hw_head = hw_head_;
+ err = aq_hw_err_from_flags(self);
+
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index 60dde29974bfe..df51be3cbe069 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -2585,8 +2585,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
+ device_set_wakeup_capable(&pdev->dev, 1);
+
+ priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
+- if (IS_ERR(priv->wol_clk))
+- return PTR_ERR(priv->wol_clk);
++ if (IS_ERR(priv->wol_clk)) {
++ ret = PTR_ERR(priv->wol_clk);
++ goto err_deregister_fixed_link;
++ }
+
+ /* Set the needed headroom once and for all */
+ BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index c4f4b13ac4691..c1100af5666b4 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -1217,7 +1217,6 @@ static void gem_rx_refill(struct macb_queue *queue)
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+- queue->rx_prepared_head++;
+ desc = macb_rx_desc(queue, entry);
+
+ if (!queue->rx_skbuff[entry]) {
+@@ -1256,6 +1255,7 @@ static void gem_rx_refill(struct macb_queue *queue)
+ dma_wmb();
+ desc->addr &= ~MACB_BIT(RX_USED);
+ }
++ queue->rx_prepared_head++;
+ }
+
+ /* Make descriptor updates visible to hardware */
+diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
+index 79df5a72877b8..0040dcaab9455 100644
+--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
++++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
+@@ -1399,8 +1399,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ /* alloc_etherdev ensures aligned and zeroed private structures */
+ dev = alloc_etherdev (sizeof (*tp));
+- if (!dev)
++ if (!dev) {
++ pci_disable_device(pdev);
+ return -ENOMEM;
++ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
+@@ -1785,6 +1787,7 @@ err_out_free_res:
+
+ err_out_free_netdev:
+ free_netdev (dev);
++ pci_disable_device(pdev);
+ return -ENODEV;
+ }
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 15bb6f001a04f..5f86cc1cfd09c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -3207,8 +3207,8 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
+ ice_for_each_q_vector(vsi, i) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+- coalesce[i].itr_tx = q_vector->tx.itr_setting;
+- coalesce[i].itr_rx = q_vector->rx.itr_setting;
++ coalesce[i].itr_tx = q_vector->tx.itr_settings;
++ coalesce[i].itr_rx = q_vector->rx.itr_settings;
+ coalesce[i].intrl = q_vector->intrl;
+
+ if (i < vsi->num_txq)
+@@ -3264,21 +3264,21 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
+ */
+ if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
+ rc = &vsi->q_vectors[i]->rx;
+- rc->itr_setting = coalesce[i].itr_rx;
++ rc->itr_settings = coalesce[i].itr_rx;
+ ice_write_itr(rc, rc->itr_setting);
+ } else if (i < vsi->alloc_rxq) {
+ rc = &vsi->q_vectors[i]->rx;
+- rc->itr_setting = coalesce[0].itr_rx;
++ rc->itr_settings = coalesce[0].itr_rx;
+ ice_write_itr(rc, rc->itr_setting);
+ }
+
+ if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
+ rc = &vsi->q_vectors[i]->tx;
+- rc->itr_setting = coalesce[i].itr_tx;
++ rc->itr_settings = coalesce[i].itr_tx;
+ ice_write_itr(rc, rc->itr_setting);
+ } else if (i < vsi->alloc_txq) {
+ rc = &vsi->q_vectors[i]->tx;
+- rc->itr_setting = coalesce[0].itr_tx;
++ rc->itr_settings = coalesce[0].itr_tx;
+ ice_write_itr(rc, rc->itr_setting);
+ }
+
+@@ -3292,12 +3292,12 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
+ for (; i < vsi->num_q_vectors; i++) {
+ /* transmit */
+ rc = &vsi->q_vectors[i]->tx;
+- rc->itr_setting = coalesce[0].itr_tx;
++ rc->itr_settings = coalesce[0].itr_tx;
+ ice_write_itr(rc, rc->itr_setting);
+
+ /* receive */
+ rc = &vsi->q_vectors[i]->rx;
+- rc->itr_setting = coalesce[0].itr_rx;
++ rc->itr_settings = coalesce[0].itr_rx;
+ ice_write_itr(rc, rc->itr_setting);
+
+ vsi->q_vectors[i]->intrl = coalesce[0].intrl;
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 7f6715eb862fe..30f055e1a92aa 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -5907,9 +5907,10 @@ static int ice_up_complete(struct ice_vsi *vsi)
+ ice_ptp_link_change(pf, pf->hw.pf_id, true);
+ }
+
+- /* clear this now, and the first stats read will be used as baseline */
+- vsi->stat_offsets_loaded = false;
+-
++ /* Perform an initial read of the statistics registers now to
++ * set the baseline so counters are ready when interface is up
++ */
++ ice_update_eth_stats(vsi);
+ ice_service_task_schedule(pf);
+
+ return 0;
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index 45ae97b8b97db..836c67f1aa465 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -499,12 +499,19 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
+ * This function must be called periodically to ensure that the cached value
+ * is never more than 2 seconds old. It must also be called whenever the PHC
+ * time has been changed.
++ *
++ * Return:
++ * * 0 - OK, successfully updated
++ * * -EAGAIN - PF was busy, need to reschedule the update
+ */
+-static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
++static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
+ {
+ u64 systime;
+ int i;
+
++ if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
++ return -EAGAIN;
++
+ /* Read the current PHC time */
+ systime = ice_ptp_read_src_clk_reg(pf, NULL);
+
+@@ -527,6 +534,9 @@ static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
+ WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
+ }
+ }
++ clear_bit(ICE_CFG_BUSY, pf->state);
++
++ return 0;
+ }
+
+ /**
+@@ -2322,17 +2332,18 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
+ {
+ struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
+ struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
++ int err;
+
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return;
+
+- ice_ptp_update_cached_phctime(pf);
++ err = ice_ptp_update_cached_phctime(pf);
+
+ ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
+
+- /* Run twice a second */
++ /* Run twice a second or reschedule if phc update failed */
+ kthread_queue_delayed_work(ptp->kworker, &ptp->work,
+- msecs_to_jiffies(500));
++ msecs_to_jiffies(err ? 10 : 500));
+ }
+
+ /**
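The ice_ptp change makes the cached-PHC update return -EAGAIN when the PF is busy and has the periodic worker retry after 10 ms instead of 500 ms. A compact sketch of that try-lock-and-back-off pattern, using a C11 atomic flag in place of the driver's ICE_CFG_BUSY state bit:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag busy = ATOMIC_FLAG_INIT;

/* Skip the update (and report -EAGAIN) if someone else holds the flag. */
static int update_cached_time(void)
{
        if (atomic_flag_test_and_set(&busy))
                return -11;                    /* -EAGAIN */

        /* ... refresh the cached timestamp here ... */

        atomic_flag_clear(&busy);
        return 0;
}

static unsigned int next_delay_ms(int err)
{
        return err ? 10 : 500;                 /* retry soon if we were busy */
}

int main(void)
{
        int err = update_cached_time();
        printf("first:  err=%d, next run in %u ms\n", err, next_delay_ms(err));

        atomic_flag_test_and_set(&busy);       /* simulate a concurrent holder */
        err = update_cached_time();
        printf("second: err=%d, next run in %u ms\n", err, next_delay_ms(err));
        return 0;
}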
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
+index b7b3bd4816f0d..ec4733272034f 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
+@@ -379,9 +379,14 @@ struct ice_ring_container {
+ /* this matches the maximum number of ITR bits, but in usec
+ * values, so it is shifted left one bit (bit zero is ignored)
+ */
+- u16 itr_setting:13;
+- u16 itr_reserved:2;
+- u16 itr_mode:1;
++ union {
++ struct {
++ u16 itr_setting:13;
++ u16 itr_reserved:2;
++ u16 itr_mode:1;
++ };
++ u16 itr_settings;
++ };
+ enum ice_container_type type;
+ };
+
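The ice_txrx.h hunk wraps the three ITR bitfields in an anonymous union with a plain u16 so the rebuild code can save and restore all of them, including itr_mode, in a single assignment. A stand-alone sketch of that union-over-bitfields trick (C11 anonymous members; exact bit layout is implementation-defined and the field names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct ring_container {
        union {
                struct {
                        uint16_t itr_setting  : 13;
                        uint16_t itr_reserved : 2;
                        uint16_t itr_mode     : 1;
                };
                uint16_t itr_settings;   /* all three fields as one word */
        };
};

int main(void)
{
        struct ring_container old = { .itr_setting = 100, .itr_mode = 1 };
        struct ring_container new_rc = { 0 };

        /* One assignment carries setting, reserved bits and mode together. */
        new_rc.itr_settings = old.itr_settings;

        printf("setting=%d mode=%d\n", new_rc.itr_setting, new_rc.itr_mode);
        return 0;
}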
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index c1e4ad65b02de..4e0abfe68cfdb 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -5512,7 +5512,8 @@ static void igb_watchdog_task(struct work_struct *work)
+ break;
+ }
+
+- if (adapter->link_speed != SPEED_1000)
++ if (adapter->link_speed != SPEED_1000 ||
++ !hw->phy.ops.read_reg)
+ goto no_wait;
+
+ /* wait for Remote receiver status OK */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 169e3524bb1c7..3500faf086710 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3829,6 +3829,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
+ if (netdev->features & NETIF_F_NTUPLE)
+ netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
+
++ features &= ~NETIF_F_GRO_HW;
++ if (netdev->features & NETIF_F_GRO_HW)
++ netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
++
+ return features;
+ }
+
+@@ -3861,6 +3865,25 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
+ }
+ }
+
++ if (params->xdp_prog) {
++ if (features & NETIF_F_LRO) {
++ netdev_warn(netdev, "LRO is incompatible with XDP\n");
++ features &= ~NETIF_F_LRO;
++ }
++ if (features & NETIF_F_GRO_HW) {
++ netdev_warn(netdev, "HW GRO is incompatible with XDP\n");
++ features &= ~NETIF_F_GRO_HW;
++ }
++ }
++
++ if (priv->xsk.refcnt) {
++ if (features & NETIF_F_GRO_HW) {
++ netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
++ priv->xsk.refcnt);
++ features &= ~NETIF_F_GRO_HW;
++ }
++ }
++
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
+ features &= ~NETIF_F_RXHASH;
+ if (netdev->features & NETIF_F_RXHASH)
+@@ -4805,10 +4828,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+
+- if (!!MLX5_CAP_GEN(mdev, shampo) &&
+- mlx5e_check_fragmented_striding_rq_cap(mdev))
+- netdev->hw_features |= NETIF_F_GRO_HW;
+-
+ if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
+ netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= NETIF_F_TSO;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 537c82b9aa530..b6f58d16d1453 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -2656,28 +2656,6 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
+ clean_tree(&root_ns->ns.node);
+ }
+
+-void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
+-{
+- struct mlx5_flow_steering *steering = dev->priv.steering;
+-
+- cleanup_root_ns(steering->root_ns);
+- cleanup_root_ns(steering->fdb_root_ns);
+- steering->fdb_root_ns = NULL;
+- kfree(steering->fdb_sub_ns);
+- steering->fdb_sub_ns = NULL;
+- cleanup_root_ns(steering->port_sel_root_ns);
+- cleanup_root_ns(steering->sniffer_rx_root_ns);
+- cleanup_root_ns(steering->sniffer_tx_root_ns);
+- cleanup_root_ns(steering->rdma_rx_root_ns);
+- cleanup_root_ns(steering->rdma_tx_root_ns);
+- cleanup_root_ns(steering->egress_root_ns);
+- mlx5_cleanup_fc_stats(dev);
+- kmem_cache_destroy(steering->ftes_cache);
+- kmem_cache_destroy(steering->fgs_cache);
+- mlx5_ft_pool_destroy(dev);
+- kfree(steering);
+-}
+-
+ static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
+ {
+ struct fs_prio *prio;
+@@ -3063,42 +3041,27 @@ cleanup:
+ return err;
+ }
+
+-int mlx5_init_fs(struct mlx5_core_dev *dev)
++void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
+ {
+- struct mlx5_flow_steering *steering;
+- int err = 0;
+-
+- err = mlx5_init_fc_stats(dev);
+- if (err)
+- return err;
+-
+- err = mlx5_ft_pool_init(dev);
+- if (err)
+- return err;
+-
+- steering = kzalloc(sizeof(*steering), GFP_KERNEL);
+- if (!steering) {
+- err = -ENOMEM;
+- goto err;
+- }
+-
+- steering->dev = dev;
+- dev->priv.steering = steering;
++ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+- if (mlx5_fs_dr_is_supported(dev))
+- steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
+- else
+- steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
++ cleanup_root_ns(steering->root_ns);
++ cleanup_root_ns(steering->fdb_root_ns);
++ steering->fdb_root_ns = NULL;
++ kfree(steering->fdb_sub_ns);
++ steering->fdb_sub_ns = NULL;
++ cleanup_root_ns(steering->port_sel_root_ns);
++ cleanup_root_ns(steering->sniffer_rx_root_ns);
++ cleanup_root_ns(steering->sniffer_tx_root_ns);
++ cleanup_root_ns(steering->rdma_rx_root_ns);
++ cleanup_root_ns(steering->rdma_tx_root_ns);
++ cleanup_root_ns(steering->egress_root_ns);
++}
+
+- steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
+- sizeof(struct mlx5_flow_group), 0,
+- 0, NULL);
+- steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+- 0, NULL);
+- if (!steering->ftes_cache || !steering->fgs_cache) {
+- err = -ENOMEM;
+- goto err;
+- }
++int mlx5_fs_core_init(struct mlx5_core_dev *dev)
++{
++ struct mlx5_flow_steering *steering = dev->priv.steering;
++ int err = 0;
+
+ if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+ (MLX5_CAP_GEN(dev, nic_flow_table))) ||
+@@ -3157,8 +3120,64 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
+ }
+
+ return 0;
++
++err:
++ mlx5_fs_core_cleanup(dev);
++ return err;
++}
++
++void mlx5_fs_core_free(struct mlx5_core_dev *dev)
++{
++ struct mlx5_flow_steering *steering = dev->priv.steering;
++
++ kmem_cache_destroy(steering->ftes_cache);
++ kmem_cache_destroy(steering->fgs_cache);
++ kfree(steering);
++ mlx5_ft_pool_destroy(dev);
++ mlx5_cleanup_fc_stats(dev);
++}
++
++int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
++{
++ struct mlx5_flow_steering *steering;
++ int err = 0;
++
++ err = mlx5_init_fc_stats(dev);
++ if (err)
++ return err;
++
++ err = mlx5_ft_pool_init(dev);
++ if (err)
++ goto err;
++
++ steering = kzalloc(sizeof(*steering), GFP_KERNEL);
++ if (!steering) {
++ err = -ENOMEM;
++ goto err;
++ }
++
++ steering->dev = dev;
++ dev->priv.steering = steering;
++
++ if (mlx5_fs_dr_is_supported(dev))
++ steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
++ else
++ steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
++
++ steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
++ sizeof(struct mlx5_flow_group), 0,
++ 0, NULL);
++ steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
++ 0, NULL);
++ if (!steering->ftes_cache || !steering->fgs_cache) {
++ err = -ENOMEM;
++ goto err;
++ }
++
++ return 0;
++
+ err:
+- mlx5_cleanup_fs(dev);
++ mlx5_fs_core_free(dev);
+ return err;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+index 5469b08d635f1..6366bf50a564b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+@@ -293,8 +293,10 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
+ int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
+ enum mlx5_flow_steering_mode mode);
+
+-int mlx5_init_fs(struct mlx5_core_dev *dev);
+-void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
++int mlx5_fs_core_alloc(struct mlx5_core_dev *dev);
++void mlx5_fs_core_free(struct mlx5_core_dev *dev);
++int mlx5_fs_core_init(struct mlx5_core_dev *dev);
++void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);
+
+ int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
+ void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+index 862f5b7cb2106..1c771287bee53 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+@@ -8,7 +8,8 @@
+ enum {
+ MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
+ MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
+- MLX5_FW_RESET_FLAGS_PENDING_COMP
++ MLX5_FW_RESET_FLAGS_PENDING_COMP,
++ MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
+ };
+
+ struct mlx5_fw_reset {
+@@ -165,7 +166,10 @@ static void poll_sync_reset(struct timer_list *t)
+
+ if (fatal_error) {
+ mlx5_core_warn(dev, "Got Device Reset\n");
+- queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
++ if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
++ queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
++ else
++ mlx5_core_err(dev, "Device is being removed, Drop new reset work\n");
+ return;
+ }
+
+@@ -390,9 +394,12 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
+ struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
+ struct mlx5_eqe *eqe = data;
+
++ if (test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
++ return NOTIFY_DONE;
++
+ switch (eqe->sub_type) {
+ case MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT:
+- queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
++ queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
+ break;
+ case MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT:
+ mlx5_sync_reset_events_handle(fw_reset, eqe);
+@@ -436,6 +443,18 @@ void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
+ mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
+ }
+
++void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
++{
++ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
++
++ set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
++ cancel_work_sync(&fw_reset->fw_live_patch_work);
++ cancel_work_sync(&fw_reset->reset_request_work);
++ cancel_work_sync(&fw_reset->reset_reload_work);
++ cancel_work_sync(&fw_reset->reset_now_work);
++ cancel_work_sync(&fw_reset->reset_abort_work);
++}
++
+ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+index 7761ee5fc7d0a..372046e173e78 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+@@ -15,6 +15,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
+ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
+ void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
+ void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev);
++void mlx5_drain_fw_reset(struct mlx5_core_dev *dev);
+ int mlx5_fw_reset_init(struct mlx5_core_dev *dev);
+ void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index bba72b220cc3f..4e49dca94bc38 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -939,6 +939,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
+ goto err_sf_table_cleanup;
+ }
+
++ err = mlx5_fs_core_alloc(dev);
++ if (err) {
++ mlx5_core_err(dev, "Failed to alloc flow steering\n");
++ goto err_fs;
++ }
++
+ dev->dm = mlx5_dm_create(dev);
+ if (IS_ERR(dev->dm))
+ mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
+@@ -949,6 +955,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
+
+ return 0;
+
++err_fs:
++ mlx5_sf_table_cleanup(dev);
+ err_sf_table_cleanup:
+ mlx5_sf_hw_table_cleanup(dev);
+ err_sf_hw_table_cleanup:
+@@ -986,6 +994,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
+ mlx5_hv_vhca_destroy(dev->hv_vhca);
+ mlx5_fw_tracer_destroy(dev->tracer);
+ mlx5_dm_cleanup(dev);
++ mlx5_fs_core_free(dev);
+ mlx5_sf_table_cleanup(dev);
+ mlx5_sf_hw_table_cleanup(dev);
+ mlx5_vhca_event_cleanup(dev);
+@@ -1192,7 +1201,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
+ goto err_tls_start;
+ }
+
+- err = mlx5_init_fs(dev);
++ err = mlx5_fs_core_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to init flow steering\n");
+ goto err_fs;
+@@ -1237,7 +1246,7 @@ err_ec:
+ err_vhca:
+ mlx5_vhca_event_stop(dev);
+ err_set_hca:
+- mlx5_cleanup_fs(dev);
++ mlx5_fs_core_cleanup(dev);
+ err_fs:
+ mlx5_accel_tls_cleanup(dev);
+ err_tls_start:
+@@ -1266,7 +1275,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
+ mlx5_ec_cleanup(dev);
+ mlx5_sf_hw_table_destroy(dev);
+ mlx5_vhca_event_stop(dev);
+- mlx5_cleanup_fs(dev);
++ mlx5_fs_core_cleanup(dev);
+ mlx5_accel_ipsec_cleanup(dev);
+ mlx5_accel_tls_cleanup(dev);
+ mlx5_fpga_device_stop(dev);
+@@ -1619,6 +1628,10 @@ static void remove_one(struct pci_dev *pdev)
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
+
++ /* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
++ * fw_reset before unregistering the devlink.
++ */
++ mlx5_drain_fw_reset(dev);
+ devlink_unregister(devlink);
+ mlx5_crdump_disable(dev);
+ mlx5_drain_health_wq(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+index c61a5e83c78c4..8622af6d6bf83 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+@@ -530,6 +530,37 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
+ return 0;
+ }
+
++static void dr_action_modify_ttl_adjust(struct mlx5dr_domain *dmn,
++ struct mlx5dr_ste_actions_attr *attr,
++ bool rx_rule,
++ bool *recalc_cs_required)
++{
++ *recalc_cs_required = false;
++
++ /* if device supports csum recalculation - no adjustment needed */
++ if (mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps))
++ return;
++
++ /* no adjustment needed on TX rules */
++ if (!rx_rule)
++ return;
++
++ if (!MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify)) {
++ /* Ignore the modify TTL action.
++ * It is always kept as last HW action.
++ */
++ attr->modify_actions--;
++ return;
++ }
++
++ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
++ /* Due to a HW bug on some devices, modifying TTL on RX flows
++ * will cause an incorrect checksum calculation. In such cases
++ * we will use a FW table to recalculate the checksum.
++ */
++ *recalc_cs_required = true;
++}
++
+ static void dr_action_print_sequence(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action *actions[],
+ int last_idx)
+@@ -649,8 +680,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+ case DR_ACTION_TYP_MODIFY_HDR:
+ attr.modify_index = action->rewrite->index;
+ attr.modify_actions = action->rewrite->num_of_actions;
+- recalc_cs_required = action->rewrite->modify_ttl &&
+- !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps);
++ if (action->rewrite->modify_ttl)
++ dr_action_modify_ttl_adjust(dmn, &attr, rx_rule,
++ &recalc_cs_required);
+ break;
+ case DR_ACTION_TYP_L2_TO_TNL_L2:
+ case DR_ACTION_TYP_L2_TO_TNL_L3:
+@@ -737,12 +769,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+ *new_hw_ste_arr_sz = nic_matcher->num_of_builders;
+ last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);
+
+- /* Due to a HW bug in some devices, modifying TTL on RX flows will
+- * cause an incorrect checksum calculation. In this case we will
+- * use a FW table to recalculate.
+- */
+- if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
+- rx_rule && recalc_cs_required && dest_action) {
++ if (recalc_cs_required && dest_action) {
+ ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
+ if (ret) {
+ mlx5dr_err(dmn,
+@@ -847,7 +874,8 @@ struct mlx5dr_action *
+ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action_dest *dests,
+ u32 num_of_dests,
+- bool ignore_flow_level)
++ bool ignore_flow_level,
++ u32 flow_source)
+ {
+ struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
+ struct mlx5dr_action **ref_actions;
+@@ -919,7 +947,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+ reformat_req,
+ &action->dest_tbl->fw_tbl.id,
+ &action->dest_tbl->fw_tbl.group_id,
+- ignore_flow_level);
++ ignore_flow_level,
++ flow_source);
+ if (ret)
+ goto free_action;
+
+@@ -1560,12 +1589,6 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action)
+ return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
+ }
+
+-static bool dr_action_modify_ttl_ignore(struct mlx5dr_domain *dmn)
+-{
+- return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) &&
+- !MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify);
+-}
+-
+ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
+ u32 max_hw_actions,
+ u32 num_sw_actions,
+@@ -1577,6 +1600,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
+ const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
+ struct mlx5dr_domain *dmn = action->rewrite->dmn;
++ __be64 *modify_ttl_sw_action = NULL;
+ int ret, i, hw_idx = 0;
+ __be64 *sw_action;
+ __be64 hw_action;
+@@ -1589,8 +1613,14 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
+ action->rewrite->allow_rx = 1;
+ action->rewrite->allow_tx = 1;
+
+- for (i = 0; i < num_sw_actions; i++) {
+- sw_action = &sw_actions[i];
++ for (i = 0; i < num_sw_actions || modify_ttl_sw_action; i++) {
++ /* modify TTL is handled separately, as a last action */
++ if (i == num_sw_actions) {
++ sw_action = modify_ttl_sw_action;
++ modify_ttl_sw_action = NULL;
++ } else {
++ sw_action = &sw_actions[i];
++ }
+
+ ret = dr_action_modify_check_field_limitation(action,
+ sw_action);
+@@ -1599,10 +1629,9 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
+
+ if (!(*modify_ttl) &&
+ dr_action_modify_check_is_ttl_modify(sw_action)) {
+- if (dr_action_modify_ttl_ignore(dmn))
+- continue;
+-
++ modify_ttl_sw_action = sw_action;
+ *modify_ttl = true;
++ continue;
+ }
+
+ /* Convert SW action to HW action */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+index 68a4c32d5f34c..f05ef0cd54bac 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+@@ -104,7 +104,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+ bool reformat_req,
+ u32 *tbl_id,
+ u32 *group_id,
+- bool ignore_flow_level)
++ bool ignore_flow_level,
++ u32 flow_source)
+ {
+ struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
+ struct mlx5dr_cmd_fte_info fte_info = {};
+@@ -139,6 +140,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+ fte_info.val = val;
+ fte_info.dest_arr = dest;
+ fte_info.ignore_flow_level = ignore_flow_level;
++ fte_info.flow_context.flow_source = flow_source;
+
+ ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
+ if (ret) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+index 2d62950f7a294..134c8484c9016 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+@@ -419,7 +419,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
+ * encapsulation. The reason for that is that we support
+ * modify headers for outer headers only
+ */
+- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
++ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
+ dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
+ dr_ste_v0_set_rewrite_actions(last_ste,
+ attr->modify_actions,
+@@ -511,7 +511,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
+ }
+ }
+
+- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
++ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
+ if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+index 55fcb751e24a4..64f41e7938e14 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+@@ -1463,7 +1463,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+ bool reformat_req,
+ u32 *tbl_id,
+ u32 *group_id,
+- bool ignore_flow_level);
++ bool ignore_flow_level,
++ u32 flow_source);
+ void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
+ u32 group_id);
+ #endif /* _DR_TYPES_H_ */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+index 3f311462bedf3..05393fe11132b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+@@ -520,6 +520,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
+ } else if (num_term_actions > 1) {
+ bool ignore_flow_level =
+ !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
++ u32 flow_source = fte->flow_context.flow_source;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+@@ -529,7 +530,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
+ tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
+ term_actions,
+ num_term_actions,
+- ignore_flow_level);
++ ignore_flow_level,
++ flow_source);
+ if (!tmp_action) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+index dfa223415fe24..74a7a2f4d50d6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+@@ -96,7 +96,8 @@ struct mlx5dr_action *
+ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action_dest *dests,
+ u32 num_of_dests,
+- bool ignore_flow_level);
++ bool ignore_flow_level,
++ u32 flow_source);
+
+ struct mlx5dr_action *mlx5dr_action_create_drop(void);
+
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+index 1f60fd125a1dc..fee148bbf13ea 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+@@ -100,6 +100,24 @@ static int lan966x_create_targets(struct platform_device *pdev,
+ return 0;
+ }
+
++static bool lan966x_port_unique_address(struct net_device *dev)
++{
++ struct lan966x_port *port = netdev_priv(dev);
++ struct lan966x *lan966x = port->lan966x;
++ int p;
++
++ for (p = 0; p < lan966x->num_phys_ports; ++p) {
++ port = lan966x->ports[p];
++ if (!port || port->dev == dev)
++ continue;
++
++ if (ether_addr_equal(dev->dev_addr, port->dev->dev_addr))
++ return false;
++ }
++
++ return true;
++}
++
+ static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
+ {
+ struct lan966x_port *port = netdev_priv(dev);
+@@ -107,16 +125,26 @@ static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
+ const struct sockaddr *addr = p;
+ int ret;
+
++ if (ether_addr_equal(addr->sa_data, dev->dev_addr))
++ return 0;
++
+ /* Learn the new net device MAC address in the mac table. */
+ ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
+ if (ret)
+ return ret;
+
++ /* If there is another port with the same address as the dev, then don't
++ * delete it from the MAC table
++ */
++ if (!lan966x_port_unique_address(dev))
++ goto out;
++
+ /* Then forget the previous one. */
+ ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
+ if (ret)
+ return ret;
+
++out:
+ eth_hw_addr_set(dev, addr->sa_data);
+ return ret;
+ }
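The lan966x fix only forgets the old MAC address from the table when no other port still uses it, by scanning the port array first. A rough sketch of that "check the address is unique before removing it" step, with simplified types in place of the driver's port structures:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NUM_PORTS 4

struct port {
        unsigned char addr[6];
};

static struct port ports[NUM_PORTS];

/* True if no other port currently uses the same address as @self. */
static bool addr_unique(const struct port *self)
{
        for (int p = 0; p < NUM_PORTS; p++) {
                if (&ports[p] == self)
                        continue;
                if (!memcmp(ports[p].addr, self->addr, 6))
                        return false;
        }
        return true;
}

int main(void)
{
        memcpy(ports[0].addr, "\x02\x00\x00\x00\x00\x01", 6);
        memcpy(ports[1].addr, "\x02\x00\x00\x00\x00\x01", 6);

        /* Address is shared with port 1, so its table entry must be kept. */
        printf("port 0 unique: %d\n", addr_unique(&ports[0]));
        return 0;
}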
+diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
+index b30589a135c24..06f4d9a9e9388 100644
+--- a/drivers/net/ethernet/qlogic/qla3xxx.c
++++ b/drivers/net/ethernet/qlogic/qla3xxx.c
+@@ -3614,7 +3614,8 @@ static void ql_reset_work(struct work_struct *work)
+ qdev->mem_map_registers;
+ unsigned long hw_flags;
+
+- if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
++ if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
++ test_bit(QL_RESET_START, &qdev->flags)) {
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+
+ /*
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+index fcf17d8a0494b..644bb54f5f020 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+@@ -181,7 +181,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
+ return -ENOMEM;
+
+ /* Enable pci device */
+- ret = pci_enable_device(pdev);
++ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
+ __func__);
+@@ -241,8 +241,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
+ pcim_iounmap_regions(pdev, BIT(i));
+ break;
+ }
+-
+- pci_disable_device(pdev);
+ }
+
+ static int __maybe_unused stmmac_pci_suspend(struct device *dev)
+diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
+index bc981043cc808..a701178a1d139 100644
+--- a/drivers/net/ipa/gsi.c
++++ b/drivers/net/ipa/gsi.c
+@@ -1367,9 +1367,10 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
+ struct gsi_event *event_done;
+ struct gsi_event *event;
+ struct gsi_trans *trans;
++ u32 trans_count = 0;
+ u32 byte_count = 0;
+- u32 old_index;
+ u32 event_avail;
++ u32 old_index;
+
+ trans_info = &channel->trans_info;
+
+@@ -1390,6 +1391,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
+ do {
+ trans->len = __le16_to_cpu(event->len);
+ byte_count += trans->len;
++ trans_count++;
+
+ /* Move on to the next event and transaction */
+ if (--event_avail)
+@@ -1401,7 +1403,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
+
+ /* We record RX bytes when they are received */
+ channel->byte_count += byte_count;
+- channel->trans_count++;
++ channel->trans_count += trans_count;
+ }
+
+ /* Initialize a ring, including allocating DMA memory for its entries */
+diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
+index 68291a3efd040..2ecfc17544a6a 100644
+--- a/drivers/net/ipa/ipa_endpoint.c
++++ b/drivers/net/ipa/ipa_endpoint.c
+@@ -1169,13 +1169,12 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
+ return;
+
+ skb = __dev_alloc_skb(len, GFP_ATOMIC);
+- if (!skb)
+- return;
+-
+- /* Copy the data into the socket buffer and receive it */
+- skb_put(skb, len);
+- memcpy(skb->data, data, len);
+- skb->truesize += extra;
++ if (skb) {
++ /* Copy the data into the socket buffer and receive it */
++ skb_put(skb, len);
++ memcpy(skb->data, data, len);
++ skb->truesize += extra;
++ }
+
+ ipa_modem_skb_rx(endpoint->netdev, skb);
+ }
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 3619520340b74..e172743948ed7 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -988,6 +988,7 @@ static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
+ path->encap.proto = htons(ETH_P_PPP_SES);
+ path->encap.id = be16_to_cpu(po->num);
+ memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
++ memcpy(ctx->daddr, po->pppoe_pa.remote, ETH_ALEN);
+ path->dev = ctx->dev;
+ ctx->dev = dev;
+
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index d9d90baac72a2..93e8d119d45f6 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -589,6 +589,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
+ if (dma_mapping_error(&adapter->pdev->dev,
+ rbi->dma_addr)) {
+ dev_kfree_skb_any(rbi->skb);
++ rbi->skb = NULL;
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+@@ -613,6 +614,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
+ if (dma_mapping_error(&adapter->pdev->dev,
+ rbi->dma_addr)) {
+ put_page(rbi->page);
++ rbi->page = NULL;
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+@@ -1666,6 +1668,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
+ u32 i, ring_idx;
+ struct Vmxnet3_RxDesc *rxd;
+
++ /* ring has already been cleaned up */
++ if (!rq->rx_ring[0].base)
++ return;
++
+ for (ring_idx = 0; ring_idx < 2; ring_idx++) {
+ for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
+ #ifdef __BIG_ENDIAN_BITFIELD
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 10f7c79caac2d..0abd772c57f08 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4422,6 +4422,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
+ if (ctrl->queue_count > 1) {
+ nvme_queue_scan(ctrl);
+ nvme_start_queues(ctrl);
++ nvme_mpath_update(ctrl);
+ }
+ }
+ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index a703f1f5fb64c..189175fff7e44 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -635,8 +635,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
+ ns->ana_grpid = le32_to_cpu(desc->grpid);
+ ns->ana_state = desc->state;
+ clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
+-
+- if (nvme_state_is_live(ns->ana_state))
++ /*
++ * nvme_mpath_set_live() will trigger I/O to the multipath path device
++ * and in turn to this path device. However we cannot accept this I/O
++ * if the controller is not live. This may deadlock if called from
++ * nvme_mpath_init_identify() and the ctrl will never complete
++ * initialization, preventing I/O from completing. For this case we
++ * will reprocess the ANA log page in nvme_mpath_update() once the
++ * controller is ready.
++ */
++ if (nvme_state_is_live(ns->ana_state) &&
++ ns->ctrl->state == NVME_CTRL_LIVE)
+ nvme_mpath_set_live(ns);
+ }
+
+@@ -723,6 +732,18 @@ static void nvme_ana_work(struct work_struct *work)
+ nvme_read_ana_log(ctrl);
+ }
+
++void nvme_mpath_update(struct nvme_ctrl *ctrl)
++{
++ u32 nr_change_groups = 0;
++
++ if (!ctrl->ana_log_buf)
++ return;
++
++ mutex_lock(&ctrl->ana_lock);
++ nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
++ mutex_unlock(&ctrl->ana_lock);
++}
++
+ static void nvme_anatt_timeout(struct timer_list *t)
+ {
+ struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 68c42e8311172..85f3f55c71c58 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -800,6 +800,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
+ void nvme_mpath_remove_disk(struct nvme_ns_head *head);
+ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
+ void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
++void nvme_mpath_update(struct nvme_ctrl *ctrl);
+ void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
+ void nvme_mpath_stop(struct nvme_ctrl *ctrl);
+ bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
+@@ -874,6 +875,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
+ "Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
+ return 0;
+ }
++static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
++{
++}
+ static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
+ {
+ }
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index e4b79bee62068..94a0b933b1335 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3470,7 +3470,10 @@ static const struct pci_device_id nvme_id_table[] = {
+ NVME_QUIRK_128_BYTES_SQES |
+ NVME_QUIRK_SHARED_TAGS |
+ NVME_QUIRK_SKIP_CID_GEN },
+-
++ { PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */
++ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY|
++ NVME_QUIRK_NO_DEEPEST_PS |
++ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { 0, }
+ };
+diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
+index 6fb24746de069..c3a9df5545cf4 100644
+--- a/drivers/nvme/target/admin-cmd.c
++++ b/drivers/nvme/target/admin-cmd.c
+@@ -984,7 +984,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
+ ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
+ mutex_unlock(&ctrl->lock);
+
+- schedule_work(&ctrl->async_event_work);
++ queue_work(nvmet_wq, &ctrl->async_event_work);
+ }
+
+ void nvmet_execute_keep_alive(struct nvmet_req *req)
+diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
+index 496d775c67707..cea30e4f50533 100644
+--- a/drivers/nvme/target/configfs.c
++++ b/drivers/nvme/target/configfs.c
+@@ -1554,7 +1554,7 @@ static void nvmet_port_release(struct config_item *item)
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ /* Let inflight controllers teardown complete */
+- flush_scheduled_work();
++ flush_workqueue(nvmet_wq);
+ list_del(&port->global_entry);
+
+ kfree(port->ana_state);
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 626caf6f1e4b4..1c026a21f2185 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -20,6 +20,9 @@ struct workqueue_struct *zbd_wq;
+ static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
+ static DEFINE_IDA(cntlid_ida);
+
++struct workqueue_struct *nvmet_wq;
++EXPORT_SYMBOL_GPL(nvmet_wq);
++
+ /*
+ * This read/write semaphore is used to synchronize access to configuration
+ * information on a target system that will result in discovery log page
+@@ -205,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
+ list_add_tail(&aen->entry, &ctrl->async_events);
+ mutex_unlock(&ctrl->lock);
+
+- schedule_work(&ctrl->async_event_work);
++ queue_work(nvmet_wq, &ctrl->async_event_work);
+ }
+
+ static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
+@@ -385,7 +388,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
+ if (reset_tbkas) {
+ pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
+ ctrl->cntlid);
+- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
++ queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ return;
+ }
+
+@@ -403,7 +406,7 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
+ pr_debug("ctrl %d start keep-alive timer for %d secs\n",
+ ctrl->cntlid, ctrl->kato);
+
+- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
++ queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ }
+
+ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
+@@ -1479,7 +1482,7 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
+ mutex_lock(&ctrl->lock);
+ if (!(ctrl->csts & NVME_CSTS_CFS)) {
+ ctrl->csts |= NVME_CSTS_CFS;
+- schedule_work(&ctrl->fatal_err_work);
++ queue_work(nvmet_wq, &ctrl->fatal_err_work);
+ }
+ mutex_unlock(&ctrl->lock);
+ }
+@@ -1620,9 +1623,15 @@ static int __init nvmet_init(void)
+ goto out_free_zbd_work_queue;
+ }
+
++ nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
++ if (!nvmet_wq) {
++ error = -ENOMEM;
++ goto out_free_buffered_work_queue;
++ }
++
+ error = nvmet_init_discovery();
+ if (error)
+- goto out_free_work_queue;
++ goto out_free_nvmet_work_queue;
+
+ error = nvmet_init_configfs();
+ if (error)
+@@ -1631,7 +1640,9 @@ static int __init nvmet_init(void)
+
+ out_exit_discovery:
+ nvmet_exit_discovery();
+-out_free_work_queue:
++out_free_nvmet_work_queue:
++ destroy_workqueue(nvmet_wq);
++out_free_buffered_work_queue:
+ destroy_workqueue(buffered_io_wq);
+ out_free_zbd_work_queue:
+ destroy_workqueue(zbd_wq);
+@@ -1643,6 +1654,7 @@ static void __exit nvmet_exit(void)
+ nvmet_exit_configfs();
+ nvmet_exit_discovery();
+ ida_destroy(&cntlid_ida);
++ destroy_workqueue(nvmet_wq);
+ destroy_workqueue(buffered_io_wq);
+ destroy_workqueue(zbd_wq);
+
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index 22b5108168a6a..c43bc5e1c7a28 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -1491,7 +1491,7 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ if (!nvmet_fc_tgt_a_get(assoc))
+ continue;
+- if (!schedule_work(&assoc->del_work))
++ if (!queue_work(nvmet_wq, &assoc->del_work))
+ /* already deleting - release local reference */
+ nvmet_fc_tgt_a_put(assoc);
+ }
+@@ -1546,7 +1546,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
+ continue;
+ assoc->hostport->invalid = 1;
+ noassoc = false;
+- if (!schedule_work(&assoc->del_work))
++ if (!queue_work(nvmet_wq, &assoc->del_work))
+ /* already deleting - release local reference */
+ nvmet_fc_tgt_a_put(assoc);
+ }
+@@ -1592,7 +1592,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
+ nvmet_fc_tgtport_put(tgtport);
+
+ if (found_ctrl) {
+- if (!schedule_work(&assoc->del_work))
++ if (!queue_work(nvmet_wq, &assoc->del_work))
+ /* already deleting - release local reference */
+ nvmet_fc_tgt_a_put(assoc);
+ return;
+@@ -2060,7 +2060,7 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
+ iod->rqstdatalen = lsreqbuf_len;
+ iod->hosthandle = hosthandle;
+
+- schedule_work(&iod->work);
++ queue_work(nvmet_wq, &iod->work);
+
+ return 0;
+ }
+diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
+index 54606f1872b4a..5c16372f3b533 100644
+--- a/drivers/nvme/target/fcloop.c
++++ b/drivers/nvme/target/fcloop.c
+@@ -360,7 +360,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
+ spin_lock(&rport->lock);
+ list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ spin_unlock(&rport->lock);
+- schedule_work(&rport->ls_work);
++ queue_work(nvmet_wq, &rport->ls_work);
+ return ret;
+ }
+
+@@ -393,7 +393,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
+ spin_lock(&rport->lock);
+ list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ spin_unlock(&rport->lock);
+- schedule_work(&rport->ls_work);
++ queue_work(nvmet_wq, &rport->ls_work);
+ }
+
+ return 0;
+@@ -448,7 +448,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
+ spin_lock(&tport->lock);
+ list_add_tail(&tport->ls_list, &tls_req->ls_list);
+ spin_unlock(&tport->lock);
+- schedule_work(&tport->ls_work);
++ queue_work(nvmet_wq, &tport->ls_work);
+ return ret;
+ }
+
+@@ -480,7 +480,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
+ spin_lock(&tport->lock);
+ list_add_tail(&tport->ls_list, &tls_req->ls_list);
+ spin_unlock(&tport->lock);
+- schedule_work(&tport->ls_work);
++ queue_work(nvmet_wq, &tport->ls_work);
+ }
+
+ return 0;
+@@ -520,7 +520,7 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
+ tgt_rscn->tport = tgtport->private;
+ INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
+
+- schedule_work(&tgt_rscn->work);
++ queue_work(nvmet_wq, &tgt_rscn->work);
+ }
+
+ static void
+@@ -739,7 +739,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
+ INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
+ kref_init(&tfcp_req->ref);
+
+- schedule_work(&tfcp_req->fcp_rcv_work);
++ queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
+
+ return 0;
+ }
+@@ -921,7 +921,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
+ {
+ struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+
+- schedule_work(&tfcp_req->tio_done_work);
++ queue_work(nvmet_wq, &tfcp_req->tio_done_work);
+ }
+
+ static void
+@@ -976,7 +976,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
+
+ if (abortio)
+ /* leave the reference while the work item is scheduled */
+- WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
++ WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
+ else {
+ /*
+ * as the io has already had the done callback made,
+diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
+index 6be6e59d273bb..80f079a7015d6 100644
+--- a/drivers/nvme/target/io-cmd-file.c
++++ b/drivers/nvme/target/io-cmd-file.c
+@@ -292,7 +292,7 @@ static void nvmet_file_execute_flush(struct nvmet_req *req)
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+ INIT_WORK(&req->f.work, nvmet_file_flush_work);
+- schedule_work(&req->f.work);
++ queue_work(nvmet_wq, &req->f.work);
+ }
+
+ static void nvmet_file_execute_discard(struct nvmet_req *req)
+@@ -352,7 +352,7 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
+ if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
+ return;
+ INIT_WORK(&req->f.work, nvmet_file_dsm_work);
+- schedule_work(&req->f.work);
++ queue_work(nvmet_wq, &req->f.work);
+ }
+
+ static void nvmet_file_write_zeroes_work(struct work_struct *w)
+@@ -382,7 +382,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+ INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
+- schedule_work(&req->f.work);
++ queue_work(nvmet_wq, &req->f.work);
+ }
+
+ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
+index eb1094254c823..2a968eeddda37 100644
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -166,7 +166,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ iod->req.transfer_len = blk_rq_payload_bytes(req);
+ }
+
+- schedule_work(&iod->work);
++ queue_work(nvmet_wq, &iod->work);
+ return BLK_STS_OK;
+ }
+
+@@ -187,7 +187,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
+ return;
+ }
+
+- schedule_work(&iod->work);
++ queue_work(nvmet_wq, &iod->work);
+ }
+
+ static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index af193423c10bb..ff26dbde8c1e9 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -366,6 +366,7 @@ struct nvmet_req {
+
+ extern struct workqueue_struct *buffered_io_wq;
+ extern struct workqueue_struct *zbd_wq;
++extern struct workqueue_struct *nvmet_wq;
+
+ static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
+ {
+diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
+index 9e5b89ae29dfe..2b5031b646e92 100644
+--- a/drivers/nvme/target/passthru.c
++++ b/drivers/nvme/target/passthru.c
+@@ -281,7 +281,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
+ if (req->p.use_workqueue || effects) {
+ INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
+ req->p.rq = rq;
+- schedule_work(&req->p.work);
++ queue_work(nvmet_wq, &req->p.work);
+ } else {
+ rq->end_io_data = req;
+ blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
+diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
+index 1deb4043e2425..0ebfe21911655 100644
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -1584,7 +1584,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
+
+ if (queue->host_qid == 0) {
+ /* Let inflight controller teardown complete */
+- flush_scheduled_work();
++ flush_workqueue(nvmet_wq);
+ }
+
+ ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
+@@ -1669,7 +1669,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
+
+ if (disconnect) {
+ rdma_disconnect(queue->cm_id);
+- schedule_work(&queue->release_work);
++ queue_work(nvmet_wq, &queue->release_work);
+ }
+ }
+
+@@ -1699,7 +1699,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ pr_err("failed to connect queue %d\n", queue->idx);
+- schedule_work(&queue->release_work);
++ queue_work(nvmet_wq, &queue->release_work);
+ }
+
+ /**
+@@ -1773,7 +1773,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ if (!queue) {
+ struct nvmet_rdma_port *port = cm_id->context;
+
+- schedule_delayed_work(&port->repair_work, 0);
++ queue_delayed_work(nvmet_wq, &port->repair_work, 0);
+ break;
+ }
+ fallthrough;
+@@ -1903,7 +1903,7 @@ static void nvmet_rdma_repair_port_work(struct work_struct *w)
+ nvmet_rdma_disable_port(port);
+ ret = nvmet_rdma_enable_port(port);
+ if (ret)
+- schedule_delayed_work(&port->repair_work, 5 * HZ);
++ queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
+ }
+
+ static int nvmet_rdma_add_port(struct nvmet_port *nport)
+@@ -2053,7 +2053,7 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+- flush_scheduled_work();
++ flush_workqueue(nvmet_wq);
+ }
+
+ static struct ib_client nvmet_rdma_ib_client = {
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 7c1c43ce466bc..31bab7477d531 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -1269,7 +1269,7 @@ static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
+ spin_lock(&queue->state_lock);
+ if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
+ queue->state = NVMET_TCP_Q_DISCONNECTING;
+- schedule_work(&queue->release_work);
++ queue_work(nvmet_wq, &queue->release_work);
+ }
+ spin_unlock(&queue->state_lock);
+ }
+@@ -1684,7 +1684,7 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
+ goto out;
+
+ if (sk->sk_state == TCP_LISTEN)
+- schedule_work(&port->accept_work);
++ queue_work(nvmet_wq, &port->accept_work);
+ out:
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
+@@ -1815,7 +1815,7 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
+
+ if (sq->qid == 0) {
+ /* Let inflight controller teardown complete */
+- flush_scheduled_work();
++ flush_workqueue(nvmet_wq);
+ }
+
+ queue->nr_cmds = sq->size * 2;
+@@ -1876,12 +1876,12 @@ static void __exit nvmet_tcp_exit(void)
+
+ nvmet_unregister_transport(&nvmet_tcp_ops);
+
+- flush_scheduled_work();
++ flush_workqueue(nvmet_wq);
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+- flush_scheduled_work();
++ flush_workqueue(nvmet_wq);
+
+ destroy_workqueue(nvmet_tcp_wq);
+ }
+diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
+index 5be382b19d9a7..27169c0231809 100644
+--- a/drivers/pci/controller/pci-aardvark.c
++++ b/drivers/pci/controller/pci-aardvark.c
+@@ -272,7 +272,6 @@ struct advk_pcie {
+ u32 actions;
+ } wins[OB_WIN_COUNT];
+ u8 wins_count;
+- int irq;
+ struct irq_domain *rp_irq_domain;
+ struct irq_domain *irq_domain;
+ struct irq_chip irq_chip;
+@@ -1570,26 +1569,21 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
+ }
+ }
+
+-static void advk_pcie_irq_handler(struct irq_desc *desc)
++static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
+ {
+- struct advk_pcie *pcie = irq_desc_get_handler_data(desc);
+- struct irq_chip *chip = irq_desc_get_chip(desc);
+- u32 val, mask, status;
++ struct advk_pcie *pcie = arg;
++ u32 status;
+
+- chained_irq_enter(chip, desc);
++ status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
++ if (!(status & PCIE_IRQ_CORE_INT))
++ return IRQ_NONE;
+
+- val = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
+- mask = advk_readl(pcie, HOST_CTRL_INT_MASK_REG);
+- status = val & ((~mask) & PCIE_IRQ_ALL_MASK);
++ advk_pcie_handle_int(pcie);
+
+- if (status & PCIE_IRQ_CORE_INT) {
+- advk_pcie_handle_int(pcie);
++ /* Clear interrupt */
++ advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
+
+- /* Clear interrupt */
+- advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
+- }
+-
+- chained_irq_exit(chip, desc);
++ return IRQ_HANDLED;
+ }
+
+ static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+@@ -1671,7 +1665,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
+ struct advk_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *entry;
+- int ret;
++ int ret, irq;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
+ if (!bridge)
+@@ -1757,9 +1751,17 @@ static int advk_pcie_probe(struct platform_device *pdev)
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+- pcie->irq = platform_get_irq(pdev, 0);
+- if (pcie->irq < 0)
+- return pcie->irq;
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0)
++ return irq;
++
++ ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
++ IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
++ pcie);
++ if (ret) {
++ dev_err(dev, "Failed to register interrupt\n");
++ return ret;
++ }
+
+ pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
+ "reset-gpios", 0,
+@@ -1816,15 +1818,12 @@ static int advk_pcie_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie);
+-
+ bridge->sysdata = pcie;
+ bridge->ops = &advk_pcie_ops;
+ bridge->map_irq = advk_pcie_map_irq;
+
+ ret = pci_host_probe(bridge);
+ if (ret < 0) {
+- irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+ advk_pcie_remove_rp_irq_domain(pcie);
+ advk_pcie_remove_msi_irq_domain(pcie);
+ advk_pcie_remove_irq_domain(pcie);
+@@ -1873,9 +1872,6 @@ static int advk_pcie_remove(struct platform_device *pdev)
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
+ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
+
+- /* Remove IRQ handler */
+- irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+-
+ /* Remove IRQ domains */
+ advk_pcie_remove_rp_irq_domain(pcie);
+ advk_pcie_remove_msi_irq_domain(pcie);
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 9ecce435fb3f1..d25122fbe98ab 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -2920,6 +2920,16 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
+ },
++ /*
++ * Downstream device is not accessible after putting a root port
++ * into D3cold and back into D0 on Elo i2.
++ */
++ .ident = "Elo i2",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
++ },
+ },
+ #endif
+ { }
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+index a3fa03bcd9a30..54064714d73fb 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+@@ -1236,18 +1236,12 @@ FUNC_GROUP_DECL(SALT8, AA12);
+ FUNC_GROUP_DECL(WDTRST4, AA12);
+
+ #define AE12 196
+-SIG_EXPR_LIST_DECL_SEMG(AE12, FWSPIDQ2, FWQSPID, FWSPID,
+- SIG_DESC_SET(SCU438, 4));
+ SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4);
+-PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIDQ2),
+- SIG_EXPR_LIST_PTR(AE12, GPIOY4));
++PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, GPIOY4));
+
+ #define AF12 197
+-SIG_EXPR_LIST_DECL_SEMG(AF12, FWSPIDQ3, FWQSPID, FWSPID,
+- SIG_DESC_SET(SCU438, 5));
+ SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5);
+-PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIDQ3),
+- SIG_EXPR_LIST_PTR(AF12, GPIOY5));
++PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, GPIOY5));
+
+ #define AC12 198
+ SSSF_PIN_DECL(AC12, GPIOY6, FWSPIABR, SIG_DESC_SET(SCU438, 6));
+@@ -1520,9 +1514,8 @@ SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
+ PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
+
+ GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
+-GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
+ GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
+-FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
++FUNC_DECL_1(FWSPID, FWSPID);
+ FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
+ FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
+ /*
+@@ -1918,7 +1911,6 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
+ ASPEED_PINCTRL_GROUP(FSI2),
+ ASPEED_PINCTRL_GROUP(FWSPIABR),
+ ASPEED_PINCTRL_GROUP(FWSPID),
+- ASPEED_PINCTRL_GROUP(FWQSPID),
+ ASPEED_PINCTRL_GROUP(FWSPIWP),
+ ASPEED_PINCTRL_GROUP(GPIT0),
+ ASPEED_PINCTRL_GROUP(GPIT1),
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8365.c b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
+index 79b1fee5a1eba..ddee0db72d264 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mt8365.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
+@@ -259,7 +259,7 @@ static const struct mtk_pin_ies_smt_set mt8365_ies_set[] = {
+ MTK_PIN_IES_SMT_SPEC(104, 104, 0x420, 13),
+ MTK_PIN_IES_SMT_SPEC(105, 109, 0x420, 14),
+ MTK_PIN_IES_SMT_SPEC(110, 113, 0x420, 15),
+- MTK_PIN_IES_SMT_SPEC(114, 112, 0x420, 16),
++ MTK_PIN_IES_SMT_SPEC(114, 116, 0x420, 16),
+ MTK_PIN_IES_SMT_SPEC(117, 119, 0x420, 17),
+ MTK_PIN_IES_SMT_SPEC(120, 122, 0x420, 18),
+ MTK_PIN_IES_SMT_SPEC(123, 125, 0x420, 19),
+diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
+index 370459243007b..61e3844cddbf0 100644
+--- a/drivers/pinctrl/pinctrl-ocelot.c
++++ b/drivers/pinctrl/pinctrl-ocelot.c
+@@ -129,6 +129,7 @@ enum {
+ FUNC_PTP1,
+ FUNC_PTP2,
+ FUNC_PTP3,
++ FUNC_PTPSYNC_0,
+ FUNC_PTPSYNC_1,
+ FUNC_PTPSYNC_2,
+ FUNC_PTPSYNC_3,
+@@ -252,6 +253,7 @@ static const char *const ocelot_function_names[] = {
+ [FUNC_PTP1] = "ptp1",
+ [FUNC_PTP2] = "ptp2",
+ [FUNC_PTP3] = "ptp3",
++ [FUNC_PTPSYNC_0] = "ptpsync_0",
+ [FUNC_PTPSYNC_1] = "ptpsync_1",
+ [FUNC_PTPSYNC_2] = "ptpsync_2",
+ [FUNC_PTPSYNC_3] = "ptpsync_3",
+@@ -891,7 +893,7 @@ LAN966X_P(31, GPIO, FC3_c, CAN1, NONE, OB_TRG, RECO_b, NON
+ LAN966X_P(32, GPIO, FC3_c, NONE, SGPIO_a, NONE, MIIM_Sa, NONE, R);
+ LAN966X_P(33, GPIO, FC1_b, NONE, SGPIO_a, NONE, MIIM_Sa, MIIM_b, R);
+ LAN966X_P(34, GPIO, FC1_b, NONE, SGPIO_a, NONE, MIIM_Sa, MIIM_b, R);
+-LAN966X_P(35, GPIO, FC1_b, NONE, SGPIO_a, CAN0_b, NONE, NONE, R);
++LAN966X_P(35, GPIO, FC1_b, PTPSYNC_0, SGPIO_a, CAN0_b, NONE, NONE, R);
+ LAN966X_P(36, GPIO, NONE, PTPSYNC_1, NONE, CAN0_b, NONE, NONE, R);
+ LAN966X_P(37, GPIO, FC_SHRD0, PTPSYNC_2, TWI_SLC_GATE_AD, NONE, NONE, NONE, R);
+ LAN966X_P(38, GPIO, NONE, PTPSYNC_3, NONE, NONE, NONE, NONE, R);
+diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
+index 272c89837d745..0dbceee87a4b1 100644
+--- a/drivers/platform/chrome/cros_ec_debugfs.c
++++ b/drivers/platform/chrome/cros_ec_debugfs.c
+@@ -25,6 +25,9 @@
+
+ #define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1))
+
++/* waitqueue for log readers */
++static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
++
+ /**
+ * struct cros_ec_debugfs - EC debugging information.
+ *
+@@ -33,7 +36,6 @@
+ * @log_buffer: circular buffer for console log information
+ * @read_msg: preallocated EC command and buffer to read console log
+ * @log_mutex: mutex to protect circular buffer
+- * @log_wq: waitqueue for log readers
+ * @log_poll_work: recurring task to poll EC for new console log data
+ * @panicinfo_blob: panicinfo debugfs blob
+ */
+@@ -44,7 +46,6 @@ struct cros_ec_debugfs {
+ struct circ_buf log_buffer;
+ struct cros_ec_command *read_msg;
+ struct mutex log_mutex;
+- wait_queue_head_t log_wq;
+ struct delayed_work log_poll_work;
+ /* EC panicinfo */
+ struct debugfs_blob_wrapper panicinfo_blob;
+@@ -107,7 +108,7 @@ static void cros_ec_console_log_work(struct work_struct *__work)
+ buf_space--;
+ }
+
+- wake_up(&debug_info->log_wq);
++ wake_up(&cros_ec_debugfs_log_wq);
+ }
+
+ mutex_unlock(&debug_info->log_mutex);
+@@ -141,7 +142,7 @@ static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf,
+
+ mutex_unlock(&debug_info->log_mutex);
+
+- ret = wait_event_interruptible(debug_info->log_wq,
++ ret = wait_event_interruptible(cros_ec_debugfs_log_wq,
+ CIRC_CNT(cb->head, cb->tail, LOG_SIZE));
+ if (ret < 0)
+ return ret;
+@@ -173,7 +174,7 @@ static __poll_t cros_ec_console_log_poll(struct file *file,
+ struct cros_ec_debugfs *debug_info = file->private_data;
+ __poll_t mask = 0;
+
+- poll_wait(file, &debug_info->log_wq, wait);
++ poll_wait(file, &cros_ec_debugfs_log_wq, wait);
+
+ mutex_lock(&debug_info->log_mutex);
+ if (CIRC_CNT(debug_info->log_buffer.head,
+@@ -377,7 +378,6 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
+ debug_info->log_buffer.tail = 0;
+
+ mutex_init(&debug_info->log_mutex);
+- init_waitqueue_head(&debug_info->log_wq);
+
+ debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
+ debug_info, &cros_ec_console_log_fops);
+diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c
+index c1775db29efb6..ec66fde28e75a 100644
+--- a/drivers/platform/surface/surface_gpe.c
++++ b/drivers/platform/surface/surface_gpe.c
+@@ -99,6 +99,14 @@ static const struct dmi_system_id dmi_lid_device_table[] = {
+ },
+ .driver_data = (void *)lid_device_props_l4D,
+ },
++ {
++ .ident = "Surface Pro 8",
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 8"),
++ },
++ .driver_data = (void *)lid_device_props_l4B,
++ },
+ {
+ .ident = "Surface Book 1",
+ .matches = {
+diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
+index 6b6f3e2a617af..f73ecfd4a3092 100644
+--- a/drivers/platform/x86/intel/pmt/telemetry.c
++++ b/drivers/platform/x86/intel/pmt/telemetry.c
+@@ -103,7 +103,7 @@ static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxilia
+ auxiliary_set_drvdata(auxdev, priv);
+
+ for (i = 0; i < intel_vsec_dev->num_resources; i++) {
+- struct intel_pmt_entry *entry = &priv->entry[i];
++ struct intel_pmt_entry *entry = &priv->entry[priv->num_entries];
+
+ ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i);
+ if (ret < 0)
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 3fb8cda31eb9e..0ea71416d292a 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -309,6 +309,15 @@ struct ibm_init_struct {
+ struct ibm_struct *data;
+ };
+
++/* DMI Quirks */
++struct quirk_entry {
++ bool btusb_bug;
++};
++
++static struct quirk_entry quirk_btusb_bug = {
++ .btusb_bug = true,
++};
++
+ static struct {
+ u32 bluetooth:1;
+ u32 hotkey:1;
+@@ -338,6 +347,7 @@ static struct {
+ u32 hotkey_poll_active:1;
+ u32 has_adaptive_kbd:1;
+ u32 kbd_lang:1;
++ struct quirk_entry *quirks;
+ } tp_features;
+
+ static struct {
+@@ -4361,9 +4371,10 @@ static void bluetooth_exit(void)
+ bluetooth_shutdown();
+ }
+
+-static const struct dmi_system_id bt_fwbug_list[] __initconst = {
++static const struct dmi_system_id fwbug_list[] __initconst = {
+ {
+ .ident = "ThinkPad E485",
++ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20KU"),
+@@ -4371,6 +4382,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+ },
+ {
+ .ident = "ThinkPad E585",
++ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20KV"),
+@@ -4378,6 +4390,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+ },
+ {
+ .ident = "ThinkPad A285 - 20MW",
++ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MW"),
+@@ -4385,6 +4398,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+ },
+ {
+ .ident = "ThinkPad A285 - 20MX",
++ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MX"),
+@@ -4392,6 +4406,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+ },
+ {
+ .ident = "ThinkPad A485 - 20MU",
++ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MU"),
+@@ -4399,6 +4414,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+ },
+ {
+ .ident = "ThinkPad A485 - 20MV",
++ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MV"),
+@@ -4421,7 +4437,8 @@ static int __init have_bt_fwbug(void)
+ * Some AMD based ThinkPads have a firmware bug that calling
+ * "GBDC" will cause bluetooth on Intel wireless cards blocked
+ */
+- if (dmi_check_system(bt_fwbug_list) && pci_dev_present(fwbug_cards_ids)) {
++ if (tp_features.quirks && tp_features.quirks->btusb_bug &&
++ pci_dev_present(fwbug_cards_ids)) {
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
+ FW_BUG "disable bluetooth subdriver for Intel cards\n");
+ return 1;
+@@ -8749,24 +8766,27 @@ static int __init fan_init(struct ibm_init_struct *iibm)
+ fan_status_access_mode = TPACPI_FAN_RD_TPEC;
+ if (quirks & TPACPI_FAN_Q1)
+ fan_quirk1_setup();
+- if (quirks & TPACPI_FAN_2FAN) {
+- tp_features.second_fan = 1;
+- pr_info("secondary fan support enabled\n");
+- }
+- if (quirks & TPACPI_FAN_2CTL) {
+- tp_features.second_fan = 1;
+- tp_features.second_fan_ctl = 1;
+- pr_info("secondary fan control enabled\n");
+- }
+ /* Try and probe the 2nd fan */
++ tp_features.second_fan = 1; /* needed for get_speed to work */
+ res = fan2_get_speed(&speed);
+ if (res >= 0) {
+ /* It responded - so let's assume it's there */
+ tp_features.second_fan = 1;
+ tp_features.second_fan_ctl = 1;
+ pr_info("secondary fan control detected & enabled\n");
++ } else {
++ /* Fan not auto-detected */
++ tp_features.second_fan = 0;
++ if (quirks & TPACPI_FAN_2FAN) {
++ tp_features.second_fan = 1;
++ pr_info("secondary fan support enabled\n");
++ }
++ if (quirks & TPACPI_FAN_2CTL) {
++ tp_features.second_fan = 1;
++ tp_features.second_fan_ctl = 1;
++ pr_info("secondary fan control enabled\n");
++ }
+ }
+-
+ } else {
+ pr_err("ThinkPad ACPI EC access misbehaving, fan status and control unavailable\n");
+ return -ENODEV;
+@@ -11438,6 +11458,7 @@ static void thinkpad_acpi_module_exit(void)
+
+ static int __init thinkpad_acpi_module_init(void)
+ {
++ const struct dmi_system_id *dmi_id;
+ int ret, i;
+
+ tpacpi_lifecycle = TPACPI_LIFE_INIT;
+@@ -11477,6 +11498,10 @@ static int __init thinkpad_acpi_module_init(void)
+ return -ENODEV;
+ }
+
++ dmi_id = dmi_first_match(fwbug_list);
++ if (dmi_id)
++ tp_features.quirks = dmi_id->driver_data;
++
+ /* Device initialization */
+ tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, -1,
+ NULL, 0);
+diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
+index 17ad5f0d13b2a..6585789ed6951 100644
+--- a/drivers/ptp/ptp_ocp.c
++++ b/drivers/ptp/ptp_ocp.c
+@@ -625,7 +625,7 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u32 adj_val)
+ }
+
+ static void
+-ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
++ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, s64 delta_ns)
+ {
+ struct timespec64 ts;
+ unsigned long flags;
+@@ -634,7 +634,8 @@ ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
+ spin_lock_irqsave(&bp->lock, flags);
+ err = __ptp_ocp_gettime_locked(bp, &ts, NULL);
+ if (likely(!err)) {
+- timespec64_add_ns(&ts, delta_ns);
++ set_normalized_timespec64(&ts, ts.tv_sec,
++ ts.tv_nsec + delta_ns);
+ __ptp_ocp_settime_locked(bp, &ts);
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
+diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
+index 4b460c61f1d8c..40d504dac1a92 100644
+--- a/drivers/rtc/class.c
++++ b/drivers/rtc/class.c
+@@ -26,6 +26,15 @@ struct class *rtc_class;
+ static void rtc_device_release(struct device *dev)
+ {
+ struct rtc_device *rtc = to_rtc_device(dev);
++ struct timerqueue_head *head = &rtc->timerqueue;
++ struct timerqueue_node *node;
++
++ mutex_lock(&rtc->ops_lock);
++ while ((node = timerqueue_getnext(head)))
++ timerqueue_del(head, node);
++ mutex_unlock(&rtc->ops_lock);
++
++ cancel_work_sync(&rtc->irqwork);
+
+ ida_simple_remove(&rtc_ida, rtc->id);
+ mutex_destroy(&rtc->ops_lock);
+diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
+index 562f99b664a24..522449b25921e 100644
+--- a/drivers/rtc/rtc-mc146818-lib.c
++++ b/drivers/rtc/rtc-mc146818-lib.c
+@@ -176,6 +176,17 @@ int mc146818_get_time(struct rtc_time *time)
+ }
+ EXPORT_SYMBOL_GPL(mc146818_get_time);
+
++/* AMD systems don't allow access to AltCentury with DV1 */
++static bool apply_amd_register_a_behavior(void)
++{
++#ifdef CONFIG_X86
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
++ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
++ return true;
++#endif
++ return false;
++}
++
+ /* Set the current date and time in the real time clock. */
+ int mc146818_set_time(struct rtc_time *time)
+ {
+@@ -249,7 +260,10 @@ int mc146818_set_time(struct rtc_time *time)
+ save_control = CMOS_READ(RTC_CONTROL);
+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+- CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
++ if (apply_amd_register_a_behavior())
++ CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
++ else
++ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+ #ifdef CONFIG_MACH_DECSTATION
+ CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
+diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
+index 81a5b1f2e68c3..6c9d8de41e7be 100644
+--- a/drivers/rtc/rtc-pcf2127.c
++++ b/drivers/rtc/rtc-pcf2127.c
+@@ -374,7 +374,8 @@ static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
+ static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+ {
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+- unsigned int buf[5], ctrl2;
++ u8 buf[5];
++ unsigned int ctrl2;
+ int ret;
+
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
+diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
+index 711832c758aea..bcc0c2ce4b4e7 100644
+--- a/drivers/rtc/rtc-sun6i.c
++++ b/drivers/rtc/rtc-sun6i.c
+@@ -138,7 +138,7 @@ struct sun6i_rtc_dev {
+ const struct sun6i_rtc_clk_data *data;
+ void __iomem *base;
+ int irq;
+- unsigned long alarm;
++ time64_t alarm;
+
+ struct clk_hw hw;
+ struct clk_hw *int_osc;
+@@ -510,10 +510,8 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+ struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
+ struct rtc_time *alrm_tm = &wkalrm->time;
+ struct rtc_time tm_now;
+- unsigned long time_now = 0;
+- unsigned long time_set = 0;
+- unsigned long time_gap = 0;
+- int ret = 0;
++ time64_t time_now, time_set;
++ int ret;
+
+ ret = sun6i_rtc_gettime(dev, &tm_now);
+ if (ret < 0) {
+@@ -528,9 +526,7 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+ return -EINVAL;
+ }
+
+- time_gap = time_set - time_now;
+-
+- if (time_gap > U32_MAX) {
++ if ((time_set - time_now) > U32_MAX) {
+ dev_err(dev, "Date too far in the future\n");
+ return -EINVAL;
+ }
+@@ -539,7 +535,7 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+ writel(0, chip->base + SUN6I_ALRM_COUNTER);
+ usleep_range(100, 300);
+
+- writel(time_gap, chip->base + SUN6I_ALRM_COUNTER);
++ writel(time_set - time_now, chip->base + SUN6I_ALRM_COUNTER);
+ chip->alarm = time_set;
+
+ sun6i_rtc_setaie(wkalrm->enabled, chip);
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 37d06f993b761..1d9be771f3ee0 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -1172,9 +1172,8 @@ static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
+ case SCSI_ACCESS_STATE_OPTIMAL:
+ case SCSI_ACCESS_STATE_ACTIVE:
+ case SCSI_ACCESS_STATE_LBA:
+- return BLK_STS_OK;
+ case SCSI_ACCESS_STATE_TRANSITIONING:
+- return BLK_STS_AGAIN;
++ return BLK_STS_OK;
+ default:
+ req->rq_flags |= RQF_QUIET;
+ return BLK_STS_IOERR;
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index b109716d44fb7..7ab3c9e4d4783 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -3837,6 +3837,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
+
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ if (cmd->aborted) {
++ if (cmd->sg_mapped)
++ qlt_unmap_sg(vha, cmd);
++
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ /*
+ * It's normal to see 2 calls in this path:
+diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
+index b34feba1f53de..8dc818b03939a 100644
+--- a/drivers/scsi/ufs/ufshpb.c
++++ b/drivers/scsi/ufs/ufshpb.c
+@@ -1256,6 +1256,13 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+ struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
+ int data_seg_len;
+
++ data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
++ & MASK_RSP_UPIU_DATA_SEG_LEN;
++
++ /* If data segment length is zero, rsp_field is not valid */
++ if (!data_seg_len)
++ return;
++
+ if (unlikely(lrbp->lun != rsp_field->lun)) {
+ struct scsi_device *sdev;
+ bool found = false;
+@@ -1290,18 +1297,6 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+ return;
+ }
+
+- data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
+- & MASK_RSP_UPIU_DATA_SEG_LEN;
+-
+- /* To flush remained rsp_list, we queue the map_work task */
+- if (!data_seg_len) {
+- if (!ufshpb_is_general_lun(hpb->lun))
+- return;
+-
+- ufshpb_kick_map_work(hpb);
+- return;
+- }
+-
+ BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
+
+ if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
+diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
+index d86c3a36441ee..3427ce37a5c5b 100644
+--- a/drivers/usb/gadget/legacy/raw_gadget.c
++++ b/drivers/usb/gadget/legacy/raw_gadget.c
+@@ -145,6 +145,7 @@ enum dev_state {
+ STATE_DEV_INVALID = 0,
+ STATE_DEV_OPENED,
+ STATE_DEV_INITIALIZED,
++ STATE_DEV_REGISTERING,
+ STATE_DEV_RUNNING,
+ STATE_DEV_CLOSED,
+ STATE_DEV_FAILED
+@@ -508,6 +509,7 @@ static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
+ ret = -EINVAL;
+ goto out_unlock;
+ }
++ dev->state = STATE_DEV_REGISTERING;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ret = usb_gadget_probe_driver(&dev->driver);
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 1b5de3af1a627..9c45be8ab1788 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -161,6 +161,7 @@ struct mlx5_vdpa_net {
+ struct mlx5_flow_handle *rx_rule_mcast;
+ bool setup;
+ u32 cur_num_vqs;
++ u32 rqt_size;
+ struct notifier_block nb;
+ struct vdpa_callback config_cb;
+ struct mlx5_vdpa_wq_ent cvq_ent;
+@@ -204,17 +205,12 @@ static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
+ return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
+ }
+
+-static inline u32 mlx5_vdpa_max_qps(int max_vqs)
+-{
+- return max_vqs / 2;
+-}
+-
+ static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev)
+ {
+ if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
+ return 2;
+
+- return 2 * mlx5_vdpa_max_qps(mvdev->max_vqs);
++ return mvdev->max_vqs;
+ }
+
+ static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx)
+@@ -1236,25 +1232,13 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
+ static int create_rqt(struct mlx5_vdpa_net *ndev)
+ {
+ __be32 *list;
+- int max_rqt;
+ void *rqtc;
+ int inlen;
+ void *in;
+ int i, j;
+ int err;
+- int num;
+-
+- if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
+- num = 1;
+- else
+- num = ndev->cur_num_vqs / 2;
+
+- max_rqt = min_t(int, roundup_pow_of_two(num),
+- 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
+- if (max_rqt < 1)
+- return -EOPNOTSUPP;
+-
+- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
++ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+@@ -1263,12 +1247,12 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
+- MLX5_SET(rqtc, rqtc, rqt_max_size, max_rqt);
++ MLX5_SET(rqtc, rqtc, rqt_max_size, ndev->rqt_size);
+ list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
+- for (i = 0, j = 0; i < max_rqt; i++, j += 2)
+- list[i] = cpu_to_be32(ndev->vqs[j % (2 * num)].virtq_id);
++ for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
++ list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id);
+
+- MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
++ MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
+ err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
+ kfree(in);
+ if (err)
+@@ -1282,19 +1266,13 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
+ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
+ {
+ __be32 *list;
+- int max_rqt;
+ void *rqtc;
+ int inlen;
+ void *in;
+ int i, j;
+ int err;
+
+- max_rqt = min_t(int, roundup_pow_of_two(ndev->cur_num_vqs / 2),
+- 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
+- if (max_rqt < 1)
+- return -EOPNOTSUPP;
+-
+- inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
++ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+@@ -1305,10 +1283,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
+ MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
+
+ list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
+- for (i = 0, j = 0; i < max_rqt; i++, j += 2)
++ for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
+ list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);
+
+- MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
++ MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
+ err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
+ kfree(in);
+ if (err)
+@@ -1582,7 +1560,7 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
+
+ newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs);
+ if (newqps < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+- newqps > mlx5_vdpa_max_qps(mvdev->max_vqs))
++ newqps > ndev->rqt_size)
+ break;
+
+ if (ndev->cur_num_vqs == 2 * newqps) {
+@@ -1937,7 +1915,7 @@ static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
+ int err;
+ int i;
+
+- for (i = 0; i < 2 * mlx5_vdpa_max_qps(mvdev->max_vqs); i++) {
++ for (i = 0; i < mvdev->max_vqs; i++) {
+ err = setup_vq(ndev, &ndev->vqs[i]);
+ if (err)
+ goto err_vq;
+@@ -2008,9 +1986,11 @@ static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
+
+ ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
+ if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ))
+- ndev->cur_num_vqs = 2 * mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
++ ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
+ else
+- ndev->cur_num_vqs = 2;
++ ndev->rqt_size = 1;
++
++ ndev->cur_num_vqs = 2 * ndev->rqt_size;
+
+ update_cvq_info(mvdev);
+ return err;
+@@ -2463,7 +2443,7 @@ static void init_mvqs(struct mlx5_vdpa_net *ndev)
+ struct mlx5_vdpa_virtqueue *mvq;
+ int i;
+
+- for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) {
++ for (i = 0; i < ndev->mvdev.max_vqs; ++i) {
+ mvq = &ndev->vqs[i];
+ memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
+ mvq->index = i;
+@@ -2583,7 +2563,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+ return -EOPNOTSUPP;
+ }
+
+- max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
++ max_vqs = min_t(int, MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues),
++ 1 << MLX5_CAP_GEN(mdev, log_max_rqt_size));
+ if (max_vqs < 2) {
+ dev_warn(mdev->device,
+ "%d virtqueues are supported. At least 2 are required\n",
+@@ -2647,7 +2628,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC);
+ }
+
+- config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, mlx5_vdpa_max_qps(max_vqs));
++ config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, max_vqs / 2);
+ mvdev->vdev.dma_dev = &mdev->pdev->dev;
+ err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
+ if (err)
+@@ -2674,7 +2655,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+ ndev->nb.notifier_call = event_handler;
+ mlx5_notifier_register(mdev, &ndev->nb);
+ mvdev->vdev.mdev = &mgtdev->mgtdev;
+- err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs) + 1);
++ err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1);
+ if (err)
+ goto err_reg;
+
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 792ab5f236471..297b5db474545 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -1450,13 +1450,9 @@ err:
+ return ERR_PTR(r);
+ }
+
+-static struct ptr_ring *get_tap_ptr_ring(int fd)
++static struct ptr_ring *get_tap_ptr_ring(struct file *file)
+ {
+ struct ptr_ring *ring;
+- struct file *file = fget(fd);
+-
+- if (!file)
+- return NULL;
+ ring = tun_get_tx_ring(file);
+ if (!IS_ERR(ring))
+ goto out;
+@@ -1465,7 +1461,6 @@ static struct ptr_ring *get_tap_ptr_ring(int fd)
+ goto out;
+ ring = NULL;
+ out:
+- fput(file);
+ return ring;
+ }
+
+@@ -1552,8 +1547,12 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
+ r = vhost_net_enable_vq(n, vq);
+ if (r)
+ goto err_used;
+- if (index == VHOST_NET_VQ_RX)
+- nvq->rx_ring = get_tap_ptr_ring(fd);
++ if (index == VHOST_NET_VQ_RX) {
++ if (sock)
++ nvq->rx_ring = get_tap_ptr_ring(sock->file);
++ else
++ nvq->rx_ring = NULL;
++ }
+
+ oldubufs = nvq->ubufs;
+ nvq->ubufs = ubufs;
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index ec5249e8c32d9..05f5fd2af58f8 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -97,8 +97,11 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
+ return;
+
+ irq = ops->get_vq_irq(vdpa, qid);
++ if (irq < 0)
++ return;
++
+ irq_bypass_unregister_producer(&vq->call_ctx.producer);
+- if (!vq->call_ctx.ctx || irq < 0)
++ if (!vq->call_ctx.ctx)
+ return;
+
+ vq->call_ctx.producer.token = vq->call_ctx.ctx;
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index 10a9369c9dea4..00f0f282e7a13 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1438,10 +1438,7 @@ fb_release(struct inode *inode, struct file *file)
+ __acquires(&info->lock)
+ __releases(&info->lock)
+ {
+- struct fb_info * const info = file_fb_info(file);
+-
+- if (!info)
+- return -ENODEV;
++ struct fb_info * const info = file->private_data;
+
+ lock_fb_info(info);
+ if (info->fbops->fb_release)
+diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
+index 26892940c2136..82e31a2d845e1 100644
+--- a/drivers/video/fbdev/core/fbsysfs.c
++++ b/drivers/video/fbdev/core/fbsysfs.c
+@@ -80,6 +80,10 @@ void framebuffer_release(struct fb_info *info)
+ {
+ if (!info)
+ return;
++
++ if (WARN_ON(refcount_read(&info->count)))
++ return;
++
+ kfree(info->apertures);
+ kfree(info);
+ }
+diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
+index dd9a744f82f85..86ffb58fbc854 100644
+--- a/drivers/watchdog/sp5100_tco.c
++++ b/drivers/watchdog/sp5100_tco.c
+@@ -49,7 +49,7 @@
+ /* internal variables */
+
+ enum tco_reg_layout {
+- sp5100, sb800, efch
++ sp5100, sb800, efch, efch_mmio
+ };
+
+ struct sp5100_tco {
+@@ -86,6 +86,10 @@ static enum tco_reg_layout tco_reg_layout(struct pci_dev *dev)
+ dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
+ dev->revision < 0x40) {
+ return sp5100;
++ } else if (dev->vendor == PCI_VENDOR_ID_AMD &&
++ sp5100_tco_pci->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
++ sp5100_tco_pci->revision >= AMD_ZEN_SMBUS_PCI_REV) {
++ return efch_mmio;
+ } else if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ ((dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+ dev->revision >= 0x41) ||
+@@ -209,6 +213,8 @@ static void tco_timer_enable(struct sp5100_tco *tco)
+ ~EFCH_PM_WATCHDOG_DISABLE,
+ EFCH_PM_DECODEEN_SECOND_RES);
+ break;
++ default:
++ break;
+ }
+ }
+
+@@ -223,14 +229,195 @@ static u32 sp5100_tco_read_pm_reg32(u8 index)
+ return val;
+ }
+
++static u32 sp5100_tco_request_region(struct device *dev,
++ u32 mmio_addr,
++ const char *dev_name)
++{
++ if (!devm_request_mem_region(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE,
++ dev_name)) {
++ dev_dbg(dev, "MMIO address 0x%08x already in use\n", mmio_addr);
++ return 0;
++ }
++
++ return mmio_addr;
++}
++
++static u32 sp5100_tco_prepare_base(struct sp5100_tco *tco,
++ u32 mmio_addr,
++ u32 alt_mmio_addr,
++ const char *dev_name)
++{
++ struct device *dev = tco->wdd.parent;
++
++ dev_dbg(dev, "Got 0x%08x from SBResource_MMIO register\n", mmio_addr);
++
++ if (!mmio_addr && !alt_mmio_addr)
++ return -ENODEV;
++
++ /* Check for MMIO address and alternate MMIO address conflicts */
++ if (mmio_addr)
++ mmio_addr = sp5100_tco_request_region(dev, mmio_addr, dev_name);
++
++ if (!mmio_addr && alt_mmio_addr)
++ mmio_addr = sp5100_tco_request_region(dev, alt_mmio_addr, dev_name);
++
++ if (!mmio_addr) {
++ dev_err(dev, "Failed to reserve MMIO or alternate MMIO region\n");
++ return -EBUSY;
++ }
++
++ tco->tcobase = devm_ioremap(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE);
++ if (!tco->tcobase) {
++ dev_err(dev, "MMIO address 0x%08x failed mapping\n", mmio_addr);
++ devm_release_mem_region(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE);
++ return -ENOMEM;
++ }
++
++ dev_info(dev, "Using 0x%08x for watchdog MMIO address\n", mmio_addr);
++
++ return 0;
++}
++
++static int sp5100_tco_timer_init(struct sp5100_tco *tco)
++{
++ struct watchdog_device *wdd = &tco->wdd;
++ struct device *dev = wdd->parent;
++ u32 val;
++
++ val = readl(SP5100_WDT_CONTROL(tco->tcobase));
++ if (val & SP5100_WDT_DISABLED) {
++ dev_err(dev, "Watchdog hardware is disabled\n");
++ return -ENODEV;
++ }
++
++ /*
++ * Save WatchDogFired status, because WatchDogFired flag is
++ * cleared here.
++ */
++ if (val & SP5100_WDT_FIRED)
++ wdd->bootstatus = WDIOF_CARDRESET;
++
++ /* Set watchdog action to reset the system */
++ val &= ~SP5100_WDT_ACTION_RESET;
++ writel(val, SP5100_WDT_CONTROL(tco->tcobase));
++
++ /* Set a reasonable heartbeat before we stop the timer */
++ tco_timer_set_timeout(wdd, wdd->timeout);
++
++ /*
++ * Stop the TCO before we change anything so we don't race with
++ * a zeroed timer.
++ */
++ tco_timer_stop(wdd);
++
++ return 0;
++}
++
++static u8 efch_read_pm_reg8(void __iomem *addr, u8 index)
++{
++ return readb(addr + index);
++}
++
++static void efch_update_pm_reg8(void __iomem *addr, u8 index, u8 reset, u8 set)
++{
++ u8 val;
++
++ val = readb(addr + index);
++ val &= reset;
++ val |= set;
++ writeb(val, addr + index);
++}
++
++static void tco_timer_enable_mmio(void __iomem *addr)
++{
++ efch_update_pm_reg8(addr, EFCH_PM_DECODEEN3,
++ ~EFCH_PM_WATCHDOG_DISABLE,
++ EFCH_PM_DECODEEN_SECOND_RES);
++}
++
++static int sp5100_tco_setupdevice_mmio(struct device *dev,
++ struct watchdog_device *wdd)
++{
++ struct sp5100_tco *tco = watchdog_get_drvdata(wdd);
++ const char *dev_name = SB800_DEVNAME;
++ u32 mmio_addr = 0, alt_mmio_addr = 0;
++ struct resource *res;
++ void __iomem *addr;
++ int ret;
++ u32 val;
++
++ res = request_mem_region_muxed(EFCH_PM_ACPI_MMIO_PM_ADDR,
++ EFCH_PM_ACPI_MMIO_PM_SIZE,
++ "sp5100_tco");
++
++ if (!res) {
++ dev_err(dev,
++ "Memory region 0x%08x already in use\n",
++ EFCH_PM_ACPI_MMIO_PM_ADDR);
++ return -EBUSY;
++ }
++
++ addr = ioremap(EFCH_PM_ACPI_MMIO_PM_ADDR, EFCH_PM_ACPI_MMIO_PM_SIZE);
++ if (!addr) {
++ dev_err(dev, "Address mapping failed\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ /*
++ * EFCH_PM_DECODEEN_WDT_TMREN is dual purpose. This bitfield
++ * enables sp5100_tco register MMIO space decoding. The bitfield
++ * also starts the timer operation. Enable if not already enabled.
++ */
++ val = efch_read_pm_reg8(addr, EFCH_PM_DECODEEN);
++ if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
++ efch_update_pm_reg8(addr, EFCH_PM_DECODEEN, 0xff,
++ EFCH_PM_DECODEEN_WDT_TMREN);
++ }
++
++ /* Error if the timer could not be enabled */
++ val = efch_read_pm_reg8(addr, EFCH_PM_DECODEEN);
++ if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
++ dev_err(dev, "Failed to enable the timer\n");
++ ret = -EFAULT;
++ goto out;
++ }
++
++ mmio_addr = EFCH_PM_WDT_ADDR;
++
++ /* Determine alternate MMIO base address */
++ val = efch_read_pm_reg8(addr, EFCH_PM_ISACONTROL);
++ if (val & EFCH_PM_ISACONTROL_MMIOEN)
++ alt_mmio_addr = EFCH_PM_ACPI_MMIO_ADDR +
++ EFCH_PM_ACPI_MMIO_WDT_OFFSET;
++
++ ret = sp5100_tco_prepare_base(tco, mmio_addr, alt_mmio_addr, dev_name);
++ if (!ret) {
++ tco_timer_enable_mmio(addr);
++ ret = sp5100_tco_timer_init(tco);
++ }
++
++out:
++ if (addr)
++ iounmap(addr);
++
++ release_resource(res);
++
++ return ret;
++}
++
+ static int sp5100_tco_setupdevice(struct device *dev,
+ struct watchdog_device *wdd)
+ {
+ struct sp5100_tco *tco = watchdog_get_drvdata(wdd);
+ const char *dev_name;
+ u32 mmio_addr = 0, val;
++ u32 alt_mmio_addr = 0;
+ int ret;
+
++ if (tco->tco_reg_layout == efch_mmio)
++ return sp5100_tco_setupdevice_mmio(dev, wdd);
++
+ /* Request the IO ports used by this driver */
+ if (!request_muxed_region(SP5100_IO_PM_INDEX_REG,
+ SP5100_PM_IOPORTS_SIZE, "sp5100_tco")) {
+@@ -247,138 +434,55 @@ static int sp5100_tco_setupdevice(struct device *dev,
+ dev_name = SP5100_DEVNAME;
+ mmio_addr = sp5100_tco_read_pm_reg32(SP5100_PM_WATCHDOG_BASE) &
+ 0xfffffff8;
++
++ /*
++ * Secondly, find the watchdog timer MMIO address
++ * from SBResource_MMIO register.
++ */
++
++ /* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
++ pci_read_config_dword(sp5100_tco_pci,
++ SP5100_SB_RESOURCE_MMIO_BASE,
++ &val);
++
++ /* Verify MMIO is enabled and using bar0 */
++ if ((val & SB800_ACPI_MMIO_MASK) == SB800_ACPI_MMIO_DECODE_EN)
++ alt_mmio_addr = (val & ~0xfff) + SB800_PM_WDT_MMIO_OFFSET;
+ break;
+ case sb800:
+ dev_name = SB800_DEVNAME;
+ mmio_addr = sp5100_tco_read_pm_reg32(SB800_PM_WATCHDOG_BASE) &
+ 0xfffffff8;
++
++ /* Read SBResource_MMIO from AcpiMmioEn(PM_Reg: 24h) */
++ val = sp5100_tco_read_pm_reg32(SB800_PM_ACPI_MMIO_EN);
++
++ /* Verify MMIO is enabled and using bar0 */
++ if ((val & SB800_ACPI_MMIO_MASK) == SB800_ACPI_MMIO_DECODE_EN)
++ alt_mmio_addr = (val & ~0xfff) + SB800_PM_WDT_MMIO_OFFSET;
+ break;
+ case efch:
+ dev_name = SB800_DEVNAME;
+- /*
+- * On Family 17h devices, the EFCH_PM_DECODEEN_WDT_TMREN bit of
+- * EFCH_PM_DECODEEN not only enables the EFCH_PM_WDT_ADDR memory
+- * region, it also enables the watchdog itself.
+- */
+- if (boot_cpu_data.x86 == 0x17) {
+- val = sp5100_tco_read_pm_reg8(EFCH_PM_DECODEEN);
+- if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
+- sp5100_tco_update_pm_reg8(EFCH_PM_DECODEEN, 0xff,
+- EFCH_PM_DECODEEN_WDT_TMREN);
+- }
+- }
+ val = sp5100_tco_read_pm_reg8(EFCH_PM_DECODEEN);
+ if (val & EFCH_PM_DECODEEN_WDT_TMREN)
+ mmio_addr = EFCH_PM_WDT_ADDR;
++
++ val = sp5100_tco_read_pm_reg8(EFCH_PM_ISACONTROL);
++ if (val & EFCH_PM_ISACONTROL_MMIOEN)
++ alt_mmio_addr = EFCH_PM_ACPI_MMIO_ADDR +
++ EFCH_PM_ACPI_MMIO_WDT_OFFSET;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+- /* Check MMIO address conflict */
+- if (!mmio_addr ||
+- !devm_request_mem_region(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE,
+- dev_name)) {
+- if (mmio_addr)
+- dev_dbg(dev, "MMIO address 0x%08x already in use\n",
+- mmio_addr);
+- switch (tco->tco_reg_layout) {
+- case sp5100:
+- /*
+- * Secondly, Find the watchdog timer MMIO address
+- * from SBResource_MMIO register.
+- */
+- /* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
+- pci_read_config_dword(sp5100_tco_pci,
+- SP5100_SB_RESOURCE_MMIO_BASE,
+- &mmio_addr);
+- if ((mmio_addr & (SB800_ACPI_MMIO_DECODE_EN |
+- SB800_ACPI_MMIO_SEL)) !=
+- SB800_ACPI_MMIO_DECODE_EN) {
+- ret = -ENODEV;
+- goto unreg_region;
+- }
+- mmio_addr &= ~0xFFF;
+- mmio_addr += SB800_PM_WDT_MMIO_OFFSET;
+- break;
+- case sb800:
+- /* Read SBResource_MMIO from AcpiMmioEn(PM_Reg: 24h) */
+- mmio_addr =
+- sp5100_tco_read_pm_reg32(SB800_PM_ACPI_MMIO_EN);
+- if ((mmio_addr & (SB800_ACPI_MMIO_DECODE_EN |
+- SB800_ACPI_MMIO_SEL)) !=
+- SB800_ACPI_MMIO_DECODE_EN) {
+- ret = -ENODEV;
+- goto unreg_region;
+- }
+- mmio_addr &= ~0xFFF;
+- mmio_addr += SB800_PM_WDT_MMIO_OFFSET;
+- break;
+- case efch:
+- val = sp5100_tco_read_pm_reg8(EFCH_PM_ISACONTROL);
+- if (!(val & EFCH_PM_ISACONTROL_MMIOEN)) {
+- ret = -ENODEV;
+- goto unreg_region;
+- }
+- mmio_addr = EFCH_PM_ACPI_MMIO_ADDR +
+- EFCH_PM_ACPI_MMIO_WDT_OFFSET;
+- break;
+- }
+- dev_dbg(dev, "Got 0x%08x from SBResource_MMIO register\n",
+- mmio_addr);
+- if (!devm_request_mem_region(dev, mmio_addr,
+- SP5100_WDT_MEM_MAP_SIZE,
+- dev_name)) {
+- dev_dbg(dev, "MMIO address 0x%08x already in use\n",
+- mmio_addr);
+- ret = -EBUSY;
+- goto unreg_region;
+- }
+- }
+-
+- tco->tcobase = devm_ioremap(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE);
+- if (!tco->tcobase) {
+- dev_err(dev, "failed to get tcobase address\n");
+- ret = -ENOMEM;
+- goto unreg_region;
+- }
+-
+- dev_info(dev, "Using 0x%08x for watchdog MMIO address\n", mmio_addr);
+-
+- /* Setup the watchdog timer */
+- tco_timer_enable(tco);
+-
+- val = readl(SP5100_WDT_CONTROL(tco->tcobase));
+- if (val & SP5100_WDT_DISABLED) {
+- dev_err(dev, "Watchdog hardware is disabled\n");
+- ret = -ENODEV;
+- goto unreg_region;
++ ret = sp5100_tco_prepare_base(tco, mmio_addr, alt_mmio_addr, dev_name);
++ if (!ret) {
++ /* Setup the watchdog timer */
++ tco_timer_enable(tco);
++ ret = sp5100_tco_timer_init(tco);
+ }
+
+- /*
+- * Save WatchDogFired status, because WatchDogFired flag is
+- * cleared here.
+- */
+- if (val & SP5100_WDT_FIRED)
+- wdd->bootstatus = WDIOF_CARDRESET;
+- /* Set watchdog action to reset the system */
+- val &= ~SP5100_WDT_ACTION_RESET;
+- writel(val, SP5100_WDT_CONTROL(tco->tcobase));
+-
+- /* Set a reasonable heartbeat before we stop the timer */
+- tco_timer_set_timeout(wdd, wdd->timeout);
+-
+- /*
+- * Stop the TCO before we change anything so we don't race with
+- * a zeroed timer.
+- */
+- tco_timer_stop(wdd);
+-
+- release_region(SP5100_IO_PM_INDEX_REG, SP5100_PM_IOPORTS_SIZE);
+-
+- return 0;
+-
+-unreg_region:
+ release_region(SP5100_IO_PM_INDEX_REG, SP5100_PM_IOPORTS_SIZE);
+ return ret;
+ }
+diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
+index adf015aa4126f..6a0986d2c94b7 100644
+--- a/drivers/watchdog/sp5100_tco.h
++++ b/drivers/watchdog/sp5100_tco.h
+@@ -58,6 +58,7 @@
+ #define SB800_PM_WATCHDOG_SECOND_RES GENMASK(1, 0)
+ #define SB800_ACPI_MMIO_DECODE_EN BIT(0)
+ #define SB800_ACPI_MMIO_SEL BIT(1)
++#define SB800_ACPI_MMIO_MASK GENMASK(1, 0)
+
+ #define SB800_PM_WDT_MMIO_OFFSET 0xB00
+
+@@ -82,4 +83,10 @@
+ #define EFCH_PM_ISACONTROL_MMIOEN BIT(1)
+
+ #define EFCH_PM_ACPI_MMIO_ADDR 0xfed80000
++#define EFCH_PM_ACPI_MMIO_PM_OFFSET 0x00000300
+ #define EFCH_PM_ACPI_MMIO_WDT_OFFSET 0x00000b00
++
++#define EFCH_PM_ACPI_MMIO_PM_ADDR (EFCH_PM_ACPI_MMIO_ADDR + \
++ EFCH_PM_ACPI_MMIO_PM_OFFSET)
++#define EFCH_PM_ACPI_MMIO_PM_SIZE 8
++#define AMD_ZEN_SMBUS_PCI_REV 0x51
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index 5964f8aee090f..0d6c0885b2d74 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -727,10 +727,22 @@ int afs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ {
+ struct inode *inode = d_inode(path->dentry);
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+- int seq = 0;
++ struct key *key;
++ int ret, seq = 0;
+
+ _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);
+
++ if (!(query_flags & AT_STATX_DONT_SYNC) &&
++ !test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
++ key = afs_request_key(vnode->volume->cell);
++ if (IS_ERR(key))
++ return PTR_ERR(key);
++ ret = afs_validate(vnode, key);
++ key_put(key);
++ if (ret < 0)
++ return ret;
++ }
++
+ do {
+ read_seqbegin_or_lock(&vnode->cb_lock, &seq);
+ generic_fillattr(&init_user_ns, inode, stat);
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index ea00e1a91250c..9d334816eac07 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -94,7 +94,7 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
+ le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics),
+ le32_to_cpu(tcon->fsAttrInfo.Attributes),
+ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength),
+- tcon->tidStatus);
++ tcon->status);
+ if (dev_type == FILE_DEVICE_DISK)
+ seq_puts(m, " type: DISK ");
+ else if (dev_type == FILE_DEVICE_CD_ROM)
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 10aa0fb946138..59d22261e0821 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -701,14 +701,14 @@ static void cifs_umount_begin(struct super_block *sb)
+ tcon = cifs_sb_master_tcon(cifs_sb);
+
+ spin_lock(&cifs_tcp_ses_lock);
+- if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
++ if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
+ /* we have other mounts to same share or we have
+ already tried to force umount this and woken up
+ all waiting network requests, nothing to do */
+ spin_unlock(&cifs_tcp_ses_lock);
+ return;
+ } else if (tcon->tc_count == 1)
+- tcon->tidStatus = CifsExiting;
++ tcon->status = TID_EXITING;
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 48b343d034309..560ecc4ad87d5 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -115,10 +115,18 @@ enum statusEnum {
+ CifsInNegotiate,
+ CifsNeedSessSetup,
+ CifsInSessSetup,
+- CifsNeedTcon,
+- CifsInTcon,
+- CifsNeedFilesInvalidate,
+- CifsInFilesInvalidate
++};
++
++/* associated with each tree connection to the server */
++enum tid_status_enum {
++ TID_NEW = 0,
++ TID_GOOD,
++ TID_EXITING,
++ TID_NEED_RECON,
++ TID_NEED_TCON,
++ TID_IN_TCON,
++ TID_NEED_FILES_INVALIDATE, /* currently unused */
++ TID_IN_FILES_INVALIDATE
+ };
+
+ enum securityEnum {
+@@ -1038,7 +1046,7 @@ struct cifs_tcon {
+ char *password; /* for share-level security */
+ __u32 tid; /* The 4 byte tree id */
+ __u16 Flags; /* optional support bits */
+- enum statusEnum tidStatus;
++ enum tid_status_enum status;
+ atomic_t num_smbs_sent;
+ union {
+ struct {
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 071e2f21a7db7..aca9338b0877e 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -75,12 +75,11 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
+
+ /* only send once per connect */
+ spin_lock(&cifs_tcp_ses_lock);
+- if (tcon->ses->status != CifsGood ||
+- tcon->tidStatus != CifsNeedReconnect) {
++ if ((tcon->ses->status != CifsGood) || (tcon->status != TID_NEED_RECON)) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return;
+ }
+- tcon->tidStatus = CifsInFilesInvalidate;
++ tcon->status = TID_IN_FILES_INVALIDATE;
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ /* list all files open on tree connection and mark them invalid */
+@@ -100,8 +99,8 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
+ mutex_unlock(&tcon->crfid.fid_mutex);
+
+ spin_lock(&cifs_tcp_ses_lock);
+- if (tcon->tidStatus == CifsInFilesInvalidate)
+- tcon->tidStatus = CifsNeedTcon;
++ if (tcon->status == TID_IN_FILES_INVALIDATE)
++ tcon->status = TID_NEED_TCON;
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ /*
+@@ -136,7 +135,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
+ * have tcon) are allowed as we start force umount
+ */
+ spin_lock(&cifs_tcp_ses_lock);
+- if (tcon->tidStatus == CifsExiting) {
++ if (tcon->status == TID_EXITING) {
+ if (smb_command != SMB_COM_WRITE_ANDX &&
+ smb_command != SMB_COM_OPEN_ANDX &&
+ smb_command != SMB_COM_TREE_DISCONNECT) {
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 532770c30415d..c3a26f06fdaa1 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -245,7 +245,7 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
+
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ tcon->need_reconnect = true;
+- tcon->tidStatus = CifsNeedReconnect;
++ tcon->status = TID_NEED_RECON;
+ }
+ if (ses->tcon_ipc)
+ ses->tcon_ipc->need_reconnect = true;
+@@ -2217,7 +2217,7 @@ get_ses_fail:
+
+ static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ {
+- if (tcon->tidStatus == CifsExiting)
++ if (tcon->status == TID_EXITING)
+ return 0;
+ if (strncmp(tcon->treeName, ctx->UNC, MAX_TREE_SIZE))
+ return 0;
+@@ -4498,12 +4498,12 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+ /* only send once per connect */
+ spin_lock(&cifs_tcp_ses_lock);
+ if (tcon->ses->status != CifsGood ||
+- (tcon->tidStatus != CifsNew &&
+- tcon->tidStatus != CifsNeedTcon)) {
++ (tcon->status != TID_NEW &&
++ tcon->status != TID_NEED_TCON)) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return 0;
+ }
+- tcon->tidStatus = CifsInTcon;
++ tcon->status = TID_IN_TCON;
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
+@@ -4544,13 +4544,13 @@ out:
+
+ if (rc) {
+ spin_lock(&cifs_tcp_ses_lock);
+- if (tcon->tidStatus == CifsInTcon)
+- tcon->tidStatus = CifsNeedTcon;
++ if (tcon->status == TID_IN_TCON)
++ tcon->status = TID_NEED_TCON;
+ spin_unlock(&cifs_tcp_ses_lock);
+ } else {
+ spin_lock(&cifs_tcp_ses_lock);
+- if (tcon->tidStatus == CifsInTcon)
+- tcon->tidStatus = CifsGood;
++ if (tcon->status == TID_IN_TCON)
++ tcon->status = TID_GOOD;
+ spin_unlock(&cifs_tcp_ses_lock);
+ tcon->need_reconnect = false;
+ }
+@@ -4566,24 +4566,24 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+ /* only send once per connect */
+ spin_lock(&cifs_tcp_ses_lock);
+ if (tcon->ses->status != CifsGood ||
+- (tcon->tidStatus != CifsNew &&
+- tcon->tidStatus != CifsNeedTcon)) {
++ (tcon->status != TID_NEW &&
++ tcon->status != TID_NEED_TCON)) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return 0;
+ }
+- tcon->tidStatus = CifsInTcon;
++ tcon->status = TID_IN_TCON;
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc);
+ if (rc) {
+ spin_lock(&cifs_tcp_ses_lock);
+- if (tcon->tidStatus == CifsInTcon)
+- tcon->tidStatus = CifsNeedTcon;
++ if (tcon->status == TID_IN_TCON)
++ tcon->status = TID_NEED_TCON;
+ spin_unlock(&cifs_tcp_ses_lock);
+ } else {
+ spin_lock(&cifs_tcp_ses_lock);
+- if (tcon->tidStatus == CifsInTcon)
+- tcon->tidStatus = CifsGood;
++ if (tcon->status == TID_IN_TCON)
++ tcon->status = TID_GOOD;
+ spin_unlock(&cifs_tcp_ses_lock);
+ tcon->need_reconnect = false;
+ }
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 56598f7dbe00d..afaf59c221936 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -116,7 +116,7 @@ tconInfoAlloc(void)
+ }
+
+ atomic_inc(&tconInfoAllocCount);
+- ret_buf->tidStatus = CifsNew;
++ ret_buf->status = TID_NEW;
+ ++ret_buf->tc_count;
+ INIT_LIST_HEAD(&ret_buf->openFileList);
+ INIT_LIST_HEAD(&ret_buf->tcon_list);
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index f82d6fcb5c646..1704fd358b850 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -163,7 +163,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ return 0;
+
+ spin_lock(&cifs_tcp_ses_lock);
+- if (tcon->tidStatus == CifsExiting) {
++ if (tcon->status == TID_EXITING) {
+ /*
+ * only tree disconnect, open, and write,
+ * (and ulogoff which does not have tcon)
+@@ -3865,7 +3865,7 @@ void smb2_reconnect_server(struct work_struct *work)
+ goto done;
+ }
+
+- tcon->tidStatus = CifsGood;
++ tcon->status = TID_GOOD;
+ tcon->retry = false;
+ tcon->need_reconnect = false;
+
+diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
+index fa071d738c78e..c781c19303db4 100644
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -956,14 +956,16 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ return ret;
+ iocb->ki_flags &= ~IOCB_DIRECT;
+ }
++ pagefault_disable();
+ iocb->ki_flags |= IOCB_NOIO;
+ ret = generic_file_read_iter(iocb, to);
+ iocb->ki_flags &= ~IOCB_NOIO;
++ pagefault_enable();
+ if (ret >= 0) {
+ if (!iov_iter_count(to))
+ return ret;
+ written = ret;
+- } else {
++ } else if (ret != -EFAULT) {
+ if (ret != -EAGAIN)
+ return ret;
+ if (iocb->ki_flags & IOCB_NOWAIT)
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 6b23399eaee0c..d368d9a2e8f00 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -669,6 +669,8 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
+
+ /* Check for state != intended state */
+ if (unlikely(state != gl->gl_target)) {
++ if (gh && (ret & LM_OUT_CANCELED))
++ gfs2_holder_wake(gh);
+ if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
+ /* move to back of queue and try next entry */
+ if (ret & LM_OUT_CANCELED) {
+@@ -1691,6 +1693,14 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
+ struct gfs2_glock *gl = gh->gh_gl;
+
+ spin_lock(&gl->gl_lockref.lock);
++ if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
++ !test_bit(HIF_HOLDER, &gh->gh_iflags)) {
++ spin_unlock(&gl->gl_lockref.lock);
++ gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
++ wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
++ spin_lock(&gl->gl_lockref.lock);
++ }
++
+ __gfs2_glock_dq(gh);
+ spin_unlock(&gl->gl_lockref.lock);
+ }
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 66a123306aecb..c8ec876f33ea3 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -131,7 +131,21 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_glock *io_gl;
+
+- error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
++ error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE,
++ &ip->i_gl);
++ if (unlikely(error))
++ goto fail;
++
++ error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE,
++ &io_gl);
++ if (unlikely(error))
++ goto fail;
++
++ if (blktype != GFS2_BLKST_UNLINKED)
++ gfs2_cancel_delete_work(io_gl);
++ error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT,
++ &ip->i_iopen_gh);
++ gfs2_glock_put(io_gl);
+ if (unlikely(error))
+ goto fail;
+
+@@ -161,16 +175,6 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
+
+ set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
+
+- error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
+- if (unlikely(error))
+- goto fail;
+- if (blktype != GFS2_BLKST_UNLINKED)
+- gfs2_cancel_delete_work(io_gl);
+- error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
+- gfs2_glock_put(io_gl);
+- if (unlikely(error))
+- goto fail;
+-
+ /* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
+ inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
+ inode->i_atime.tv_nsec = 0;
+@@ -716,13 +720,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr);
+ BUG_ON(error);
+
+- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
++ error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
+ if (error)
+ goto fail_gunlock2;
+
++ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
++ if (error)
++ goto fail_gunlock3;
++
+ error = gfs2_trans_begin(sdp, blocks, 0);
+ if (error)
+- goto fail_gunlock2;
++ goto fail_gunlock3;
+
+ if (blocks > 1) {
+ ip->i_eattr = ip->i_no_addr + 1;
+@@ -731,10 +739,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ init_dinode(dip, ip, symname);
+ gfs2_trans_end(sdp);
+
+- error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
+- if (error)
+- goto fail_gunlock2;
+-
+ glock_set_object(ip->i_gl, ip);
+ glock_set_object(io_gl, ip);
+ gfs2_set_iop(inode);
+@@ -745,14 +749,14 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ if (default_acl) {
+ error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+ if (error)
+- goto fail_gunlock3;
++ goto fail_gunlock4;
+ posix_acl_release(default_acl);
+ default_acl = NULL;
+ }
+ if (acl) {
+ error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ if (error)
+- goto fail_gunlock3;
++ goto fail_gunlock4;
+ posix_acl_release(acl);
+ acl = NULL;
+ }
+@@ -760,11 +764,11 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
+ &gfs2_initxattrs, NULL);
+ if (error)
+- goto fail_gunlock3;
++ goto fail_gunlock4;
+
+ error = link_dinode(dip, name, ip, &da);
+ if (error)
+- goto fail_gunlock3;
++ goto fail_gunlock4;
+
+ mark_inode_dirty(inode);
+ d_instantiate(dentry, inode);
+@@ -782,9 +786,10 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ unlock_new_inode(inode);
+ return error;
+
+-fail_gunlock3:
++fail_gunlock4:
+ glock_clear_object(ip->i_gl, ip);
+ glock_clear_object(io_gl, ip);
++fail_gunlock3:
+ gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+ fail_gunlock2:
+ gfs2_glock_put(io_gl);
+diff --git a/fs/ioctl.c b/fs/ioctl.c
+index 1ed097e94af2d..85f7e4ee6924f 100644
+--- a/fs/ioctl.c
++++ b/fs/ioctl.c
+@@ -173,7 +173,7 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
+
+ if (*len == 0)
+ return -EINVAL;
+- if (start > maxbytes)
++ if (start >= maxbytes)
+ return -EFBIG;
+
+ /*
+diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
+index 66bdaa2cf496a..ca611ac09f7c1 100644
+--- a/fs/nilfs2/btnode.c
++++ b/fs/nilfs2/btnode.c
+@@ -20,6 +20,23 @@
+ #include "page.h"
+ #include "btnode.h"
+
++
++/**
++ * nilfs_init_btnc_inode - initialize B-tree node cache inode
++ * @btnc_inode: inode to be initialized
++ *
++ * nilfs_init_btnc_inode() sets up an inode for B-tree node cache.
++ */
++void nilfs_init_btnc_inode(struct inode *btnc_inode)
++{
++ struct nilfs_inode_info *ii = NILFS_I(btnc_inode);
++
++ btnc_inode->i_mode = S_IFREG;
++ ii->i_flags = 0;
++ memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
++ mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
++}
++
+ void nilfs_btnode_cache_clear(struct address_space *btnc)
+ {
+ invalidate_mapping_pages(btnc, 0, -1);
+@@ -29,7 +46,7 @@ void nilfs_btnode_cache_clear(struct address_space *btnc)
+ struct buffer_head *
+ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
+ {
+- struct inode *inode = NILFS_BTNC_I(btnc);
++ struct inode *inode = btnc->host;
+ struct buffer_head *bh;
+
+ bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
+@@ -57,7 +74,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
+ struct buffer_head **pbh, sector_t *submit_ptr)
+ {
+ struct buffer_head *bh;
+- struct inode *inode = NILFS_BTNC_I(btnc);
++ struct inode *inode = btnc->host;
+ struct page *page;
+ int err;
+
+@@ -157,7 +174,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
+ struct nilfs_btnode_chkey_ctxt *ctxt)
+ {
+ struct buffer_head *obh, *nbh;
+- struct inode *inode = NILFS_BTNC_I(btnc);
++ struct inode *inode = btnc->host;
+ __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
+ int err;
+
+diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
+index 11663650add73..bd5544e63a01d 100644
+--- a/fs/nilfs2/btnode.h
++++ b/fs/nilfs2/btnode.h
+@@ -30,6 +30,7 @@ struct nilfs_btnode_chkey_ctxt {
+ struct buffer_head *newbh;
+ };
+
++void nilfs_init_btnc_inode(struct inode *btnc_inode);
+ void nilfs_btnode_cache_clear(struct address_space *);
+ struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
+ __u64 blocknr);
+diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
+index 3594eabe14194..f544c22fff78b 100644
+--- a/fs/nilfs2/btree.c
++++ b/fs/nilfs2/btree.c
+@@ -58,7 +58,8 @@ static void nilfs_btree_free_path(struct nilfs_btree_path *path)
+ static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
+ __u64 ptr, struct buffer_head **bhp)
+ {
+- struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
++ struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
++ struct address_space *btnc = btnc_inode->i_mapping;
+ struct buffer_head *bh;
+
+ bh = nilfs_btnode_create_block(btnc, ptr);
+@@ -470,7 +471,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
+ struct buffer_head **bhp,
+ const struct nilfs_btree_readahead_info *ra)
+ {
+- struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
++ struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
++ struct address_space *btnc = btnc_inode->i_mapping;
+ struct buffer_head *bh, *ra_bh;
+ sector_t submit_ptr = 0;
+ int ret;
+@@ -1741,6 +1743,10 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key,
+ dat = nilfs_bmap_get_dat(btree);
+ }
+
++ ret = nilfs_attach_btree_node_cache(&NILFS_BMAP_I(btree)->vfs_inode);
++ if (ret < 0)
++ return ret;
++
+ ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
+ if (ret < 0)
+ return ret;
+@@ -1913,7 +1919,7 @@ static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree,
+ path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
+ path[level].bp_ctxt.bh = path[level].bp_bh;
+ ret = nilfs_btnode_prepare_change_key(
+- &NILFS_BMAP_I(btree)->i_btnode_cache,
++ NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
+ &path[level].bp_ctxt);
+ if (ret < 0) {
+ nilfs_dat_abort_update(dat,
+@@ -1939,7 +1945,7 @@ static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree,
+
+ if (buffer_nilfs_node(path[level].bp_bh)) {
+ nilfs_btnode_commit_change_key(
+- &NILFS_BMAP_I(btree)->i_btnode_cache,
++ NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
+ &path[level].bp_ctxt);
+ path[level].bp_bh = path[level].bp_ctxt.bh;
+ }
+@@ -1958,7 +1964,7 @@ static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree,
+ &path[level].bp_newreq.bpr_req);
+ if (buffer_nilfs_node(path[level].bp_bh))
+ nilfs_btnode_abort_change_key(
+- &NILFS_BMAP_I(btree)->i_btnode_cache,
++ NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
+ &path[level].bp_ctxt);
+ }
+
+@@ -2134,7 +2140,8 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
+ static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
+ struct list_head *listp)
+ {
+- struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache;
++ struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
++ struct address_space *btcache = btnc_inode->i_mapping;
+ struct list_head lists[NILFS_BTREE_LEVEL_MAX];
+ struct pagevec pvec;
+ struct buffer_head *bh, *head;
+@@ -2188,12 +2195,12 @@ static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
+ path[level].bp_ctxt.newkey = blocknr;
+ path[level].bp_ctxt.bh = *bh;
+ ret = nilfs_btnode_prepare_change_key(
+- &NILFS_BMAP_I(btree)->i_btnode_cache,
++ NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
+ &path[level].bp_ctxt);
+ if (ret < 0)
+ return ret;
+ nilfs_btnode_commit_change_key(
+- &NILFS_BMAP_I(btree)->i_btnode_cache,
++ NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
+ &path[level].bp_ctxt);
+ *bh = path[level].bp_ctxt.bh;
+ }
+@@ -2398,6 +2405,10 @@ int nilfs_btree_init(struct nilfs_bmap *bmap)
+
+ if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode))
+ ret = -EIO;
++ else
++ ret = nilfs_attach_btree_node_cache(
++ &NILFS_BMAP_I(bmap)->vfs_inode);
++
+ return ret;
+ }
+
+diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
+index dc51d3b7a7bff..3b55e239705f4 100644
+--- a/fs/nilfs2/dat.c
++++ b/fs/nilfs2/dat.c
+@@ -497,7 +497,9 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
+ di = NILFS_DAT_I(dat);
+ lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
+ nilfs_palloc_setup_cache(dat, &di->palloc_cache);
+- nilfs_mdt_setup_shadow_map(dat, &di->shadow);
++ err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
++ if (err)
++ goto failed;
+
+ err = nilfs_read_inode_common(dat, raw_inode);
+ if (err)
+diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
+index a8f5315f01e3a..04fdd420eae72 100644
+--- a/fs/nilfs2/gcinode.c
++++ b/fs/nilfs2/gcinode.c
+@@ -126,9 +126,10 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
+ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
+ __u64 vbn, struct buffer_head **out_bh)
+ {
++ struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode;
+ int ret;
+
+- ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
++ ret = nilfs_btnode_submit_block(btnc_inode->i_mapping,
+ vbn ? : pbn, pbn, REQ_OP_READ, 0,
+ out_bh, &pbn);
+ if (ret == -EEXIST) /* internal code (cache hit) */
+@@ -170,7 +171,7 @@ int nilfs_init_gcinode(struct inode *inode)
+ ii->i_flags = 0;
+ nilfs_bmap_init_gc(ii->i_bmap);
+
+- return 0;
++ return nilfs_attach_btree_node_cache(inode);
+ }
+
+ /**
+@@ -185,7 +186,7 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
+ ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
+ list_del_init(&ii->i_dirty);
+ truncate_inode_pages(&ii->vfs_inode.i_data, 0);
+- nilfs_btnode_cache_clear(&ii->i_btnode_cache);
++ nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
+ iput(&ii->vfs_inode);
+ }
+ }
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index e3d807d5b83ad..d63d4bbad9fef 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -29,12 +29,16 @@
+ * @cno: checkpoint number
+ * @root: pointer on NILFS root object (mounted checkpoint)
+ * @for_gc: inode for GC flag
++ * @for_btnc: inode for B-tree node cache flag
++ * @for_shadow: inode for shadowed page cache flag
+ */
+ struct nilfs_iget_args {
+ u64 ino;
+ __u64 cno;
+ struct nilfs_root *root;
+- int for_gc;
++ bool for_gc;
++ bool for_btnc;
++ bool for_shadow;
+ };
+
+ static int nilfs_iget_test(struct inode *inode, void *opaque);
+@@ -314,7 +318,8 @@ static int nilfs_insert_inode_locked(struct inode *inode,
+ unsigned long ino)
+ {
+ struct nilfs_iget_args args = {
+- .ino = ino, .root = root, .cno = 0, .for_gc = 0
++ .ino = ino, .root = root, .cno = 0, .for_gc = false,
++ .for_btnc = false, .for_shadow = false
+ };
+
+ return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
+@@ -527,6 +532,19 @@ static int nilfs_iget_test(struct inode *inode, void *opaque)
+ return 0;
+
+ ii = NILFS_I(inode);
++ if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
++ if (!args->for_btnc)
++ return 0;
++ } else if (args->for_btnc) {
++ return 0;
++ }
++ if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
++ if (!args->for_shadow)
++ return 0;
++ } else if (args->for_shadow) {
++ return 0;
++ }
++
+ if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
+ return !args->for_gc;
+
+@@ -538,15 +556,17 @@ static int nilfs_iget_set(struct inode *inode, void *opaque)
+ struct nilfs_iget_args *args = opaque;
+
+ inode->i_ino = args->ino;
+- if (args->for_gc) {
++ NILFS_I(inode)->i_cno = args->cno;
++ NILFS_I(inode)->i_root = args->root;
++ if (args->root && args->ino == NILFS_ROOT_INO)
++ nilfs_get_root(args->root);
++
++ if (args->for_gc)
+ NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
+- NILFS_I(inode)->i_cno = args->cno;
+- NILFS_I(inode)->i_root = NULL;
+- } else {
+- if (args->root && args->ino == NILFS_ROOT_INO)
+- nilfs_get_root(args->root);
+- NILFS_I(inode)->i_root = args->root;
+- }
++ if (args->for_btnc)
++ NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
++ if (args->for_shadow)
++ NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
+ return 0;
+ }
+
+@@ -554,7 +574,8 @@ struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
+ unsigned long ino)
+ {
+ struct nilfs_iget_args args = {
+- .ino = ino, .root = root, .cno = 0, .for_gc = 0
++ .ino = ino, .root = root, .cno = 0, .for_gc = false,
++ .for_btnc = false, .for_shadow = false
+ };
+
+ return ilookup5(sb, ino, nilfs_iget_test, &args);
+@@ -564,7 +585,8 @@ struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
+ unsigned long ino)
+ {
+ struct nilfs_iget_args args = {
+- .ino = ino, .root = root, .cno = 0, .for_gc = 0
++ .ino = ino, .root = root, .cno = 0, .for_gc = false,
++ .for_btnc = false, .for_shadow = false
+ };
+
+ return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
+@@ -595,7 +617,8 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
+ __u64 cno)
+ {
+ struct nilfs_iget_args args = {
+- .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
++ .ino = ino, .root = NULL, .cno = cno, .for_gc = true,
++ .for_btnc = false, .for_shadow = false
+ };
+ struct inode *inode;
+ int err;
+@@ -615,6 +638,113 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
+ return inode;
+ }
+
++/**
++ * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
++ * @inode: inode object
++ *
++ * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
++ * or does nothing if the inode already has it. This function allocates
++ * an additional inode to maintain page cache of B-tree nodes one-on-one.
++ *
++ * Return Value: On success, 0 is returned. On errors, one of the following
++ * negative error codes is returned.
++ *
++ * %-ENOMEM - Insufficient memory available.
++ */
++int nilfs_attach_btree_node_cache(struct inode *inode)
++{
++ struct nilfs_inode_info *ii = NILFS_I(inode);
++ struct inode *btnc_inode;
++ struct nilfs_iget_args args;
++
++ if (ii->i_assoc_inode)
++ return 0;
++
++ args.ino = inode->i_ino;
++ args.root = ii->i_root;
++ args.cno = ii->i_cno;
++ args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
++ args.for_btnc = true;
++ args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
++
++ btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
++ nilfs_iget_set, &args);
++ if (unlikely(!btnc_inode))
++ return -ENOMEM;
++ if (btnc_inode->i_state & I_NEW) {
++ nilfs_init_btnc_inode(btnc_inode);
++ unlock_new_inode(btnc_inode);
++ }
++ NILFS_I(btnc_inode)->i_assoc_inode = inode;
++ NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
++ ii->i_assoc_inode = btnc_inode;
++
++ return 0;
++}
++
++/**
++ * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
++ * @inode: inode object
++ *
++ * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
++ * holder inode bound to @inode, or does nothing if @inode doesn't have it.
++ */
++void nilfs_detach_btree_node_cache(struct inode *inode)
++{
++ struct nilfs_inode_info *ii = NILFS_I(inode);
++ struct inode *btnc_inode = ii->i_assoc_inode;
++
++ if (btnc_inode) {
++ NILFS_I(btnc_inode)->i_assoc_inode = NULL;
++ ii->i_assoc_inode = NULL;
++ iput(btnc_inode);
++ }
++}
++
++/**
++ * nilfs_iget_for_shadow - obtain inode for shadow mapping
++ * @inode: inode object that uses shadow mapping
++ *
++ * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
++ * caches for shadow mapping. The page cache for data pages is set up
++ * in one inode and the one for b-tree node pages is set up in the
++ * other inode, which is attached to the former inode.
++ *
++ * Return Value: On success, a pointer to the inode for data pages is
++ * returned. On errors, one of the following negative error codes is returned
++ * in a pointer type.
++ *
++ * %-ENOMEM - Insufficient memory available.
++ */
++struct inode *nilfs_iget_for_shadow(struct inode *inode)
++{
++ struct nilfs_iget_args args = {
++ .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
++ .for_btnc = false, .for_shadow = true
++ };
++ struct inode *s_inode;
++ int err;
++
++ s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
++ nilfs_iget_set, &args);
++ if (unlikely(!s_inode))
++ return ERR_PTR(-ENOMEM);
++ if (!(s_inode->i_state & I_NEW))
++ return inode;
++
++ NILFS_I(s_inode)->i_flags = 0;
++ memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
++ mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
++
++ err = nilfs_attach_btree_node_cache(s_inode);
++ if (unlikely(err)) {
++ iget_failed(s_inode);
++ return ERR_PTR(err);
++ }
++ unlock_new_inode(s_inode);
++ return s_inode;
++}
++
+ void nilfs_write_inode_common(struct inode *inode,
+ struct nilfs_inode *raw_inode, int has_bmap)
+ {
+@@ -762,7 +892,8 @@ static void nilfs_clear_inode(struct inode *inode)
+ if (test_bit(NILFS_I_BMAP, &ii->i_state))
+ nilfs_bmap_clear(ii->i_bmap);
+
+- nilfs_btnode_cache_clear(&ii->i_btnode_cache);
++ if (!test_bit(NILFS_I_BTNC, &ii->i_state))
++ nilfs_detach_btree_node_cache(inode);
+
+ if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
+ nilfs_put_root(ii->i_root);
+diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
+index 4b3d33cf0041f..880b5e8cd3ecd 100644
+--- a/fs/nilfs2/mdt.c
++++ b/fs/nilfs2/mdt.c
+@@ -470,9 +470,18 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
+ void nilfs_mdt_clear(struct inode *inode)
+ {
+ struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
++ struct nilfs_shadow_map *shadow = mdi->mi_shadow;
+
+ if (mdi->mi_palloc_cache)
+ nilfs_palloc_destroy_cache(inode);
++
++ if (shadow) {
++ struct inode *s_inode = shadow->inode;
++
++ shadow->inode = NULL;
++ iput(s_inode);
++ mdi->mi_shadow = NULL;
++ }
+ }
+
+ /**
+@@ -506,12 +515,15 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
+ struct nilfs_shadow_map *shadow)
+ {
+ struct nilfs_mdt_info *mi = NILFS_MDT(inode);
++ struct inode *s_inode;
+
+ INIT_LIST_HEAD(&shadow->frozen_buffers);
+- address_space_init_once(&shadow->frozen_data);
+- nilfs_mapping_init(&shadow->frozen_data, inode);
+- address_space_init_once(&shadow->frozen_btnodes);
+- nilfs_mapping_init(&shadow->frozen_btnodes, inode);
++
++ s_inode = nilfs_iget_for_shadow(inode);
++ if (IS_ERR(s_inode))
++ return PTR_ERR(s_inode);
++
++ shadow->inode = s_inode;
+ mi->mi_shadow = shadow;
+ return 0;
+ }
+@@ -525,14 +537,15 @@ int nilfs_mdt_save_to_shadow_map(struct inode *inode)
+ struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+ struct nilfs_inode_info *ii = NILFS_I(inode);
+ struct nilfs_shadow_map *shadow = mi->mi_shadow;
++ struct inode *s_inode = shadow->inode;
+ int ret;
+
+- ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
++ ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping);
+ if (ret)
+ goto out;
+
+- ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
+- &ii->i_btnode_cache);
++ ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping,
++ ii->i_assoc_inode->i_mapping);
+ if (ret)
+ goto out;
+
+@@ -548,7 +561,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
+ struct page *page;
+ int blkbits = inode->i_blkbits;
+
+- page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
++ page = grab_cache_page(shadow->inode->i_mapping, bh->b_page->index);
+ if (!page)
+ return -ENOMEM;
+
+@@ -580,7 +593,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
+ struct page *page;
+ int n;
+
+- page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
++ page = find_lock_page(shadow->inode->i_mapping, bh->b_page->index);
+ if (page) {
+ if (page_has_buffers(page)) {
+ n = bh_offset(bh) >> inode->i_blkbits;
+@@ -621,10 +634,11 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
+ nilfs_palloc_clear_cache(inode);
+
+ nilfs_clear_dirty_pages(inode->i_mapping, true);
+- nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
++ nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);
+
+- nilfs_clear_dirty_pages(&ii->i_btnode_cache, true);
+- nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);
++ nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true);
++ nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
++ NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);
+
+ nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
+
+@@ -639,10 +653,11 @@ void nilfs_mdt_clear_shadow_map(struct inode *inode)
+ {
+ struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+ struct nilfs_shadow_map *shadow = mi->mi_shadow;
++ struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode;
+
+ down_write(&mi->mi_sem);
+ nilfs_release_frozen_buffers(shadow);
+- truncate_inode_pages(&shadow->frozen_data, 0);
+- truncate_inode_pages(&shadow->frozen_btnodes, 0);
++ truncate_inode_pages(shadow->inode->i_mapping, 0);
++ truncate_inode_pages(shadow_btnc_inode->i_mapping, 0);
+ up_write(&mi->mi_sem);
+ }
+diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
+index 8f86080a436de..9e23bab3ff127 100644
+--- a/fs/nilfs2/mdt.h
++++ b/fs/nilfs2/mdt.h
+@@ -18,14 +18,12 @@
+ /**
+ * struct nilfs_shadow_map - shadow mapping of meta data file
+ * @bmap_store: shadow copy of bmap state
+- * @frozen_data: shadowed dirty data pages
+- * @frozen_btnodes: shadowed dirty b-tree nodes' pages
++ * @inode: holder of page caches used in shadow mapping
+ * @frozen_buffers: list of frozen buffers
+ */
+ struct nilfs_shadow_map {
+ struct nilfs_bmap_store bmap_store;
+- struct address_space frozen_data;
+- struct address_space frozen_btnodes;
++ struct inode *inode;
+ struct list_head frozen_buffers;
+ };
+
+diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
+index a7b81755c3501..1344f7d475d3c 100644
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -28,7 +28,7 @@
+ * @i_xattr: <TODO>
+ * @i_dir_start_lookup: page index of last successful search
+ * @i_cno: checkpoint number for GC inode
+- * @i_btnode_cache: cached pages of b-tree nodes
++ * @i_assoc_inode: associated inode (B-tree node cache holder or back pointer)
+ * @i_dirty: list for connecting dirty files
+ * @xattr_sem: semaphore for extended attributes processing
+ * @i_bh: buffer contains disk inode
+@@ -43,7 +43,7 @@ struct nilfs_inode_info {
+ __u64 i_xattr; /* sector_t ??? */
+ __u32 i_dir_start_lookup;
+ __u64 i_cno; /* check point number for GC inode */
+- struct address_space i_btnode_cache;
++ struct inode *i_assoc_inode;
+ struct list_head i_dirty; /* List for connecting dirty files */
+
+ #ifdef CONFIG_NILFS_XATTR
+@@ -75,13 +75,6 @@ NILFS_BMAP_I(const struct nilfs_bmap *bmap)
+ return container_of(bmap, struct nilfs_inode_info, i_bmap_data);
+ }
+
+-static inline struct inode *NILFS_BTNC_I(struct address_space *btnc)
+-{
+- struct nilfs_inode_info *ii =
+- container_of(btnc, struct nilfs_inode_info, i_btnode_cache);
+- return &ii->vfs_inode;
+-}
+-
+ /*
+ * Dynamic state flags of NILFS on-memory inode (i_state)
+ */
+@@ -98,6 +91,8 @@ enum {
+ NILFS_I_INODE_SYNC, /* dsync is not allowed for inode */
+ NILFS_I_BMAP, /* has bmap and btnode_cache */
+ NILFS_I_GCINODE, /* inode for GC, on memory only */
++ NILFS_I_BTNC, /* inode for btree node cache */
++ NILFS_I_SHADOW, /* inode for shadowed page cache */
+ };
+
+ /*
+@@ -267,6 +262,9 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
+ unsigned long ino);
+ extern struct inode *nilfs_iget_for_gc(struct super_block *sb,
+ unsigned long ino, __u64 cno);
++int nilfs_attach_btree_node_cache(struct inode *inode);
++void nilfs_detach_btree_node_cache(struct inode *inode);
++struct inode *nilfs_iget_for_shadow(struct inode *inode);
+ extern void nilfs_update_inode(struct inode *, struct buffer_head *, int);
+ extern void nilfs_truncate(struct inode *);
+ extern void nilfs_evict_inode(struct inode *);
+diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
+index 063dd16d75b59..45e0792950080 100644
+--- a/fs/nilfs2/page.c
++++ b/fs/nilfs2/page.c
+@@ -448,10 +448,9 @@ void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
+ /*
+ * NILFS2 needs clear_page_dirty() in the following two cases:
+ *
+- * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
+- * page dirty flags when it copies back pages from the shadow cache
+- * (gcdat->{i_mapping,i_btnode_cache}) to its original cache
+- * (dat->{i_mapping,i_btnode_cache}).
++ * 1) For B-tree node pages and data pages of the DAT file, NILFS2 clears the
++ * dirty flag of pages when it copies back pages from the shadow cache to
++ * the original cache.
+ *
+ * 2) Some B-tree operations like insertion or deletion may dispose buffers
+ * in dirty state, and this needs to cancel the dirty state of their pages.
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 85a8533347718..0afe0832c7547 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -733,15 +733,18 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
+ struct list_head *listp)
+ {
+ struct nilfs_inode_info *ii = NILFS_I(inode);
+- struct address_space *mapping = &ii->i_btnode_cache;
++ struct inode *btnc_inode = ii->i_assoc_inode;
+ struct pagevec pvec;
+ struct buffer_head *bh, *head;
+ unsigned int i;
+ pgoff_t index = 0;
+
++ if (!btnc_inode)
++ return;
++
+ pagevec_init(&pvec);
+
+- while (pagevec_lookup_tag(&pvec, mapping, &index,
++ while (pagevec_lookup_tag(&pvec, btnc_inode->i_mapping, &index,
+ PAGECACHE_TAG_DIRTY)) {
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ bh = head = page_buffers(pvec.pages[i]);
+@@ -2410,7 +2413,7 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
+ continue;
+ list_del_init(&ii->i_dirty);
+ truncate_inode_pages(&ii->vfs_inode.i_data, 0);
+- nilfs_btnode_cache_clear(&ii->i_btnode_cache);
++ nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
+ iput(&ii->vfs_inode);
+ }
+ }
+diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
+index 63e5fa74016c7..c4c6578185d57 100644
+--- a/fs/nilfs2/super.c
++++ b/fs/nilfs2/super.c
+@@ -157,7 +157,8 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
+ ii->i_bh = NULL;
+ ii->i_state = 0;
+ ii->i_cno = 0;
+- nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode);
++ ii->i_assoc_inode = NULL;
++ ii->i_bmap = &ii->i_bmap_data;
+ return &ii->vfs_inode;
+ }
+
+@@ -1377,8 +1378,6 @@ static void nilfs_inode_init_once(void *obj)
+ #ifdef CONFIG_NILFS_XATTR
+ init_rwsem(&ii->xattr_sem);
+ #endif
+- address_space_init_once(&ii->i_btnode_cache);
+- ii->i_bmap = &ii->i_bmap_data;
+ inode_init_once(&ii->vfs_inode);
+ }
+
+diff --git a/include/linux/audit.h b/include/linux/audit.h
+index d06134ac6245f..cece702311388 100644
+--- a/include/linux/audit.h
++++ b/include/linux/audit.h
+@@ -339,7 +339,7 @@ static inline void audit_uring_entry(u8 op)
+ }
+ static inline void audit_uring_exit(int success, long code)
+ {
+- if (unlikely(!audit_dummy_context()))
++ if (unlikely(audit_context()))
+ __audit_uring_exit(success, code);
+ }
+ static inline void audit_syscall_entry(int major, unsigned long a0,
+diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
+index 3431011f364dd..cba8a6ffc3290 100644
+--- a/include/linux/ceph/osd_client.h
++++ b/include/linux/ceph/osd_client.h
+@@ -287,6 +287,9 @@ struct ceph_osd_linger_request {
+ rados_watcherrcb_t errcb;
+ void *data;
+
++ struct ceph_pagelist *request_pl;
++ struct page **notify_id_pages;
++
+ struct page ***preply_pages;
+ size_t *preply_len;
+ };
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index 8359c50f99884..ec5f71f7135b0 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -262,6 +262,8 @@ resource_union(struct resource *r1, struct resource *r2, struct resource *r)
+ #define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
+ #define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
+ #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
++#define request_mem_region_muxed(start, n, name) \
++ __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_MUXED)
+ #define request_mem_region_exclusive(start,n,name) \
+ __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE)
+ #define rename_region(region, newname) do { (region)->name = (newname); } while (0)
+diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
+index 808bb4cee2300..b0da04fe087bb 100644
+--- a/include/linux/mc146818rtc.h
++++ b/include/linux/mc146818rtc.h
+@@ -86,6 +86,8 @@ struct cmos_rtc_board_info {
+ /* 2 values for divider stage reset, others for "testing purposes only" */
+ # define RTC_DIV_RESET1 0x60
+ # define RTC_DIV_RESET2 0x70
++ /* In AMD BKDG bit 5 and 6 are reserved, bit 4 is for select dv0 bank */
++# define RTC_AMD_BANK_SELECT 0x10
+ /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
+ # define RTC_RATE_SELECT 0x0F
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index f53ea70384418..dadd4d2f6d8ac 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -891,7 +891,7 @@ struct net_device_path_stack {
+
+ struct net_device_path_ctx {
+ const struct net_device *dev;
+- const u8 *daddr;
++ u8 daddr[ETH_ALEN];
+
+ int num_vlans;
+ struct {
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 25b3ef71f495e..7fc4e9f49f542 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -121,10 +121,12 @@ enum lockdown_reason {
+ LOCKDOWN_DEBUGFS,
+ LOCKDOWN_XMON_WR,
+ LOCKDOWN_BPF_WRITE_USER,
++ LOCKDOWN_DBG_WRITE_KERNEL,
+ LOCKDOWN_INTEGRITY_MAX,
+ LOCKDOWN_KCORE,
+ LOCKDOWN_KPROBES,
+ LOCKDOWN_BPF_READ_KERNEL,
++ LOCKDOWN_DBG_READ_KERNEL,
+ LOCKDOWN_PERF,
+ LOCKDOWN_TRACEFS,
+ LOCKDOWN_XMON_RW,
+diff --git a/include/net/ip.h b/include/net/ip.h
+index b51bae43b0ddb..9fba950fdf12e 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -56,6 +56,7 @@ struct inet_skb_parm {
+ #define IPSKB_DOREDIRECT BIT(5)
+ #define IPSKB_FRAG_PMTU BIT(6)
+ #define IPSKB_L3SLAVE BIT(7)
++#define IPSKB_NOPOLICY BIT(8)
+
+ u16 frag_max_size;
+ };
+diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
+index 947733a639a6f..bd7c3be4af5d7 100644
+--- a/include/net/netns/xfrm.h
++++ b/include/net/netns/xfrm.h
+@@ -66,11 +66,7 @@ struct netns_xfrm {
+ int sysctl_larval_drop;
+ u32 sysctl_acq_expires;
+
+- u8 policy_default;
+-#define XFRM_POL_DEFAULT_IN 1
+-#define XFRM_POL_DEFAULT_OUT 2
+-#define XFRM_POL_DEFAULT_FWD 4
+-#define XFRM_POL_DEFAULT_MASK 7
++ u8 policy_default[XFRM_POLICY_MAX];
+
+ #ifdef CONFIG_SYSCTL
+ struct ctl_table_header *sysctl_hdr;
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index 76aa6f11a5409..d2efddce65d46 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -1081,24 +1081,29 @@ xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, un
+ }
+
+ #ifdef CONFIG_XFRM
+-static inline bool
+-xfrm_default_allow(struct net *net, int dir)
+-{
+- u8 def = net->xfrm.policy_default;
+-
+- switch (dir) {
+- case XFRM_POLICY_IN:
+- return def & XFRM_POL_DEFAULT_IN ? false : true;
+- case XFRM_POLICY_OUT:
+- return def & XFRM_POL_DEFAULT_OUT ? false : true;
+- case XFRM_POLICY_FWD:
+- return def & XFRM_POL_DEFAULT_FWD ? false : true;
+- }
++int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
++ unsigned short family);
++
++static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
++ int dir)
++{
++ if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
++ return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
++
+ return false;
+ }
+
+-int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
+- unsigned short family);
++static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
++ int dir, unsigned short family)
++{
++ if (dir != XFRM_POLICY_OUT && family == AF_INET) {
++ /* same dst may be used for traffic originating from
++ * devices with different policy settings.
++ */
++ return IPCB(skb)->flags & IPSKB_NOPOLICY;
++ }
++ return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
++}
+
+ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
+ struct sk_buff *skb,
+@@ -1110,13 +1115,9 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
+ if (sk && sk->sk_policy[XFRM_POLICY_IN])
+ return __xfrm_policy_check(sk, ndir, skb, family);
+
+- if (xfrm_default_allow(net, dir))
+- return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
+- (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
+- __xfrm_policy_check(sk, ndir, skb, family);
+- else
+- return (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
+- __xfrm_policy_check(sk, ndir, skb, family);
++ return __xfrm_check_nopolicy(net, skb, dir) ||
++ __xfrm_check_dev_nopolicy(skb, dir, family) ||
++ __xfrm_policy_check(sk, ndir, skb, family);
+ }
+
+ static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
+@@ -1168,13 +1169,12 @@ static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
+ {
+ struct net *net = dev_net(skb->dev);
+
+- if (xfrm_default_allow(net, XFRM_POLICY_OUT))
+- return !net->xfrm.policy_count[XFRM_POLICY_OUT] ||
+- (skb_dst(skb)->flags & DST_NOXFRM) ||
+- __xfrm_route_forward(skb, family);
+- else
+- return (skb_dst(skb)->flags & DST_NOXFRM) ||
+- __xfrm_route_forward(skb, family);
++ if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
++ net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
++ return true;
++
++ return (skb_dst(skb)->flags & DST_NOXFRM) ||
++ __xfrm_route_forward(skb, family);
+ }
+
+ static inline int xfrm4_route_forward(struct sk_buff *skb)
+diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h
+index 8e4a2ca0bcbf7..b1523cb8ab307 100644
+--- a/include/uapi/linux/dma-buf.h
++++ b/include/uapi/linux/dma-buf.h
+@@ -92,7 +92,7 @@ struct dma_buf_sync {
+ * between them in actual uapi, they're just different numbers.
+ */
+ #define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *)
+-#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, u32)
+-#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, u64)
++#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, __u32)
++#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, __u64)
+
+ #endif
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index ea2ee1181921e..f3a2abd6d1a19 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -1959,6 +1959,12 @@ void __audit_uring_exit(int success, long code)
+ {
+ struct audit_context *ctx = audit_context();
+
++ if (ctx->dummy) {
++ if (ctx->context != AUDIT_CTX_URING)
++ return;
++ goto out;
++ }
++
+ if (ctx->context == AUDIT_CTX_SYSCALL) {
+ /*
+ * NOTE: See the note in __audit_uring_entry() about the case
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index da06a5553835b..7beceb447211d 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -53,6 +53,7 @@
+ #include <linux/vmacache.h>
+ #include <linux/rcupdate.h>
+ #include <linux/irq.h>
++#include <linux/security.h>
+
+ #include <asm/cacheflush.h>
+ #include <asm/byteorder.h>
+@@ -752,6 +753,29 @@ cpu_master_loop:
+ continue;
+ kgdb_connected = 0;
+ } else {
++ /*
++ * This is a brutal way to interfere with the debugger
++ * and prevent gdb being used to poke at kernel memory.
++ * This could cause trouble if lockdown is applied when
++ * there is already an active gdb session. For now the
++ * answer is simply "don't do that". Typically lockdown
++ * *will* be applied before the debug core gets started
++ * so only developers using kgdb for fairly advanced
++ * early kernel debug can be bitten by this. Hopefully
++ * they are sophisticated enough to take care of
++ * themselves, especially with help from the lockdown
++ * message printed on the console!
++ */
++ if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) {
++ if (IS_ENABLED(CONFIG_KGDB_KDB)) {
++ /* Switch back to kdb if possible... */
++ dbg_kdb_mode = 1;
++ continue;
++ } else {
++ /* ... otherwise just bail */
++ break;
++ }
++ }
+ error = gdb_serial_stub(ks);
+ }
+
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index 0852a537dad4c..ead4da9471270 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -45,6 +45,7 @@
+ #include <linux/proc_fs.h>
+ #include <linux/uaccess.h>
+ #include <linux/slab.h>
++#include <linux/security.h>
+ #include "kdb_private.h"
+
+ #undef MODULE_PARAM_PREFIX
+@@ -166,10 +167,62 @@ struct task_struct *kdb_curr_task(int cpu)
+ }
+
+ /*
+- * Check whether the flags of the current command and the permissions
+- * of the kdb console has allow a command to be run.
++ * Update the permissions flags (kdb_cmd_enabled) to match the
++ * current lockdown state.
++ *
++ * Within this function the calls to security_locked_down() are "lazy". We
++ * avoid calling them if the current value of kdb_cmd_enabled already excludes
++ * flags that might be subject to lockdown. Additionally we deliberately check
++ * the lockdown flags independently (even though read lockdown implies write
++ * lockdown) since that results in both simpler code and clearer messages to
++ * the user on first-time debugger entry.
++ *
++ * The permission masks during a read+write lockdown permit the following
++ * flags: INSPECT, SIGNAL, REBOOT (and ALWAYS_SAFE).
++ *
++ * The INSPECT commands are not blocked during lockdown because they are
++ * not arbitrary memory reads. INSPECT covers the backtrace family (sometimes
++ * forcing them to have no arguments) and lsmod. These commands do expose
++ * some kernel state but do not allow the developer seated at the console to
++ * choose what state is reported. SIGNAL and REBOOT should not be controversial,
++ * given these are allowed for root during lockdown already.
++ */
++static void kdb_check_for_lockdown(void)
++{
++ const int write_flags = KDB_ENABLE_MEM_WRITE |
++ KDB_ENABLE_REG_WRITE |
++ KDB_ENABLE_FLOW_CTRL;
++ const int read_flags = KDB_ENABLE_MEM_READ |
++ KDB_ENABLE_REG_READ;
++
++ bool need_to_lockdown_write = false;
++ bool need_to_lockdown_read = false;
++
++ if (kdb_cmd_enabled & (KDB_ENABLE_ALL | write_flags))
++ need_to_lockdown_write =
++ security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL);
++
++ if (kdb_cmd_enabled & (KDB_ENABLE_ALL | read_flags))
++ need_to_lockdown_read =
++ security_locked_down(LOCKDOWN_DBG_READ_KERNEL);
++
++ /* De-compose KDB_ENABLE_ALL if required */
++ if (need_to_lockdown_write || need_to_lockdown_read)
++ if (kdb_cmd_enabled & KDB_ENABLE_ALL)
++ kdb_cmd_enabled = KDB_ENABLE_MASK & ~KDB_ENABLE_ALL;
++
++ if (need_to_lockdown_write)
++ kdb_cmd_enabled &= ~write_flags;
++
++ if (need_to_lockdown_read)
++ kdb_cmd_enabled &= ~read_flags;
++}
++
++/*
++ * Check whether the flags of the current command, the permissions of the kdb
++ * console and the lockdown state allow a command to be run.
+ */
+-static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
++static bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
+ bool no_args)
+ {
+ /* permissions comes from userspace so needs massaging slightly */
+@@ -1180,6 +1233,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
+ kdb_curr_task(raw_smp_processor_id());
+
+ KDB_DEBUG_STATE("kdb_local 1", reason);
++
++ kdb_check_for_lockdown();
++
+ kdb_go_count = 0;
+ if (reason == KDB_REASON_DEBUG) {
+ /* special case below */
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index baa0fe350246f..2d7a23a7507b6 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -12327,6 +12327,9 @@ SYSCALL_DEFINE5(perf_event_open,
+ * Do not allow to attach to a group in a different task
+ * or CPU context. If we're moving SW events, we'll fix
+ * this up later, so allow that.
++ *
++ * Racy, not holding group_leader->ctx->mutex, see comment with
++ * perf_event_ctx_lock().
+ */
+ if (!move_group && group_leader->ctx != ctx)
+ goto err_context;
+@@ -12392,6 +12395,7 @@ SYSCALL_DEFINE5(perf_event_open,
+ } else {
+ perf_event_ctx_unlock(group_leader, gctx);
+ move_group = 0;
++ goto not_move_group;
+ }
+ }
+
+@@ -12408,7 +12412,17 @@ SYSCALL_DEFINE5(perf_event_open,
+ }
+ } else {
+ mutex_lock(&ctx->mutex);
++
++ /*
++ * Now that we hold ctx->lock, (re)validate group_leader->ctx == ctx,
++ * see the group_leader && !move_group test earlier.
++ */
++ if (group_leader && group_leader->ctx != ctx) {
++ err = -EINVAL;
++ goto err_locked;
++ }
+ }
++not_move_group:
+
+ if (ctx->task == TASK_TOMBSTONE) {
+ err = -ESRCH;
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index b50382f957c12..6743c8a0fe8e1 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -39,6 +39,13 @@ static int br_pass_frame_up(struct sk_buff *skb)
+ dev_sw_netstats_rx_add(brdev, skb->len);
+
+ vg = br_vlan_group_rcu(br);
++
++ /* Reset the offload_fwd_mark because there could be a stacked
++ * bridge above, and it should not think this bridge is doing
++ * that bridge's work forwarding out its ports.
++ */
++ br_switchdev_frame_unmark(skb);
++
+ /* Bridge is just like any other port. Make sure the
+ * packet is allowed except in promisc mode when someone
+ * may be running packet capture.
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index 1c5815530e0dd..3814e1d50a446 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -537,43 +537,6 @@ static void request_init(struct ceph_osd_request *req)
+ target_init(&req->r_t);
+ }
+
+-/*
+- * This is ugly, but it allows us to reuse linger registration and ping
+- * requests, keeping the structure of the code around send_linger{_ping}()
+- * reasonable. Setting up a min_nr=2 mempool for each linger request
+- * and dealing with copying ops (this blasts req only, watch op remains
+- * intact) isn't any better.
+- */
+-static void request_reinit(struct ceph_osd_request *req)
+-{
+- struct ceph_osd_client *osdc = req->r_osdc;
+- bool mempool = req->r_mempool;
+- unsigned int num_ops = req->r_num_ops;
+- u64 snapid = req->r_snapid;
+- struct ceph_snap_context *snapc = req->r_snapc;
+- bool linger = req->r_linger;
+- struct ceph_msg *request_msg = req->r_request;
+- struct ceph_msg *reply_msg = req->r_reply;
+-
+- dout("%s req %p\n", __func__, req);
+- WARN_ON(kref_read(&req->r_kref) != 1);
+- request_release_checks(req);
+-
+- WARN_ON(kref_read(&request_msg->kref) != 1);
+- WARN_ON(kref_read(&reply_msg->kref) != 1);
+- target_destroy(&req->r_t);
+-
+- request_init(req);
+- req->r_osdc = osdc;
+- req->r_mempool = mempool;
+- req->r_num_ops = num_ops;
+- req->r_snapid = snapid;
+- req->r_snapc = snapc;
+- req->r_linger = linger;
+- req->r_request = request_msg;
+- req->r_reply = reply_msg;
+-}
+-
+ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
+ struct ceph_snap_context *snapc,
+ unsigned int num_ops,
+@@ -918,14 +881,30 @@ EXPORT_SYMBOL(osd_req_op_xattr_init);
+ * @watch_opcode: CEPH_OSD_WATCH_OP_*
+ */
+ static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
+- u64 cookie, u8 watch_opcode)
++ u8 watch_opcode, u64 cookie, u32 gen)
+ {
+ struct ceph_osd_req_op *op;
+
+ op = osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
+ op->watch.cookie = cookie;
+ op->watch.op = watch_opcode;
+- op->watch.gen = 0;
++ op->watch.gen = gen;
++}
++
++/*
++ * prot_ver, timeout and notify payload (may be empty) should already be
++ * encoded in @request_pl
++ */
++static void osd_req_op_notify_init(struct ceph_osd_request *req, int which,
++ u64 cookie, struct ceph_pagelist *request_pl)
++{
++ struct ceph_osd_req_op *op;
++
++ op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
++ op->notify.cookie = cookie;
++
++ ceph_osd_data_pagelist_init(&op->notify.request_data, request_pl);
++ op->indata_len = request_pl->length;
+ }
+
+ /*
+@@ -2727,10 +2706,13 @@ static void linger_release(struct kref *kref)
+ WARN_ON(!list_empty(&lreq->pending_lworks));
+ WARN_ON(lreq->osd);
+
+- if (lreq->reg_req)
+- ceph_osdc_put_request(lreq->reg_req);
+- if (lreq->ping_req)
+- ceph_osdc_put_request(lreq->ping_req);
++ if (lreq->request_pl)
++ ceph_pagelist_release(lreq->request_pl);
++ if (lreq->notify_id_pages)
++ ceph_release_page_vector(lreq->notify_id_pages, 1);
++
++ ceph_osdc_put_request(lreq->reg_req);
++ ceph_osdc_put_request(lreq->ping_req);
+ target_destroy(&lreq->t);
+ kfree(lreq);
+ }
+@@ -2999,6 +2981,12 @@ static void linger_commit_cb(struct ceph_osd_request *req)
+ struct ceph_osd_linger_request *lreq = req->r_priv;
+
+ mutex_lock(&lreq->lock);
++ if (req != lreq->reg_req) {
++ dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n",
++ __func__, lreq, lreq->linger_id, req, lreq->reg_req);
++ goto out;
++ }
++
+ dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
+ lreq->linger_id, req->r_result);
+ linger_reg_commit_complete(lreq, req->r_result);
+@@ -3022,6 +3010,7 @@ static void linger_commit_cb(struct ceph_osd_request *req)
+ }
+ }
+
++out:
+ mutex_unlock(&lreq->lock);
+ linger_put(lreq);
+ }
+@@ -3044,6 +3033,12 @@ static void linger_reconnect_cb(struct ceph_osd_request *req)
+ struct ceph_osd_linger_request *lreq = req->r_priv;
+
+ mutex_lock(&lreq->lock);
++ if (req != lreq->reg_req) {
++ dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n",
++ __func__, lreq, lreq->linger_id, req, lreq->reg_req);
++ goto out;
++ }
++
+ dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
+ lreq, lreq->linger_id, req->r_result, lreq->last_error);
+ if (req->r_result < 0) {
+@@ -3053,46 +3048,64 @@ static void linger_reconnect_cb(struct ceph_osd_request *req)
+ }
+ }
+
++out:
+ mutex_unlock(&lreq->lock);
+ linger_put(lreq);
+ }
+
+ static void send_linger(struct ceph_osd_linger_request *lreq)
+ {
+- struct ceph_osd_request *req = lreq->reg_req;
+- struct ceph_osd_req_op *op = &req->r_ops[0];
++ struct ceph_osd_client *osdc = lreq->osdc;
++ struct ceph_osd_request *req;
++ int ret;
+
+- verify_osdc_wrlocked(req->r_osdc);
++ verify_osdc_wrlocked(osdc);
++ mutex_lock(&lreq->lock);
+ dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
+
+- if (req->r_osd)
+- cancel_linger_request(req);
++ if (lreq->reg_req) {
++ if (lreq->reg_req->r_osd)
++ cancel_linger_request(lreq->reg_req);
++ ceph_osdc_put_request(lreq->reg_req);
++ }
++
++ req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO);
++ BUG_ON(!req);
+
+- request_reinit(req);
+ target_copy(&req->r_t, &lreq->t);
+ req->r_mtime = lreq->mtime;
+
+- mutex_lock(&lreq->lock);
+ if (lreq->is_watch && lreq->committed) {
+- WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
+- op->watch.cookie != lreq->linger_id);
+- op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
+- op->watch.gen = ++lreq->register_gen;
++ osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_RECONNECT,
++ lreq->linger_id, ++lreq->register_gen);
+ dout("lreq %p reconnect register_gen %u\n", lreq,
+- op->watch.gen);
++ req->r_ops[0].watch.gen);
+ req->r_callback = linger_reconnect_cb;
+ } else {
+- if (!lreq->is_watch)
++ if (lreq->is_watch) {
++ osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_WATCH,
++ lreq->linger_id, 0);
++ } else {
+ lreq->notify_id = 0;
+- else
+- WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
++
++ refcount_inc(&lreq->request_pl->refcnt);
++ osd_req_op_notify_init(req, 0, lreq->linger_id,
++ lreq->request_pl);
++ ceph_osd_data_pages_init(
++ osd_req_op_data(req, 0, notify, response_data),
++ lreq->notify_id_pages, PAGE_SIZE, 0, false, false);
++ }
+ dout("lreq %p register\n", lreq);
+ req->r_callback = linger_commit_cb;
+ }
+- mutex_unlock(&lreq->lock);
++
++ ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
++ BUG_ON(ret);
+
+ req->r_priv = linger_get(lreq);
+ req->r_linger = true;
++ lreq->reg_req = req;
++ mutex_unlock(&lreq->lock);
+
+ submit_request(req, true);
+ }
+@@ -3102,6 +3115,12 @@ static void linger_ping_cb(struct ceph_osd_request *req)
+ struct ceph_osd_linger_request *lreq = req->r_priv;
+
+ mutex_lock(&lreq->lock);
++ if (req != lreq->ping_req) {
++ dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n",
++ __func__, lreq, lreq->linger_id, req, lreq->ping_req);
++ goto out;
++ }
++
+ dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
+ __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
+ lreq->last_error);
+@@ -3117,6 +3136,7 @@ static void linger_ping_cb(struct ceph_osd_request *req)
+ lreq->register_gen, req->r_ops[0].watch.gen);
+ }
+
++out:
+ mutex_unlock(&lreq->lock);
+ linger_put(lreq);
+ }
+@@ -3124,8 +3144,8 @@ static void linger_ping_cb(struct ceph_osd_request *req)
+ static void send_linger_ping(struct ceph_osd_linger_request *lreq)
+ {
+ struct ceph_osd_client *osdc = lreq->osdc;
+- struct ceph_osd_request *req = lreq->ping_req;
+- struct ceph_osd_req_op *op = &req->r_ops[0];
++ struct ceph_osd_request *req;
++ int ret;
+
+ if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
+ dout("%s PAUSERD\n", __func__);
+@@ -3137,19 +3157,26 @@ static void send_linger_ping(struct ceph_osd_linger_request *lreq)
+ __func__, lreq, lreq->linger_id, lreq->ping_sent,
+ lreq->register_gen);
+
+- if (req->r_osd)
+- cancel_linger_request(req);
++ if (lreq->ping_req) {
++ if (lreq->ping_req->r_osd)
++ cancel_linger_request(lreq->ping_req);
++ ceph_osdc_put_request(lreq->ping_req);
++ }
+
+- request_reinit(req);
+- target_copy(&req->r_t, &lreq->t);
++ req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO);
++ BUG_ON(!req);
+
+- WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
+- op->watch.cookie != lreq->linger_id ||
+- op->watch.op != CEPH_OSD_WATCH_OP_PING);
+- op->watch.gen = lreq->register_gen;
++ target_copy(&req->r_t, &lreq->t);
++ osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_PING, lreq->linger_id,
++ lreq->register_gen);
+ req->r_callback = linger_ping_cb;
++
++ ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
++ BUG_ON(ret);
++
+ req->r_priv = linger_get(lreq);
+ req->r_linger = true;
++ lreq->ping_req = req;
+
+ ceph_osdc_get_request(req);
+ account_request(req);
+@@ -3165,12 +3192,6 @@ static void linger_submit(struct ceph_osd_linger_request *lreq)
+
+ down_write(&osdc->lock);
+ linger_register(lreq);
+- if (lreq->is_watch) {
+- lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id;
+- lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id;
+- } else {
+- lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id;
+- }
+
+ calc_target(osdc, &lreq->t, false);
+ osd = lookup_create_osd(osdc, lreq->t.osd, true);
+@@ -3202,9 +3223,9 @@ static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
+ */
+ static void __linger_cancel(struct ceph_osd_linger_request *lreq)
+ {
+- if (lreq->is_watch && lreq->ping_req->r_osd)
++ if (lreq->ping_req && lreq->ping_req->r_osd)
+ cancel_linger_request(lreq->ping_req);
+- if (lreq->reg_req->r_osd)
++ if (lreq->reg_req && lreq->reg_req->r_osd)
+ cancel_linger_request(lreq->reg_req);
+ cancel_linger_map_check(lreq);
+ unlink_linger(lreq->osd, lreq);
+@@ -4653,43 +4674,6 @@ again:
+ }
+ EXPORT_SYMBOL(ceph_osdc_sync);
+
+-static struct ceph_osd_request *
+-alloc_linger_request(struct ceph_osd_linger_request *lreq)
+-{
+- struct ceph_osd_request *req;
+-
+- req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
+- if (!req)
+- return NULL;
+-
+- ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
+- ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
+- return req;
+-}
+-
+-static struct ceph_osd_request *
+-alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode)
+-{
+- struct ceph_osd_request *req;
+-
+- req = alloc_linger_request(lreq);
+- if (!req)
+- return NULL;
+-
+- /*
+- * Pass 0 for cookie because we don't know it yet, it will be
+- * filled in by linger_submit().
+- */
+- osd_req_op_watch_init(req, 0, 0, watch_opcode);
+-
+- if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
+- ceph_osdc_put_request(req);
+- return NULL;
+- }
+-
+- return req;
+-}
+-
+ /*
+ * Returns a handle, caller owns a ref.
+ */
+@@ -4719,18 +4703,6 @@ ceph_osdc_watch(struct ceph_osd_client *osdc,
+ lreq->t.flags = CEPH_OSD_FLAG_WRITE;
+ ktime_get_real_ts64(&lreq->mtime);
+
+- lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH);
+- if (!lreq->reg_req) {
+- ret = -ENOMEM;
+- goto err_put_lreq;
+- }
+-
+- lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING);
+- if (!lreq->ping_req) {
+- ret = -ENOMEM;
+- goto err_put_lreq;
+- }
+-
+ linger_submit(lreq);
+ ret = linger_reg_commit_wait(lreq);
+ if (ret) {
+@@ -4768,8 +4740,8 @@ int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
+ ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
+ req->r_flags = CEPH_OSD_FLAG_WRITE;
+ ktime_get_real_ts64(&req->r_mtime);
+- osd_req_op_watch_init(req, 0, lreq->linger_id,
+- CEPH_OSD_WATCH_OP_UNWATCH);
++ osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_UNWATCH,
++ lreq->linger_id, 0);
+
+ ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
+ if (ret)
+@@ -4855,35 +4827,6 @@ out_put_req:
+ }
+ EXPORT_SYMBOL(ceph_osdc_notify_ack);
+
+-static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
+- u64 cookie, u32 prot_ver, u32 timeout,
+- void *payload, u32 payload_len)
+-{
+- struct ceph_osd_req_op *op;
+- struct ceph_pagelist *pl;
+- int ret;
+-
+- op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
+- op->notify.cookie = cookie;
+-
+- pl = ceph_pagelist_alloc(GFP_NOIO);
+- if (!pl)
+- return -ENOMEM;
+-
+- ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
+- ret |= ceph_pagelist_encode_32(pl, timeout);
+- ret |= ceph_pagelist_encode_32(pl, payload_len);
+- ret |= ceph_pagelist_append(pl, payload, payload_len);
+- if (ret) {
+- ceph_pagelist_release(pl);
+- return -ENOMEM;
+- }
+-
+- ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
+- op->indata_len = pl->length;
+- return 0;
+-}
+-
+ /*
+ * @timeout: in seconds
+ *
+@@ -4902,7 +4845,6 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
+ size_t *preply_len)
+ {
+ struct ceph_osd_linger_request *lreq;
+- struct page **pages;
+ int ret;
+
+ WARN_ON(!timeout);
+@@ -4915,41 +4857,35 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
+ if (!lreq)
+ return -ENOMEM;
+
+- lreq->preply_pages = preply_pages;
+- lreq->preply_len = preply_len;
+-
+- ceph_oid_copy(&lreq->t.base_oid, oid);
+- ceph_oloc_copy(&lreq->t.base_oloc, oloc);
+- lreq->t.flags = CEPH_OSD_FLAG_READ;
+-
+- lreq->reg_req = alloc_linger_request(lreq);
+- if (!lreq->reg_req) {
++ lreq->request_pl = ceph_pagelist_alloc(GFP_NOIO);
++ if (!lreq->request_pl) {
+ ret = -ENOMEM;
+ goto out_put_lreq;
+ }
+
+- /*
+- * Pass 0 for cookie because we don't know it yet, it will be
+- * filled in by linger_submit().
+- */
+- ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout,
+- payload, payload_len);
+- if (ret)
++ ret = ceph_pagelist_encode_32(lreq->request_pl, 1); /* prot_ver */
++ ret |= ceph_pagelist_encode_32(lreq->request_pl, timeout);
++ ret |= ceph_pagelist_encode_32(lreq->request_pl, payload_len);
++ ret |= ceph_pagelist_append(lreq->request_pl, payload, payload_len);
++ if (ret) {
++ ret = -ENOMEM;
+ goto out_put_lreq;
++ }
+
+ /* for notify_id */
+- pages = ceph_alloc_page_vector(1, GFP_NOIO);
+- if (IS_ERR(pages)) {
+- ret = PTR_ERR(pages);
++ lreq->notify_id_pages = ceph_alloc_page_vector(1, GFP_NOIO);
++ if (IS_ERR(lreq->notify_id_pages)) {
++ ret = PTR_ERR(lreq->notify_id_pages);
++ lreq->notify_id_pages = NULL;
+ goto out_put_lreq;
+ }
+- ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
+- response_data),
+- pages, PAGE_SIZE, 0, false, true);
+
+- ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO);
+- if (ret)
+- goto out_put_lreq;
++ lreq->preply_pages = preply_pages;
++ lreq->preply_len = preply_len;
++
++ ceph_oid_copy(&lreq->t.base_oid, oid);
++ ceph_oloc_copy(&lreq->t.base_oloc, oloc);
++ lreq->t.flags = CEPH_OSD_FLAG_READ;
+
+ linger_submit(lreq);
+ ret = linger_reg_commit_wait(lreq);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 91cf709c98b37..5f1ac48122777 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -663,11 +663,11 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
+ const struct net_device *last_dev;
+ struct net_device_path_ctx ctx = {
+ .dev = dev,
+- .daddr = daddr,
+ };
+ struct net_device_path *path;
+ int ret = 0;
+
++ memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
+ stack->num_paths = 0;
+ while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
+ last_dev = ctx.dev;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 180fa6a26ad45..708cc9b1b1767 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3896,7 +3896,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
+ unsigned int delta_len = 0;
+ struct sk_buff *tail = NULL;
+ struct sk_buff *nskb, *tmp;
+- int err;
++ int len_diff, err;
+
+ skb_push(skb, -skb_network_offset(skb) + offset);
+
+@@ -3936,9 +3936,11 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
+ skb_push(nskb, -skb_network_offset(nskb) + offset);
+
+ skb_release_head_state(nskb);
++ len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb);
+ __copy_skb_header(nskb, skb);
+
+ skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
++ nskb->transport_header += len_diff;
+ skb_copy_from_linear_data_offset(skb, -tnl_hlen,
+ nskb->data - tnl_hlen,
+ offset + tnl_hlen);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index eef07b62b2d88..1cdfac733bd8b 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1721,6 +1721,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
+ unsigned int flags = RTCF_MULTICAST;
+ struct rtable *rth;
++ bool no_policy;
+ u32 itag = 0;
+ int err;
+
+@@ -1731,8 +1732,12 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ if (our)
+ flags |= RTCF_LOCAL;
+
++ no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
++ if (no_policy)
++ IPCB(skb)->flags |= IPSKB_NOPOLICY;
++
+ rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
+- IN_DEV_ORCONF(in_dev, NOPOLICY), false);
++ no_policy, false);
+ if (!rth)
+ return -ENOBUFS;
+
+@@ -1791,7 +1796,7 @@ static int __mkroute_input(struct sk_buff *skb,
+ struct rtable *rth;
+ int err;
+ struct in_device *out_dev;
+- bool do_cache;
++ bool do_cache, no_policy;
+ u32 itag = 0;
+
+ /* get a working reference to the output device */
+@@ -1836,6 +1841,10 @@ static int __mkroute_input(struct sk_buff *skb,
+ }
+ }
+
++ no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
++ if (no_policy)
++ IPCB(skb)->flags |= IPSKB_NOPOLICY;
++
+ fnhe = find_exception(nhc, daddr);
+ if (do_cache) {
+ if (fnhe)
+@@ -1848,8 +1857,7 @@ static int __mkroute_input(struct sk_buff *skb,
+ }
+ }
+
+- rth = rt_dst_alloc(out_dev->dev, 0, res->type,
+- IN_DEV_ORCONF(in_dev, NOPOLICY),
++ rth = rt_dst_alloc(out_dev->dev, 0, res->type, no_policy,
+ IN_DEV_ORCONF(out_dev, NOXFRM));
+ if (!rth) {
+ err = -ENOBUFS;
+@@ -2224,6 +2232,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ struct rtable *rth;
+ struct flowi4 fl4;
+ bool do_cache = true;
++ bool no_policy;
+
+ /* IP on this device is disabled. */
+
+@@ -2341,6 +2350,10 @@ brd_input:
+ RT_CACHE_STAT_INC(in_brd);
+
+ local_input:
++ no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
++ if (no_policy)
++ IPCB(skb)->flags |= IPSKB_NOPOLICY;
++
+ do_cache &= res->fi && !itag;
+ if (do_cache) {
+ struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+@@ -2355,7 +2368,7 @@ local_input:
+
+ rth = rt_dst_alloc(ip_rt_get_dev(net, res),
+ flags | RTCF_LOCAL, res->type,
+- IN_DEV_ORCONF(in_dev, NOPOLICY), false);
++ no_policy, false);
+ if (!rth)
+ goto e_nobufs;
+
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index fd51db3be91c4..92e9d75dba2f4 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -2826,8 +2826,10 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
+ void *ext_hdrs[SADB_EXT_MAX];
+ int err;
+
+- pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
+- BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
++ err = pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
++ BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
++ if (err)
++ return err;
+
+ memset(ext_hdrs, 0, sizeof(ext_hdrs));
+ err = parse_exthdrs(skb, hdr, ext_hdrs);
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 48d9553dafe37..7e2404fd85b64 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -1405,8 +1405,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
+ goto dont_reorder;
+
+ /* not part of a BA session */
+- if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
+- ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
++ if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
+ goto dont_reorder;
+
+ /* new, potentially un-ordered, ampdu frame - process it */
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 645dd984fef03..9ac75689a99dc 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -107,7 +107,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ ptr += 2;
+ }
+ if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM) {
+- mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr);
++ mp_opt->csum = get_unaligned((__force __sum16 *)ptr);
+ mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
+ ptr += 2;
+ }
+@@ -221,7 +221,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+
+ if (opsize == expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM) {
+ mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
+- mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr);
++ mp_opt->csum = get_unaligned((__force __sum16 *)ptr);
+ ptr += 2;
+ }
+
+@@ -1236,7 +1236,7 @@ static void mptcp_set_rwin(const struct tcp_sock *tp)
+ WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
+ }
+
+-u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
++__sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
+ {
+ struct csum_pseudo_header header;
+ __wsum csum;
+@@ -1252,15 +1252,25 @@ u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
+ header.csum = 0;
+
+ csum = csum_partial(&header, sizeof(header), sum);
+- return (__force u16)csum_fold(csum);
++ return csum_fold(csum);
+ }
+
+-static u16 mptcp_make_csum(const struct mptcp_ext *mpext)
++static __sum16 mptcp_make_csum(const struct mptcp_ext *mpext)
+ {
+ return __mptcp_make_csum(mpext->data_seq, mpext->subflow_seq, mpext->data_len,
+ ~csum_unfold(mpext->csum));
+ }
+
++static void put_len_csum(u16 len, __sum16 csum, void *data)
++{
++ __sum16 *sumptr = data + 2;
++ __be16 *ptr = data;
++
++ put_unaligned_be16(len, ptr);
++
++ put_unaligned(csum, sumptr);
++}
++
+ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
+ struct mptcp_out_options *opts)
+ {
+@@ -1328,8 +1338,9 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
+ put_unaligned_be32(mpext->subflow_seq, ptr);
+ ptr += 1;
+ if (opts->csum_reqd) {
+- put_unaligned_be32(mpext->data_len << 16 |
+- mptcp_make_csum(mpext), ptr);
++ put_len_csum(mpext->data_len,
++ mptcp_make_csum(mpext),
++ ptr);
+ } else {
+ put_unaligned_be32(mpext->data_len << 16 |
+ TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
+@@ -1376,11 +1387,12 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
+ goto mp_capable_done;
+
+ if (opts->csum_reqd) {
+- put_unaligned_be32(opts->data_len << 16 |
+- __mptcp_make_csum(opts->data_seq,
+- opts->subflow_seq,
+- opts->data_len,
+- ~csum_unfold(opts->csum)), ptr);
++ put_len_csum(opts->data_len,
++ __mptcp_make_csum(opts->data_seq,
++ opts->subflow_seq,
++ opts->data_len,
++ ~csum_unfold(opts->csum)),
++ ptr);
+ } else {
+ put_unaligned_be32(opts->data_len << 16 |
+ TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index 7bea318ac5f28..1eb83cbe8aae0 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -178,14 +178,13 @@ void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
+ struct mptcp_pm_data *pm = &msk->pm;
+ bool update_subflows;
+
+- update_subflows = (ssk->sk_state == TCP_CLOSE) &&
+- (subflow->request_join || subflow->mp_join);
++ update_subflows = subflow->request_join || subflow->mp_join;
+ if (!READ_ONCE(pm->work_pending) && !update_subflows)
+ return;
+
+ spin_lock_bh(&pm->lock);
+ if (update_subflows)
+- pm->subflows--;
++ __mptcp_pm_close_subflow(msk);
+
+ /* Even if this subflow is not really established, tell the PM to try
+ * to pick the next ones, if possible.
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 85317ce38e3fa..aec767ee047ab 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -725,7 +725,7 @@ void mptcp_token_destroy(struct mptcp_sock *msk);
+ void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn);
+
+ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac);
+-u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum);
++__sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum);
+
+ void __init mptcp_pm_init(void);
+ void mptcp_pm_data_init(struct mptcp_sock *msk);
+@@ -835,6 +835,20 @@ unsigned int mptcp_pm_get_add_addr_accept_max(struct mptcp_sock *msk);
+ unsigned int mptcp_pm_get_subflows_max(struct mptcp_sock *msk);
+ unsigned int mptcp_pm_get_local_addr_max(struct mptcp_sock *msk);
+
++/* called under PM lock */
++static inline void __mptcp_pm_close_subflow(struct mptcp_sock *msk)
++{
++ if (--msk->pm.subflows < mptcp_pm_get_subflows_max(msk))
++ WRITE_ONCE(msk->pm.accept_subflow, true);
++}
++
++static inline void mptcp_pm_close_subflow(struct mptcp_sock *msk)
++{
++ spin_lock_bh(&msk->pm.lock);
++ __mptcp_pm_close_subflow(msk);
++ spin_unlock_bh(&msk->pm.lock);
++}
++
+ void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk);
+ void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
+
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index bea47a1180dc2..651f01d13191e 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -846,7 +846,7 @@ static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *
+ {
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ u32 offset, seq, delta;
+- u16 csum;
++ __sum16 csum;
+ int len;
+
+ if (!csum_reqd)
+@@ -1380,20 +1380,20 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ struct sockaddr_storage addr;
+ int remote_id = remote->id;
+ int local_id = loc->id;
++ int err = -ENOTCONN;
+ struct socket *sf;
+ struct sock *ssk;
+ u32 remote_token;
+ int addrlen;
+ int ifindex;
+ u8 flags;
+- int err;
+
+ if (!mptcp_is_fully_established(sk))
+- return -ENOTCONN;
++ goto err_out;
+
+ err = mptcp_subflow_create_socket(sk, &sf);
+ if (err)
+- return err;
++ goto err_out;
+
+ ssk = sf->sk;
+ subflow = mptcp_subflow_ctx(ssk);
+@@ -1456,6 +1456,12 @@ failed_unlink:
+ failed:
+ subflow->disposable = 1;
+ sock_release(sf);
++
++err_out:
++	/* we account subflows before the creation, and these failures will not
++ * be caught by sk_state_change()
++ */
++ mptcp_pm_close_subflow(msk);
+ return err;
+ }
+
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index b90eca7a2f22b..9fb407084c506 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -173,12 +173,11 @@ EXPORT_SYMBOL_GPL(flow_offload_route_init);
+
+ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
+ {
+- tcp->state = TCP_CONNTRACK_ESTABLISHED;
+ tcp->seen[0].td_maxwin = 0;
+ tcp->seen[1].td_maxwin = 0;
+ }
+
+-static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
++static void flow_offload_fixup_ct(struct nf_conn *ct)
+ {
+ struct net *net = nf_ct_net(ct);
+ int l4num = nf_ct_protonum(ct);
+@@ -187,7 +186,9 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
+ if (l4num == IPPROTO_TCP) {
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
+
+- timeout = tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
++ flow_offload_fixup_tcp(&ct->proto.tcp);
++
++ timeout = tn->timeouts[ct->proto.tcp.state];
+ timeout -= tn->offload_timeout;
+ } else if (l4num == IPPROTO_UDP) {
+ struct nf_udp_net *tn = nf_udp_pernet(net);
+@@ -205,18 +206,6 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
+ }
+
+-static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+-{
+- if (nf_ct_protonum(ct) == IPPROTO_TCP)
+- flow_offload_fixup_tcp(&ct->proto.tcp);
+-}
+-
+-static void flow_offload_fixup_ct(struct nf_conn *ct)
+-{
+- flow_offload_fixup_ct_state(ct);
+- flow_offload_fixup_ct_timeout(ct);
+-}
+-
+ static void flow_offload_route_release(struct flow_offload *flow)
+ {
+ nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
+@@ -329,8 +318,10 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
+ u32 timeout;
+
+ timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
+- if (READ_ONCE(flow->timeout) != timeout)
++ if (timeout - READ_ONCE(flow->timeout) > HZ)
+ WRITE_ONCE(flow->timeout, timeout);
++ else
++ return;
+
+ if (likely(!nf_flowtable_hw_offload(flow_table)))
+ return;
+@@ -353,22 +344,14 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
+ rhashtable_remove_fast(&flow_table->rhashtable,
+ &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
+ nf_flow_offload_rhash_params);
+-
+- clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
+-
+- if (nf_flow_has_expired(flow))
+- flow_offload_fixup_ct(flow->ct);
+- else
+- flow_offload_fixup_ct_timeout(flow->ct);
+-
+ flow_offload_free(flow);
+ }
+
+ void flow_offload_teardown(struct flow_offload *flow)
+ {
++ clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
+ set_bit(NF_FLOW_TEARDOWN, &flow->flags);
+-
+- flow_offload_fixup_ct_state(flow->ct);
++ flow_offload_fixup_ct(flow->ct);
+ }
+ EXPORT_SYMBOL_GPL(flow_offload_teardown);
+
+@@ -399,7 +382,8 @@ EXPORT_SYMBOL_GPL(flow_offload_lookup);
+
+ static int
+ nf_flow_table_iterate(struct nf_flowtable *flow_table,
+- void (*iter)(struct flow_offload *flow, void *data),
++ void (*iter)(struct nf_flowtable *flowtable,
++ struct flow_offload *flow, void *data),
+ void *data)
+ {
+ struct flow_offload_tuple_rhash *tuplehash;
+@@ -423,7 +407,7 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
+
+ flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
+
+- iter(flow, data);
++ iter(flow_table, flow, data);
+ }
+ rhashtable_walk_stop(&hti);
+ rhashtable_walk_exit(&hti);
+@@ -431,34 +415,12 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
+ return err;
+ }
+
+-static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
++static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
++ struct flow_offload *flow, void *data)
+ {
+- struct dst_entry *dst;
+-
+- if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
+- tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
+- dst = tuple->dst_cache;
+- if (!dst_check(dst, tuple->dst_cookie))
+- return true;
+- }
+-
+- return false;
+-}
+-
+-static bool nf_flow_has_stale_dst(struct flow_offload *flow)
+-{
+- return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) ||
+- flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
+-}
+-
+-static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
+-{
+- struct nf_flowtable *flow_table = data;
+-
+ if (nf_flow_has_expired(flow) ||
+- nf_ct_is_dying(flow->ct) ||
+- nf_flow_has_stale_dst(flow))
+- set_bit(NF_FLOW_TEARDOWN, &flow->flags);
++ nf_ct_is_dying(flow->ct))
++ flow_offload_teardown(flow);
+
+ if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
+ if (test_bit(NF_FLOW_HW, &flow->flags)) {
+@@ -479,7 +441,7 @@ static void nf_flow_offload_work_gc(struct work_struct *work)
+ struct nf_flowtable *flow_table;
+
+ flow_table = container_of(work, struct nf_flowtable, gc_work.work);
+- nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
++ nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
+ queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
+ }
+
+@@ -595,7 +557,8 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
+ }
+ EXPORT_SYMBOL_GPL(nf_flow_table_init);
+
+-static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
++static void nf_flow_table_do_cleanup(struct nf_flowtable *flow_table,
++ struct flow_offload *flow, void *data)
+ {
+ struct net_device *dev = data;
+
+@@ -637,11 +600,10 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
+
+ cancel_delayed_work_sync(&flow_table->gc_work);
+ nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
+- nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
++ nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
+ nf_flow_table_offload_flush(flow_table);
+ if (nf_flowtable_hw_offload(flow_table))
+- nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
+- flow_table);
++ nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
+ rhashtable_destroy(&flow_table->rhashtable);
+ }
+ EXPORT_SYMBOL_GPL(nf_flow_table_free);
+diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
+index 6257d87c3a56d..28026467b54cd 100644
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -227,6 +227,15 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+ return true;
+ }
+
++static inline bool nf_flow_dst_check(struct flow_offload_tuple *tuple)
++{
++ if (tuple->xmit_type != FLOW_OFFLOAD_XMIT_NEIGH &&
++ tuple->xmit_type != FLOW_OFFLOAD_XMIT_XFRM)
++ return true;
++
++ return dst_check(tuple->dst_cache, tuple->dst_cookie);
++}
++
+ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
+ const struct nf_hook_state *state,
+ struct dst_entry *dst)
+@@ -346,6 +355,11 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
+ return NF_ACCEPT;
+
++ if (!nf_flow_dst_check(&tuplehash->tuple)) {
++ flow_offload_teardown(flow);
++ return NF_ACCEPT;
++ }
++
+ if (skb_try_make_writable(skb, thoff + hdrsize))
+ return NF_DROP;
+
+@@ -582,6 +596,11 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
+ return NF_ACCEPT;
+
++ if (!nf_flow_dst_check(&tuplehash->tuple)) {
++ flow_offload_teardown(flow);
++ return NF_ACCEPT;
++ }
++
+ if (skb_try_make_writable(skb, thoff + hdrsize))
+ return NF_DROP;
+
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index 0af34ad414796..aac6db8680d47 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -36,6 +36,15 @@ static void nft_default_forward_path(struct nf_flow_route *route,
+ route->tuple[dir].xmit_type = nft_xmit_type(dst_cache);
+ }
+
++static bool nft_is_valid_ether_device(const struct net_device *dev)
++{
++ if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
++ dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
++ return false;
++
++ return true;
++}
++
+ static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
+ const struct dst_entry *dst_cache,
+ const struct nf_conn *ct,
+@@ -47,6 +56,9 @@ static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
+ struct neighbour *n;
+ u8 nud_state;
+
++ if (!nft_is_valid_ether_device(dev))
++ goto out;
++
+ n = dst_neigh_lookup(dst_cache, daddr);
+ if (!n)
+ return -1;
+@@ -60,6 +72,7 @@ static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
+ if (!(nud_state & NUD_VALID))
+ return -1;
+
++out:
+ return dev_fill_forward_path(dev, ha, stack);
+ }
+
+@@ -78,15 +91,6 @@ struct nft_forward_info {
+ enum flow_offload_xmit_type xmit_type;
+ };
+
+-static bool nft_is_valid_ether_device(const struct net_device *dev)
+-{
+- if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
+- dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
+- return false;
+-
+- return true;
+-}
+-
+ static void nft_dev_path_info(const struct net_device_path_stack *stack,
+ struct nft_forward_info *info,
+ unsigned char *ha, struct nf_flowtable *flowtable)
+@@ -119,7 +123,8 @@ static void nft_dev_path_info(const struct net_device_path_stack *stack,
+ info->indev = NULL;
+ break;
+ }
+- info->outdev = path->dev;
++ if (!info->outdev)
++ info->outdev = path->dev;
+ info->encap[info->num_encaps].id = path->encap.id;
+ info->encap[info->num_encaps].proto = path->encap.proto;
+ info->num_encaps++;
+@@ -293,7 +298,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
+ case IPPROTO_TCP:
+ tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt),
+ sizeof(_tcph), &_tcph);
+- if (unlikely(!tcph || tcph->fin || tcph->rst))
++ if (unlikely(!tcph || tcph->fin || tcph->rst ||
++ !nf_conntrack_tcp_established(ct)))
+ goto out;
+ break;
+ case IPPROTO_UDP:
+diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
+index 6055dc9a82aa0..aa5e712adf078 100644
+--- a/net/nfc/nci/data.c
++++ b/net/nfc/nci/data.c
+@@ -118,7 +118,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
+
+ skb_frag = nci_skb_alloc(ndev,
+ (NCI_DATA_HDR_SIZE + frag_len),
+- GFP_KERNEL);
++ GFP_ATOMIC);
+ if (skb_frag == NULL) {
+ rc = -ENOMEM;
+ goto free_exit;
+diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
+index 19703a649b5a6..78c4b6addf15a 100644
+--- a/net/nfc/nci/hci.c
++++ b/net/nfc/nci/hci.c
+@@ -153,7 +153,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
+
+ i = 0;
+ skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len +
+- NCI_DATA_HDR_SIZE, GFP_KERNEL);
++ NCI_DATA_HDR_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+@@ -184,7 +184,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
+ if (i < data_len) {
+ skb = nci_skb_alloc(ndev,
+ conn_info->max_pkt_payload_len +
+- NCI_DATA_HDR_SIZE, GFP_KERNEL);
++ NCI_DATA_HDR_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 0eaaf1f45de17..211c757bfc3c4 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -232,6 +232,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
+ for (i = 0; i < p->tcfp_nkeys; ++i) {
+ u32 cur = p->tcfp_keys[i].off;
+
++ /* sanitize the shift value for any later use */
++ p->tcfp_keys[i].shift = min_t(size_t, BITS_PER_TYPE(int) - 1,
++ p->tcfp_keys[i].shift);
++
+ /* The AT option can read a single byte, we can bound the actual
+ * value with uchar max.
+ */
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index dc171ca0d1b12..0c20df052db37 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -3128,6 +3128,15 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
+ } else if (attrs[NL80211_ATTR_CHANNEL_WIDTH]) {
+ chandef->width =
+ nla_get_u32(attrs[NL80211_ATTR_CHANNEL_WIDTH]);
++ if (chandef->chan->band == NL80211_BAND_S1GHZ) {
++			/* User input error: channel width doesn't match the channel */
++ if (chandef->width != ieee80211_s1g_channel_width(chandef->chan)) {
++ NL_SET_ERR_MSG_ATTR(extack,
++ attrs[NL80211_ATTR_CHANNEL_WIDTH],
++ "bad channel width");
++ return -EINVAL;
++ }
++ }
+ if (attrs[NL80211_ATTR_CENTER_FREQ1]) {
+ chandef->center_freq1 =
+ nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1]);
+@@ -11564,18 +11573,23 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
+ struct cfg80211_bitrate_mask mask;
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
++ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ if (!rdev->ops->set_bitrate_mask)
+ return -EOPNOTSUPP;
+
++ wdev_lock(wdev);
+ err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
+ NL80211_ATTR_TX_RATES, &mask,
+ dev, true);
+ if (err)
+- return err;
++ goto out;
+
+- return rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
++ err = rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
++out:
++ wdev_unlock(wdev);
++ return err;
+ }
+
+ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 4a6d864329106..6d82bd9eaf8c7 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -1829,7 +1829,7 @@ int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+ if (tmp && tmp->datalen >= sizeof(struct ieee80211_s1g_oper_ie)) {
+ struct ieee80211_s1g_oper_ie *s1gop = (void *)tmp->data;
+
+- return s1gop->primary_ch;
++ return s1gop->oper_ch;
+ }
+ } else {
+ tmp = cfg80211_find_elem(WLAN_EID_DS_PARAMS, ie, ielen);
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 882526159d3a9..19aa994f5d2c2 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -3158,7 +3158,7 @@ ok:
+
+ nopol:
+ if (!(dst_orig->dev->flags & IFF_LOOPBACK) &&
+- !xfrm_default_allow(net, dir)) {
++ net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
+ err = -EPERM;
+ goto error;
+ }
+@@ -3569,7 +3569,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
+ }
+
+ if (!pol) {
+- if (!xfrm_default_allow(net, dir)) {
++ if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
+ return 0;
+ }
+@@ -3629,7 +3629,8 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
+ }
+ xfrm_nr = ti;
+
+- if (!xfrm_default_allow(net, dir) && !xfrm_nr) {
++ if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK &&
++ !xfrm_nr) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
+ goto reject;
+ }
+@@ -4118,6 +4119,9 @@ static int __net_init xfrm_net_init(struct net *net)
+ spin_lock_init(&net->xfrm.xfrm_policy_lock);
+ seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
+ mutex_init(&net->xfrm.xfrm_cfg_mutex);
++ net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
++ net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
++ net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;
+
+ rv = xfrm_statistics_init(net);
+ if (rv < 0)
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 72b2f173aac8b..64fa8fdd6bbd5 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1994,12 +1994,9 @@ static int xfrm_notify_userpolicy(struct net *net)
+ }
+
+ up = nlmsg_data(nlh);
+- up->in = net->xfrm.policy_default & XFRM_POL_DEFAULT_IN ?
+- XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+- up->fwd = net->xfrm.policy_default & XFRM_POL_DEFAULT_FWD ?
+- XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+- up->out = net->xfrm.policy_default & XFRM_POL_DEFAULT_OUT ?
+- XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
++ up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
++ up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
++ up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
+
+ nlmsg_end(skb, nlh);
+
+@@ -2010,26 +2007,26 @@ static int xfrm_notify_userpolicy(struct net *net)
+ return err;
+ }
+
++static bool xfrm_userpolicy_is_valid(__u8 policy)
++{
++ return policy == XFRM_USERPOLICY_BLOCK ||
++ policy == XFRM_USERPOLICY_ACCEPT;
++}
++
+ static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct nlattr **attrs)
+ {
+ struct net *net = sock_net(skb->sk);
+ struct xfrm_userpolicy_default *up = nlmsg_data(nlh);
+
+- if (up->in == XFRM_USERPOLICY_BLOCK)
+- net->xfrm.policy_default |= XFRM_POL_DEFAULT_IN;
+- else if (up->in == XFRM_USERPOLICY_ACCEPT)
+- net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_IN;
++ if (xfrm_userpolicy_is_valid(up->in))
++ net->xfrm.policy_default[XFRM_POLICY_IN] = up->in;
+
+- if (up->fwd == XFRM_USERPOLICY_BLOCK)
+- net->xfrm.policy_default |= XFRM_POL_DEFAULT_FWD;
+- else if (up->fwd == XFRM_USERPOLICY_ACCEPT)
+- net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_FWD;
++ if (xfrm_userpolicy_is_valid(up->fwd))
++ net->xfrm.policy_default[XFRM_POLICY_FWD] = up->fwd;
+
+- if (up->out == XFRM_USERPOLICY_BLOCK)
+- net->xfrm.policy_default |= XFRM_POL_DEFAULT_OUT;
+- else if (up->out == XFRM_USERPOLICY_ACCEPT)
+- net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_OUT;
++ if (xfrm_userpolicy_is_valid(up->out))
++ net->xfrm.policy_default[XFRM_POLICY_OUT] = up->out;
+
+ rt_genid_bump_all(net);
+
+@@ -2059,13 +2056,9 @@ static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
+ }
+
+ r_up = nlmsg_data(r_nlh);
+-
+- r_up->in = net->xfrm.policy_default & XFRM_POL_DEFAULT_IN ?
+- XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+- r_up->fwd = net->xfrm.policy_default & XFRM_POL_DEFAULT_FWD ?
+- XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+- r_up->out = net->xfrm.policy_default & XFRM_POL_DEFAULT_OUT ?
+- XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
++ r_up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
++ r_up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
++ r_up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
+ nlmsg_end(r_skb, r_nlh);
+
+ return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid);
+diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
+index d3c3a61308ada..94dcec2cc803f 100644
+--- a/scripts/kconfig/confdata.c
++++ b/scripts/kconfig/confdata.c
+@@ -979,6 +979,7 @@ static int conf_write_autoconf_cmd(const char *autoconf_name)
+
+ fprintf(out, "\n$(deps_config): ;\n");
+
++ fflush(out);
+ ret = ferror(out); /* error check for all fprintf() calls */
+ fclose(out);
+ if (ret)
+@@ -1097,6 +1098,7 @@ static int __conf_write_autoconf(const char *filename,
+ if ((sym->flags & SYMBOL_WRITE) && sym->name)
+ print_symbol(file, sym);
+
++ fflush(file);
+ /* check possible errors in conf_write_heading() and print_symbol() */
+ ret = ferror(file);
+ fclose(file);
+diff --git a/security/security.c b/security/security.c
+index b7cf5cbfdc677..aaf6566deb9f0 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -59,10 +59,12 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
+ [LOCKDOWN_DEBUGFS] = "debugfs access",
+ [LOCKDOWN_XMON_WR] = "xmon write access",
+ [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM",
++ [LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM",
+ [LOCKDOWN_INTEGRITY_MAX] = "integrity",
+ [LOCKDOWN_KCORE] = "/proc/kcore access",
+ [LOCKDOWN_KPROBES] = "use of kprobes",
+ [LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM",
++ [LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM",
+ [LOCKDOWN_PERF] = "unsafe use of perf",
+ [LOCKDOWN_TRACEFS] = "use of tracefs",
+ [LOCKDOWN_XMON_RW] = "xmon read and write access",
+diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
+index 0ae4e4e57a401..3fb8f9026e9be 100644
+--- a/security/selinux/ss/hashtab.c
++++ b/security/selinux/ss/hashtab.c
+@@ -179,7 +179,8 @@ int hashtab_duplicate(struct hashtab *new, struct hashtab *orig,
+ kmem_cache_free(hashtab_node_cachep, cur);
+ }
+ }
+- kmem_cache_free(hashtab_node_cachep, new);
++ kfree(new->htable);
++ memset(new, 0, sizeof(*new));
+ return -ENOMEM;
+ }
+
+diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
+index 69cbc79fbb716..2aaaa68071744 100644
+--- a/sound/isa/wavefront/wavefront_synth.c
++++ b/sound/isa/wavefront/wavefront_synth.c
+@@ -1094,7 +1094,8 @@ wavefront_send_sample (snd_wavefront_t *dev,
+
+ if (dataptr < data_end) {
+
+- __get_user (sample_short, dataptr);
++ if (get_user(sample_short, dataptr))
++ return -EFAULT;
+ dataptr += skip;
+
+ if (data_is_unsigned) { /* GUS ? */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 51c54cf0f3127..e38acdbe1a3b5 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -937,6 +937,9 @@ static int alc_init(struct hda_codec *codec)
+ return 0;
+ }
+
++#define alc_free snd_hda_gen_free
++
++#ifdef CONFIG_PM
+ static inline void alc_shutup(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+@@ -950,9 +953,6 @@ static inline void alc_shutup(struct hda_codec *codec)
+ alc_shutup_pins(codec);
+ }
+
+-#define alc_free snd_hda_gen_free
+-
+-#ifdef CONFIG_PM
+ static void alc_power_eapd(struct hda_codec *codec)
+ {
+ alc_auto_setup_eapd(codec, false);
+@@ -966,9 +966,7 @@ static int alc_suspend(struct hda_codec *codec)
+ spec->power_hook(codec);
+ return 0;
+ }
+-#endif
+
+-#ifdef CONFIG_PM
+ static int alc_resume(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+@@ -9236,6 +9234,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
+ SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+ SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
++ SND_PCI_QUIRK(0x1d05, 0x1096, "TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP),
++ SND_PCI_QUIRK(0x1d05, 0x1100, "TongFang GKxNRxx", ALC269_FIXUP_NO_SHUTUP),
++ SND_PCI_QUIRK(0x1d05, 0x1111, "TongFang GMxZGxx", ALC269_FIXUP_NO_SHUTUP),
++ SND_PCI_QUIRK(0x1d05, 0x1119, "TongFang GMxZGxx", ALC269_FIXUP_NO_SHUTUP),
++ SND_PCI_QUIRK(0x1d05, 0x1129, "TongFang GMxZGxx", ALC269_FIXUP_NO_SHUTUP),
++ SND_PCI_QUIRK(0x1d05, 0x1147, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP),
++ SND_PCI_QUIRK(0x1d05, 0x115c, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP),
++ SND_PCI_QUIRK(0x1d05, 0x121b, "TongFang GMxAGxx", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+@@ -11106,6 +11112,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
+ SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
++ SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 0ea39565e6232..40a5e3eb4ef26 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3235,6 +3235,15 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+
++/* Rane SL-1 */
++{
++ USB_DEVICE(0x13e5, 0x0001),
++ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ .ifnum = QUIRK_ANY_INTERFACE,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE
++ }
++},
++
+ /* disabled due to regression for other devices;
+ * see https://bugzilla.kernel.org/show_bug.cgi?id=199905
+ */
+diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
+index ae61f464043a1..c6a48d0ef9ff0 100644
+--- a/tools/build/Makefile.feature
++++ b/tools/build/Makefile.feature
+@@ -98,6 +98,7 @@ FEATURE_TESTS_EXTRA := \
+ llvm-version \
+ clang \
+ libbpf \
++ libbpf-btf__load_from_kernel_by_id \
+ libpfm4 \
+ libdebuginfod \
+ clang-bpf-co-re
+diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
+index de66e1cc07348..cb4a2a4fa2e48 100644
+--- a/tools/build/feature/Makefile
++++ b/tools/build/feature/Makefile
+@@ -57,6 +57,7 @@ FILES= \
+ test-lzma.bin \
+ test-bpf.bin \
+ test-libbpf.bin \
++ test-libbpf-btf__load_from_kernel_by_id.bin \
+ test-get_cpuid.bin \
+ test-sdt.bin \
+ test-cxx.bin \
+@@ -287,6 +288,9 @@ $(OUTPUT)test-bpf.bin:
+ $(OUTPUT)test-libbpf.bin:
+ $(BUILD) -lbpf
+
++$(OUTPUT)test-libbpf-btf__load_from_kernel_by_id.bin:
++ $(BUILD) -lbpf
++
+ $(OUTPUT)test-sdt.bin:
+ $(BUILD)
+
+diff --git a/tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c b/tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c
+new file mode 100644
+index 0000000000000..f7c084428735a
+--- /dev/null
++++ b/tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c
+@@ -0,0 +1,7 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <bpf/libbpf.h>
++
++int main(void)
++{
++ return btf__load_from_kernel_by_id(20151128, NULL);
++}
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index f3bf9297bcc03..1bd64e7404b9f 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -553,9 +553,16 @@ ifndef NO_LIBELF
+ ifeq ($(feature-libbpf), 1)
+ EXTLIBS += -lbpf
+ $(call detected,CONFIG_LIBBPF_DYNAMIC)
++
++ $(call feature_check,libbpf-btf__load_from_kernel_by_id)
++ ifeq ($(feature-libbpf-btf__load_from_kernel_by_id), 1)
++ CFLAGS += -DHAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID
++ endif
+ else
+ dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
+ endif
++ else
++ CFLAGS += -DHAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID
+ endif
+ endif
+
+diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c
+index 207c56805c551..0ed177991ad05 100644
+--- a/tools/perf/arch/x86/util/perf_regs.c
++++ b/tools/perf/arch/x86/util/perf_regs.c
+@@ -9,6 +9,8 @@
+ #include "../../../util/perf_regs.h"
+ #include "../../../util/debug.h"
+ #include "../../../util/event.h"
++#include "../../../util/pmu.h"
++#include "../../../util/pmu-hybrid.h"
+
+ const struct sample_reg sample_reg_masks[] = {
+ SMPL_REG(AX, PERF_REG_X86_AX),
+@@ -284,12 +286,22 @@ uint64_t arch__intr_reg_mask(void)
+ .disabled = 1,
+ .exclude_kernel = 1,
+ };
++ struct perf_pmu *pmu;
+ int fd;
+ /*
+ * In an unnamed union, init it here to build on older gcc versions
+ */
+ attr.sample_period = 1;
+
++ if (perf_pmu__has_hybrid()) {
++ /*
++ * The same register set is supported among different hybrid PMUs.
++ * Only check the first available one.
++ */
++ pmu = list_first_entry(&perf_pmu__hybrid_pmus, typeof(*pmu), hybrid_list);
++ attr.config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
++ }
++
+ event_attr_init(&attr);
+
+ fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
+index f2640179ada9e..c2c81567afa50 100644
+--- a/tools/perf/bench/numa.c
++++ b/tools/perf/bench/numa.c
+@@ -1672,7 +1672,7 @@ static int __bench_numa(const char *name)
+ "GB/sec,", "total-speed", "GB/sec total speed");
+
+ if (g->p.show_details >= 2) {
+- char tname[14 + 2 * 10 + 1];
++ char tname[14 + 2 * 11 + 1];
+ struct thread_data *td;
+ for (p = 0; p < g->p.nr_proc; p++) {
+ for (t = 0; t < g->p.nr_threads; t++) {
+diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
+index 573490530194f..592ab02d5ba30 100644
+--- a/tools/perf/tests/bpf.c
++++ b/tools/perf/tests/bpf.c
+@@ -222,11 +222,11 @@ static int __test__bpf(int idx)
+
+ ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
+ bpf_testcase_table[idx].prog_id,
+- true, NULL);
++ false, NULL);
+ if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
+ pr_debug("Unable to get BPF object, %s\n",
+ bpf_testcase_table[idx].msg_compile_fail);
+- if (idx == 0)
++ if ((idx == 0) || (ret == TEST_SKIP))
+ return TEST_SKIP;
+ else
+ return TEST_FAIL;
+@@ -370,9 +370,11 @@ static int test__bpf_prologue_test(struct test_suite *test __maybe_unused,
+ static struct test_case bpf_tests[] = {
+ #ifdef HAVE_LIBBPF_SUPPORT
+ TEST_CASE("Basic BPF filtering", basic_bpf_test),
+- TEST_CASE("BPF pinning", bpf_pinning),
++ TEST_CASE_REASON("BPF pinning", bpf_pinning,
++ "clang isn't installed or environment missing BPF support"),
+ #ifdef HAVE_BPF_PROLOGUE
+- TEST_CASE("BPF prologue generation", bpf_prologue_test),
++ TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test,
++ "clang isn't installed or environment missing BPF support"),
+ #else
+ TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test, "not compiled in"),
+ #endif
+diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh
+index b30dba455f36c..9c9ef33e0b3c6 100755
+--- a/tools/perf/tests/shell/stat_all_pmu.sh
++++ b/tools/perf/tests/shell/stat_all_pmu.sh
+@@ -5,6 +5,16 @@
+ set -e
+
+ for p in $(perf list --raw-dump pmu); do
++ # In powerpc, skip the events for hv_24x7 and hv_gpci.
++  # These events need input values to be filled in for
++ # core, chip, partition id based on system.
++ # Example: hv_24x7/CPM_ADJUNCT_INST,domain=?,core=?/
++ # hv_gpci/event,partition_id=?/
++ # Hence skip these events for ppc.
++ if echo "$p" |grep -Eq 'hv_24x7|hv_gpci' ; then
++ echo "Skipping: Event '$p' in powerpc"
++ continue
++ fi
+ echo "Testing $p"
+ result=$(perf stat -e "$p" true 2>&1)
+ if ! echo "$result" | grep -q "$p" && ! echo "$result" | grep -q "<not supported>" ; then
+diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
+index a517eaa51eb37..65dfd2c70246e 100644
+--- a/tools/perf/util/bpf-event.c
++++ b/tools/perf/util/bpf-event.c
+@@ -22,7 +22,8 @@
+ #include "record.h"
+ #include "util/synthetic-events.h"
+
+-struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
++#ifndef HAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID
++struct btf *btf__load_from_kernel_by_id(__u32 id)
+ {
+ struct btf *btf;
+ #pragma GCC diagnostic push
+@@ -32,6 +33,7 @@ struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
+
+ return err ? ERR_PTR(err) : btf;
+ }
++#endif
+
+ struct bpf_program * __weak
+ bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
+diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
+index ee6f034812151..9c230b908b76f 100644
+--- a/tools/perf/util/stat.c
++++ b/tools/perf/util/stat.c
+@@ -471,9 +471,10 @@ int perf_stat_process_counter(struct perf_stat_config *config,
+ int perf_event__process_stat_event(struct perf_session *session,
+ union perf_event *event)
+ {
+- struct perf_counts_values count;
++ struct perf_counts_values count, *ptr;
+ struct perf_record_stat *st = &event->stat;
+ struct evsel *counter;
++ int cpu_map_idx;
+
+ count.val = st->val;
+ count.ena = st->ena;
+@@ -484,8 +485,18 @@ int perf_event__process_stat_event(struct perf_session *session,
+ pr_err("Failed to resolve counter for stat event.\n");
+ return -EINVAL;
+ }
+-
+- *perf_counts(counter->counts, st->cpu, st->thread) = count;
++ cpu_map_idx = perf_cpu_map__idx(evsel__cpus(counter), (struct perf_cpu){.cpu = st->cpu});
++ if (cpu_map_idx == -1) {
++ pr_err("Invalid CPU %d for event %s.\n", st->cpu, evsel__name(counter));
++ return -EINVAL;
++ }
++ ptr = perf_counts(counter->counts, cpu_map_idx, st->thread);
++ if (ptr == NULL) {
++ pr_err("Failed to find perf count for CPU %d thread %d on event %s.\n",
++ st->cpu, st->thread, evsel__name(counter));
++ return -EINVAL;
++ }
++ *ptr = count;
+ counter->supported = true;
+ return 0;
+ }
+diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
+index 3f4c8cfe7aca8..7cd9b31d03073 100755
+--- a/tools/testing/selftests/net/fcnal-test.sh
++++ b/tools/testing/selftests/net/fcnal-test.sh
+@@ -810,10 +810,16 @@ ipv4_ping()
+ setup
+ set_sysctl net.ipv4.raw_l3mdev_accept=1 2>/dev/null
+ ipv4_ping_novrf
++ setup
++ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
++ ipv4_ping_novrf
+
+ log_subsection "With VRF"
+ setup "yes"
+ ipv4_ping_vrf
++ setup "yes"
++ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
++ ipv4_ping_vrf
+ }
+
+ ################################################################################
+@@ -2348,10 +2354,16 @@ ipv6_ping()
+ log_subsection "No VRF"
+ setup
+ ipv6_ping_novrf
++ setup
++ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
++ ipv6_ping_novrf
+
+ log_subsection "With VRF"
+ setup "yes"
+ ipv6_ping_vrf
++ setup "yes"
++ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
++ ipv6_ping_vrf
+ }
+
+ ################################################################################
+diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile
+index 0d7bbe49359d8..1b25cc7c64bbd 100644
+--- a/tools/virtio/Makefile
++++ b/tools/virtio/Makefile
+@@ -5,7 +5,8 @@ virtio_test: virtio_ring.o virtio_test.o
+ vringh_test: vringh_test.o vringh.o virtio_ring.o
+
+ CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
+-LDFLAGS += -lpthread
++CFLAGS += -pthread
++LDFLAGS += -pthread
+ vpath %.c ../../drivers/virtio ../../drivers/vhost
+ mod:
+ ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V}
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 610cc7920c8a2..717ee1b2e058a 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1539,7 +1539,7 @@ static int kvm_prepare_memory_region(struct kvm *kvm,
+ r = kvm_arch_prepare_memory_region(kvm, old, new, change);
+
+ /* Free the bitmap on failure if it was allocated above. */
+- if (r && new && new->dirty_bitmap && old && !old->dirty_bitmap)
++ if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
+ kvm_destroy_dirty_bitmap(new);
+
+ return r;