author     Mike Pagano <mpagano@gentoo.org>  2022-11-10 13:09:06 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2022-11-10 13:09:06 -0500
commit     3352de6037042e4498b85b45074d80d2cc4e22ff (patch)
tree       b1746f7406920a725fe938bdaca1a4757d5cfb34
parent     Linux patch 5.15.77 (diff)
download   linux-patches-3352de6037042e4498b85b45074d80d2cc4e22ff.tar.gz
           linux-patches-3352de6037042e4498b85b45074d80d2cc4e22ff.tar.bz2
           linux-patches-3352de6037042e4498b85b45074d80d2cc4e22ff.zip
Linux patch 5.15.78
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |    4
-rw-r--r--  1077_linux-5.15.78.patch | 6520
2 files changed, 6524 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 4186e315..b1c47440 100644
--- a/0000_README
+++ b/0000_README
@@ -351,6 +351,10 @@ Patch: 1076_linux-5.15.77.patch
From: http://www.kernel.org
Desc: Linux 5.15.77
+Patch: 1077_linux-5.15.78.patch
+From: http://www.kernel.org
+Desc: Linux 5.15.78
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1077_linux-5.15.78.patch b/1077_linux-5.15.78.patch
new file mode 100644
index 00000000..4730f772
--- /dev/null
+++ b/1077_linux-5.15.78.patch
@@ -0,0 +1,6520 @@
+diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst
+index 533415644c54d..a78350a8fed43 100644
+--- a/Documentation/trace/histogram.rst
++++ b/Documentation/trace/histogram.rst
+@@ -39,7 +39,7 @@ Documentation written by Tom Zanussi
+ will use the event's kernel stacktrace as the key. The keywords
+ 'keys' or 'key' can be used to specify keys, and the keywords
+ 'values', 'vals', or 'val' can be used to specify values. Compound
+- keys consisting of up to two fields can be specified by the 'keys'
++ keys consisting of up to three fields can be specified by the 'keys'
+ keyword. Hashing a compound key produces a unique entry in the
+ table for each unique combination of component keys, and can be
+ useful for providing more fine-grained summaries of event data.
+diff --git a/Makefile b/Makefile
+index 3e712ea4c745d..397dcb7af1c85 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 77
++SUBLEVEL = 78
+ EXTRAVERSION =
+ NAME = Trick or Treat
+
+diff --git a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
+index 68e5ab2e27e22..6bb4855d13ce5 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
+@@ -29,7 +29,7 @@
+
+ user-pb {
+ label = "user_pb";
+- gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>;
++ gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>;
+ linux,code = <BTN_0>;
+ };
+
+diff --git a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi
+index 8e23cec7149e5..696427b487f01 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi
+@@ -26,7 +26,7 @@
+
+ user-pb {
+ label = "user_pb";
+- gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>;
++ gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>;
+ linux,code = <BTN_0>;
+ };
+
+diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
+index 34e5549ea748a..a00b0f14c222f 100644
+--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
++++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
+@@ -597,12 +597,26 @@
+ polling-delay = <1000>;
+ polling-delay-passive = <100>;
+ thermal-sensors = <&scpi_sensors0 0>;
++ trips {
++ pmic_crit0: trip0 {
++ temperature = <90000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
+ };
+
+ soc {
+ polling-delay = <1000>;
+ polling-delay-passive = <100>;
+ thermal-sensors = <&scpi_sensors0 3>;
++ trips {
++ soc_crit0: trip0 {
++ temperature = <80000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
+ };
+
+ big_cluster_thermal_zone: big-cluster {
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+index 6050723172436..63441028622a6 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+@@ -758,6 +758,9 @@
+ little-endian;
+ #address-cells = <1>;
+ #size-cells = <0>;
++ clock-frequency = <2500000>;
++ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
++ QORIQ_CLK_PLL_DIV(1)>;
+ status = "disabled";
+ };
+
+@@ -767,6 +770,9 @@
+ little-endian;
+ #address-cells = <1>;
+ #size-cells = <0>;
++ clock-frequency = <2500000>;
++ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
++ QORIQ_CLK_PLL_DIV(1)>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+index 1282b61da8a55..12e59777363fe 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+@@ -525,6 +525,9 @@
+ little-endian;
+ #address-cells = <1>;
+ #size-cells = <0>;
++ clock-frequency = <2500000>;
++ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
++ QORIQ_CLK_PLL_DIV(2)>;
+ status = "disabled";
+ };
+
+@@ -534,6 +537,9 @@
+ little-endian;
+ #address-cells = <1>;
+ #size-cells = <0>;
++ clock-frequency = <2500000>;
++ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
++ QORIQ_CLK_PLL_DIV(2)>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+index 51c4f61007cdb..1bc7f538f6905 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+@@ -1369,6 +1369,9 @@
+ #address-cells = <1>;
+ #size-cells = <0>;
+ little-endian;
++ clock-frequency = <2500000>;
++ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
++ QORIQ_CLK_PLL_DIV(2)>;
+ status = "disabled";
+ };
+
+@@ -1379,6 +1382,9 @@
+ little-endian;
+ #address-cells = <1>;
+ #size-cells = <0>;
++ clock-frequency = <2500000>;
++ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
++ QORIQ_CLK_PLL_DIV(2)>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
+index a79f42a9618ec..639220dbff008 100644
+--- a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
+@@ -38,9 +38,9 @@ conn_subsys: bus@5b000000 {
+ interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b010000 0x10000>;
+ clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>,
+- <&sdhc0_lpcg IMX_LPCG_CLK_5>,
+- <&sdhc0_lpcg IMX_LPCG_CLK_0>;
+- clock-names = "ipg", "per", "ahb";
++ <&sdhc0_lpcg IMX_LPCG_CLK_0>,
++ <&sdhc0_lpcg IMX_LPCG_CLK_5>;
++ clock-names = "ipg", "ahb", "per";
+ power-domains = <&pd IMX_SC_R_SDHC_0>;
+ status = "disabled";
+ };
+@@ -49,9 +49,9 @@ conn_subsys: bus@5b000000 {
+ interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b020000 0x10000>;
+ clocks = <&sdhc1_lpcg IMX_LPCG_CLK_4>,
+- <&sdhc1_lpcg IMX_LPCG_CLK_5>,
+- <&sdhc1_lpcg IMX_LPCG_CLK_0>;
+- clock-names = "ipg", "per", "ahb";
++ <&sdhc1_lpcg IMX_LPCG_CLK_0>,
++ <&sdhc1_lpcg IMX_LPCG_CLK_5>;
++ clock-names = "ipg", "ahb", "per";
+ power-domains = <&pd IMX_SC_R_SDHC_1>;
+ fsl,tuning-start-tap = <20>;
+ fsl,tuning-step= <2>;
+@@ -62,9 +62,9 @@ conn_subsys: bus@5b000000 {
+ interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b030000 0x10000>;
+ clocks = <&sdhc2_lpcg IMX_LPCG_CLK_4>,
+- <&sdhc2_lpcg IMX_LPCG_CLK_5>,
+- <&sdhc2_lpcg IMX_LPCG_CLK_0>;
+- clock-names = "ipg", "per", "ahb";
++ <&sdhc2_lpcg IMX_LPCG_CLK_0>,
++ <&sdhc2_lpcg IMX_LPCG_CLK_5>;
++ clock-names = "ipg", "ahb", "per";
+ power-domains = <&pd IMX_SC_R_SDHC_2>;
+ status = "disabled";
+ };
+diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
+index 60225bc09b017..8ecca795aca0b 100644
+--- a/arch/arm64/kernel/entry-common.c
++++ b/arch/arm64/kernel/entry-common.c
+@@ -320,7 +320,8 @@ static void cortex_a76_erratum_1463225_svc_handler(void)
+ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
+ }
+
+-static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
++static __always_inline bool
++cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+ {
+ if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
+ return false;
+diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
+index c5d0097154020..aa06e28f2991f 100644
+--- a/arch/arm64/kvm/hyp/exception.c
++++ b/arch/arm64/kvm/hyp/exception.c
+@@ -13,6 +13,7 @@
+ #include <hyp/adjust_pc.h>
+ #include <linux/kvm_host.h>
+ #include <asm/kvm_emulate.h>
++#include <asm/kvm_mmu.h>
+
+ #if !defined (__KVM_NVHE_HYPERVISOR__) && !defined (__KVM_VHE_HYPERVISOR__)
+ #error Hypervisor code only!
+@@ -115,7 +116,7 @@ static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
+ new |= (old & PSR_C_BIT);
+ new |= (old & PSR_V_BIT);
+
+- if (kvm_has_mte(vcpu->kvm))
++ if (kvm_has_mte(kern_hyp_va(vcpu->kvm)))
+ new |= PSR_TCO_BIT;
+
+ new |= (old & PSR_DIT_BIT);
+diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h
+index 9d3d7737c58b1..a005ebc547793 100644
+--- a/arch/parisc/include/asm/hardware.h
++++ b/arch/parisc/include/asm/hardware.h
+@@ -10,12 +10,12 @@
+ #define SVERSION_ANY_ID PA_SVERSION_ANY_ID
+
+ struct hp_hardware {
+- unsigned short hw_type:5; /* HPHW_xxx */
+- unsigned short hversion;
+- unsigned long sversion:28;
+- unsigned short opt;
+- const char name[80]; /* The hardware description */
+-};
++ unsigned int hw_type:8; /* HPHW_xxx */
++ unsigned int hversion:12;
++ unsigned int sversion:12;
++ unsigned char opt;
++ unsigned char name[59]; /* The hardware description */
++} __packed;
+
+ struct parisc_device;
+
+diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
+index d126e78e101ae..e7ee0c0c91d35 100644
+--- a/arch/parisc/kernel/drivers.c
++++ b/arch/parisc/kernel/drivers.c
+@@ -882,15 +882,13 @@ void __init walk_central_bus(void)
+ &root);
+ }
+
+-static void print_parisc_device(struct parisc_device *dev)
++static __init void print_parisc_device(struct parisc_device *dev)
+ {
+- char hw_path[64];
+- static int count;
++ static int count __initdata;
+
+- print_pa_hwpath(dev, hw_path);
+- pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+- ++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type,
+- dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
++ pr_info("%d. %s at %pap { type:%d, hv:%#x, sv:%#x, rev:%#x }",
++ ++count, dev->name, &(dev->hpa.start), dev->id.hw_type,
++ dev->id.hversion, dev->id.sversion, dev->id.hversion_rev);
+
+ if (dev->num_addrs) {
+ int k;
+@@ -1079,7 +1077,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
+
+
+
+-static int print_one_device(struct device * dev, void * data)
++static __init int print_one_device(struct device * dev, void * data)
+ {
+ struct parisc_device * pdev = to_parisc_device(dev);
+
+diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
+index 918e05137d4c6..1686a852534fc 100644
+--- a/arch/s390/boot/compressed/vmlinux.lds.S
++++ b/arch/s390/boot/compressed/vmlinux.lds.S
+@@ -93,8 +93,17 @@ SECTIONS
+ _compressed_start = .;
+ *(.vmlinux.bin.compressed)
+ _compressed_end = .;
+- FILL(0xff);
+- . = ALIGN(4096);
++ }
++
++#define SB_TRAILER_SIZE 32
++ /* Trailer needed for Secure Boot */
++ . += SB_TRAILER_SIZE; /* make sure .sb.trailer does not overwrite the previous section */
++ . = ALIGN(4096) - SB_TRAILER_SIZE;
++ .sb.trailer : {
++ QUAD(0)
++ QUAD(0)
++ QUAD(0)
++ QUAD(0x000000207a49504c)
+ }
+ _end = .;
+
+diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
+index a596e69d3c474..0b012ce0921c1 100644
+--- a/arch/s390/lib/uaccess.c
++++ b/arch/s390/lib/uaccess.c
+@@ -212,7 +212,7 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
+ asm volatile(
+ " llilh 0,%[spec]\n"
+ "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
+- " jz 4f\n"
++ "6: jz 4f\n"
+ "1: algr %0,%2\n"
+ " slgr %1,%2\n"
+ " j 0b\n"
+@@ -222,11 +222,11 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
+ " clgr %0,%3\n" /* copy crosses next page boundary? */
+ " jnh 5f\n"
+ "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
+- " slgr %0,%3\n"
++ "7: slgr %0,%3\n"
+ " j 5f\n"
+ "4: slgr %0,%0\n"
+ "5:\n"
+- EX_TABLE(0b,2b) EX_TABLE(3b,5b)
++ EX_TABLE(0b,2b) EX_TABLE(6b,2b) EX_TABLE(3b,5b) EX_TABLE(7b,5b)
+ : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
+ : "a" (empty_zero_page), [spec] "K" (0x81UL)
+ : "cc", "memory", "0");
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 588b83cc730d3..f36fda2672e04 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4713,6 +4713,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000),
++ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 11, 0x00000000),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c),
+ INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e),
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 266ac8263696a..21a9cb48daf5d 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -936,8 +936,13 @@ struct event_constraint intel_icl_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */
+
+ INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
+- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */
+- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf), /* MEM_INST_RETIRED.STORE */
++ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
++ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
++ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
++ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
++ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
++ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
++ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
+
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */
+
+@@ -958,8 +963,13 @@ struct event_constraint intel_spr_pebs_event_constraints[] = {
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xfe),
+ INTEL_PLD_CONSTRAINT(0x1cd, 0xfe),
+ INTEL_PSD_CONSTRAINT(0x2cd, 0x1),
+- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf),
+- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf),
++ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
++ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
++ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
++ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
++ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
++ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
++ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
+
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf),
+
+diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
+index 6a2827d0681fc..e8ab7c1f1080a 100644
+--- a/arch/x86/include/asm/syscall_wrapper.h
++++ b/arch/x86/include/asm/syscall_wrapper.h
+@@ -6,7 +6,7 @@
+ #ifndef _ASM_X86_SYSCALL_WRAPPER_H
+ #define _ASM_X86_SYSCALL_WRAPPER_H
+
+-struct pt_regs;
++#include <asm/ptrace.h>
+
+ extern long __x64_sys_ni_syscall(const struct pt_regs *regs);
+ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index d85a0808a446e..05b27b4a54c9d 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -902,11 +902,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ entry->eax = min(entry->eax, 0x8000001f);
+ break;
+ case 0x80000001:
++ entry->ebx &= ~GENMASK(27, 16);
+ cpuid_entry_override(entry, CPUID_8000_0001_EDX);
+ cpuid_entry_override(entry, CPUID_8000_0001_ECX);
+ break;
+ case 0x80000006:
+- /* L2 cache and TLB: pass through host info. */
++ /* Drop reserved bits, pass host L2 cache and TLB info. */
++ entry->edx &= ~GENMASK(17, 16);
+ break;
+ case 0x80000007: /* Advanced power management */
+ /* invariant TSC is CPUID.80000007H:EDX[8] */
+@@ -936,6 +938,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ g_phys_as = phys_as;
+
+ entry->eax = g_phys_as | (virt_as << 8);
++ entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
+ entry->edx = 0;
+ cpuid_entry_override(entry, CPUID_8000_0008_EBX);
+ break;
+@@ -955,6 +958,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ entry->ecx = entry->edx = 0;
+ break;
+ case 0x8000001a:
++ entry->eax &= GENMASK(2, 0);
++ entry->ebx = entry->ecx = entry->edx = 0;
++ break;
+ case 0x8000001e:
+ break;
+ case 0x8000001F:
+@@ -962,7 +968,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+ } else {
+ cpuid_entry_override(entry, CPUID_8000_001F_EAX);
+-
++ /* Clear NumVMPL since KVM does not support VMPL. */
++ entry->ebx &= ~GENMASK(31, 12);
+ /*
+ * Enumerate '0' for "PA bits reduction", the adjusted
+ * MAXPHYADDR is enumerated directly (see 0x80000008).
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 45d82a9501328..cb96e4354f317 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -795,8 +795,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
+ ctxt->mode, linear);
+ }
+
+-static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
+- enum x86emul_mode mode)
++static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
+ {
+ ulong linear;
+ int rc;
+@@ -806,41 +805,71 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
+
+ if (ctxt->op_bytes != sizeof(unsigned long))
+ addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
+- rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
++ rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
+ if (rc == X86EMUL_CONTINUE)
+ ctxt->_eip = addr.ea;
+ return rc;
+ }
+
++static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
++{
++ u64 efer;
++ struct desc_struct cs;
++ u16 selector;
++ u32 base3;
++
++ ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
++
++ if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
++ /* Real mode. cpu must not have long mode active */
++ if (efer & EFER_LMA)
++ return X86EMUL_UNHANDLEABLE;
++ ctxt->mode = X86EMUL_MODE_REAL;
++ return X86EMUL_CONTINUE;
++ }
++
++ if (ctxt->eflags & X86_EFLAGS_VM) {
++ /* Protected/VM86 mode. cpu must not have long mode active */
++ if (efer & EFER_LMA)
++ return X86EMUL_UNHANDLEABLE;
++ ctxt->mode = X86EMUL_MODE_VM86;
++ return X86EMUL_CONTINUE;
++ }
++
++ if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
++ return X86EMUL_UNHANDLEABLE;
++
++ if (efer & EFER_LMA) {
++ if (cs.l) {
++ /* Proper long mode */
++ ctxt->mode = X86EMUL_MODE_PROT64;
++ } else if (cs.d) {
++ /* 32 bit compatibility mode*/
++ ctxt->mode = X86EMUL_MODE_PROT32;
++ } else {
++ ctxt->mode = X86EMUL_MODE_PROT16;
++ }
++ } else {
++ /* Legacy 32 bit / 16 bit mode */
++ ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
++ }
++
++ return X86EMUL_CONTINUE;
++}
++
+ static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
+ {
+- return assign_eip(ctxt, dst, ctxt->mode);
++ return assign_eip(ctxt, dst);
+ }
+
+-static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
+- const struct desc_struct *cs_desc)
++static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
+ {
+- enum x86emul_mode mode = ctxt->mode;
+- int rc;
++ int rc = emulator_recalc_and_set_mode(ctxt);
+
+-#ifdef CONFIG_X86_64
+- if (ctxt->mode >= X86EMUL_MODE_PROT16) {
+- if (cs_desc->l) {
+- u64 efer = 0;
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
+
+- ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+- if (efer & EFER_LMA)
+- mode = X86EMUL_MODE_PROT64;
+- } else
+- mode = X86EMUL_MODE_PROT32; /* temporary value */
+- }
+-#endif
+- if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
+- mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+- rc = assign_eip(ctxt, dst, mode);
+- if (rc == X86EMUL_CONTINUE)
+- ctxt->mode = mode;
+- return rc;
++ return assign_eip(ctxt, dst);
+ }
+
+ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+@@ -2153,7 +2182,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+- rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
++ rc = assign_eip_far(ctxt, ctxt->src.val);
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+@@ -2234,7 +2263,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ &new_desc);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+- rc = assign_eip_far(ctxt, eip, &new_desc);
++ rc = assign_eip_far(ctxt, eip);
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+@@ -2617,7 +2646,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
+ * those side effects need to be explicitly handled for both success
+ * and shutdown.
+ */
+- return X86EMUL_CONTINUE;
++ return emulator_recalc_and_set_mode(ctxt);
+
+ emulate_shutdown:
+ ctxt->ops->triple_fault(ctxt);
+@@ -2861,6 +2890,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
+
+ ctxt->_eip = rdx;
++ ctxt->mode = usermode;
+ *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
+
+ return X86EMUL_CONTINUE;
+@@ -3457,7 +3487,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+- rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
++ rc = assign_eip_far(ctxt, ctxt->src.val);
+ if (rc != X86EMUL_CONTINUE)
+ goto fail;
+
+@@ -3599,11 +3629,25 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt)
+
+ static int em_cr_write(struct x86_emulate_ctxt *ctxt)
+ {
+- if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
++ int cr_num = ctxt->modrm_reg;
++ int r;
++
++ if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
+ return emulate_gp(ctxt, 0);
+
+ /* Disable writeback. */
+ ctxt->dst.type = OP_NONE;
++
++ if (cr_num == 0) {
++ /*
++ * CR0 write might have updated CR0.PE and/or CR0.PG
++ * which can affect the cpu's execution mode.
++ */
++ r = emulator_recalc_and_set_mode(ctxt);
++ if (r != X86EMUL_CONTINUE)
++ return r;
++ }
++
+ return X86EMUL_CONTINUE;
+ }
+
+diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
+index 03ebe368333ef..c41506ed8c7dd 100644
+--- a/arch/x86/kvm/trace.h
++++ b/arch/x86/kvm/trace.h
+@@ -355,25 +355,29 @@ TRACE_EVENT(kvm_inj_virq,
+ * Tracepoint for kvm interrupt injection:
+ */
+ TRACE_EVENT(kvm_inj_exception,
+- TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
+- TP_ARGS(exception, has_error, error_code),
++ TP_PROTO(unsigned exception, bool has_error, unsigned error_code,
++ bool reinjected),
++ TP_ARGS(exception, has_error, error_code, reinjected),
+
+ TP_STRUCT__entry(
+ __field( u8, exception )
+ __field( u8, has_error )
+ __field( u32, error_code )
++ __field( bool, reinjected )
+ ),
+
+ TP_fast_assign(
+ __entry->exception = exception;
+ __entry->has_error = has_error;
+ __entry->error_code = error_code;
++ __entry->reinjected = reinjected;
+ ),
+
+- TP_printk("%s (0x%x)",
++ TP_printk("%s (0x%x)%s",
+ __print_symbolic(__entry->exception, kvm_trace_sym_exc),
+ /* FIXME: don't print error_code if not present */
+- __entry->has_error ? __entry->error_code : 0)
++ __entry->has_error ? __entry->error_code : 0,
++ __entry->reinjected ? " [reinjected]" : "")
+ );
+
+ /*
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index ba457167ae8a2..6918f597b8ab7 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7902,6 +7902,11 @@ static __init int hardware_setup(void)
+ if (!cpu_has_virtual_nmis())
+ enable_vnmi = 0;
+
++#ifdef CONFIG_X86_SGX_KVM
++ if (!cpu_has_vmx_encls_vmexit())
++ enable_sgx = false;
++#endif
++
+ /*
+ * set_apic_access_page_addr() is used to reload apic access
+ * page upon invalidation. No need to do anything if not
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 8648799d48f8b..ec2a37ba07e66 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -525,6 +525,7 @@ static int exception_class(int vector)
+ #define EXCPT_TRAP 1
+ #define EXCPT_ABORT 2
+ #define EXCPT_INTERRUPT 3
++#define EXCPT_DB 4
+
+ static int exception_type(int vector)
+ {
+@@ -535,8 +536,14 @@ static int exception_type(int vector)
+
+ mask = 1 << vector;
+
+- /* #DB is trap, as instruction watchpoints are handled elsewhere */
+- if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
++ /*
++ * #DBs can be trap-like or fault-like, the caller must check other CPU
++ * state, e.g. DR6, to determine whether a #DB is a trap or fault.
++ */
++ if (mask & (1 << DB_VECTOR))
++ return EXCPT_DB;
++
++ if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR)))
+ return EXCPT_TRAP;
+
+ if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
+@@ -5763,6 +5770,11 @@ split_irqchip_unlock:
+ r = 0;
+ break;
+ case KVM_CAP_X86_USER_SPACE_MSR:
++ r = -EINVAL;
++ if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL |
++ KVM_MSR_EXIT_REASON_UNKNOWN |
++ KVM_MSR_EXIT_REASON_FILTER))
++ break;
+ kvm->arch.user_space_msr_mask = cap->args[0];
+ r = 0;
+ break;
+@@ -5883,23 +5895,22 @@ static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
+ return 0;
+ }
+
+-static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
++static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
++ struct kvm_msr_filter *filter)
+ {
+- struct kvm_msr_filter __user *user_msr_filter = argp;
+ struct kvm_x86_msr_filter *new_filter, *old_filter;
+- struct kvm_msr_filter filter;
+ bool default_allow;
+ bool empty = true;
+ int r = 0;
+ u32 i;
+
+- if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
+- return -EFAULT;
++ if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
++ return -EINVAL;
+
+- for (i = 0; i < ARRAY_SIZE(filter.ranges); i++)
+- empty &= !filter.ranges[i].nmsrs;
++ for (i = 0; i < ARRAY_SIZE(filter->ranges); i++)
++ empty &= !filter->ranges[i].nmsrs;
+
+- default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY);
++ default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY);
+ if (empty && !default_allow)
+ return -EINVAL;
+
+@@ -5907,8 +5918,8 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
+ if (!new_filter)
+ return -ENOMEM;
+
+- for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
+- r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
++ for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) {
++ r = kvm_add_msr_filter(new_filter, &filter->ranges[i]);
+ if (r) {
+ kvm_free_msr_filter(new_filter);
+ return r;
+@@ -5931,6 +5942,62 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
+ return 0;
+ }
+
++#ifdef CONFIG_KVM_COMPAT
++/* for KVM_X86_SET_MSR_FILTER */
++struct kvm_msr_filter_range_compat {
++ __u32 flags;
++ __u32 nmsrs;
++ __u32 base;
++ __u32 bitmap;
++};
++
++struct kvm_msr_filter_compat {
++ __u32 flags;
++ struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES];
++};
++
++#define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat)
++
++long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
++ unsigned long arg)
++{
++ void __user *argp = (void __user *)arg;
++ struct kvm *kvm = filp->private_data;
++ long r = -ENOTTY;
++
++ switch (ioctl) {
++ case KVM_X86_SET_MSR_FILTER_COMPAT: {
++ struct kvm_msr_filter __user *user_msr_filter = argp;
++ struct kvm_msr_filter_compat filter_compat;
++ struct kvm_msr_filter filter;
++ int i;
++
++ if (copy_from_user(&filter_compat, user_msr_filter,
++ sizeof(filter_compat)))
++ return -EFAULT;
++
++ filter.flags = filter_compat.flags;
++ for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
++ struct kvm_msr_filter_range_compat *cr;
++
++ cr = &filter_compat.ranges[i];
++ filter.ranges[i] = (struct kvm_msr_filter_range) {
++ .flags = cr->flags,
++ .nmsrs = cr->nmsrs,
++ .base = cr->base,
++ .bitmap = (__u8 *)(ulong)cr->bitmap,
++ };
++ }
++
++ r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
++ break;
++ }
++ }
++
++ return r;
++}
++#endif
++
+ #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+ static int kvm_arch_suspend_notifier(struct kvm *kvm)
+ {
+@@ -6305,9 +6372,16 @@ set_pit2_out:
+ case KVM_SET_PMU_EVENT_FILTER:
+ r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
+ break;
+- case KVM_X86_SET_MSR_FILTER:
+- r = kvm_vm_ioctl_set_msr_filter(kvm, argp);
++ case KVM_X86_SET_MSR_FILTER: {
++ struct kvm_msr_filter __user *user_msr_filter = argp;
++ struct kvm_msr_filter filter;
++
++ if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
++ return -EFAULT;
++
++ r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
+ break;
++ }
+ default:
+ r = -ENOTTY;
+ }
+@@ -8134,6 +8208,12 @@ restart:
+ unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
+ toggle_interruptibility(vcpu, ctxt->interruptibility);
+ vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
++
++ /*
++ * Note, EXCPT_DB is assumed to be fault-like as the emulator
++ * only supports code breakpoints and general detect #DB, both
++ * of which are fault-like.
++ */
+ if (!ctxt->have_exception ||
+ exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
+ kvm_rip_write(vcpu, ctxt->eip);
+@@ -9010,6 +9090,11 @@ int kvm_check_nested_events(struct kvm_vcpu *vcpu)
+
+ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+ {
++ trace_kvm_inj_exception(vcpu->arch.exception.nr,
++ vcpu->arch.exception.has_error_code,
++ vcpu->arch.exception.error_code,
++ vcpu->arch.exception.injected);
++
+ if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
+ vcpu->arch.exception.error_code = false;
+ static_call(kvm_x86_queue_exception)(vcpu);
+@@ -9067,13 +9152,16 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
+
+ /* try to inject new event if pending */
+ if (vcpu->arch.exception.pending) {
+- trace_kvm_inj_exception(vcpu->arch.exception.nr,
+- vcpu->arch.exception.has_error_code,
+- vcpu->arch.exception.error_code);
+-
+- vcpu->arch.exception.pending = false;
+- vcpu->arch.exception.injected = true;
+-
++ /*
++ * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS
++ * value pushed on the stack. Trap-like exception and all #DBs
++ * leave RF as-is (KVM follows Intel's behavior in this regard;
++ * AMD states that code breakpoint #DBs excplitly clear RF=0).
++ *
++ * Note, most versions of Intel's SDM and AMD's APM incorrectly
++ * describe the behavior of General Detect #DBs, which are
++ * fault-like. They do _not_ set RF, a la code breakpoints.
++ */
+ if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
+ __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
+ X86_EFLAGS_RF);
+@@ -9087,6 +9175,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
+ }
+
+ kvm_inject_exception(vcpu);
++
++ vcpu->arch.exception.pending = false;
++ vcpu->arch.exception.injected = true;
++
+ can_inject = false;
+ }
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 4b862f18f4b2e..7d8fe13573f64 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -461,6 +461,8 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
+ */
+ void bfq_schedule_dispatch(struct bfq_data *bfqd)
+ {
++ lockdep_assert_held(&bfqd->lock);
++
+ if (bfqd->queued != 0) {
+ bfq_log(bfqd, "schedule dispatch");
+ blk_mq_run_hw_queues(bfqd->queue, true);
+@@ -6745,8 +6747,8 @@ bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfq_bfqq_expire(bfqd, bfqq, true, reason);
+
+ schedule_dispatch:
+- spin_unlock_irqrestore(&bfqd->lock, flags);
+ bfq_schedule_dispatch(bfqd);
++ spin_unlock_irqrestore(&bfqd->lock, flags);
+ }
+
+ /*
+diff --git a/block/genhd.c b/block/genhd.c
+index 74e19d67ceab5..68065189ca176 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -527,6 +527,7 @@ out_unregister_bdi:
+ bdi_unregister(disk->bdi);
+ out_unregister_queue:
+ blk_unregister_queue(disk);
++ rq_qos_exit(disk->queue);
+ out_put_slave_dir:
+ kobject_put(disk->slave_dir);
+ out_put_holder_dir:
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index d490670f8d55a..8678e162181f4 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -163,7 +163,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
+ clear_fixmap(fixmap_idx);
+ }
+
+-int ghes_estatus_pool_init(int num_ghes)
++int ghes_estatus_pool_init(unsigned int num_ghes)
+ {
+ unsigned long addr, len;
+ int rc;
+diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
+index 0a8bf09a5c19e..03c580625c2cc 100644
+--- a/drivers/ata/pata_legacy.c
++++ b/drivers/ata/pata_legacy.c
+@@ -315,9 +315,10 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
+ outb(inb(0x1F4) & 0x07, 0x1F4);
+
+ rt = inb(0x1F3);
+- rt &= 0x07 << (3 * adev->devno);
++ rt &= ~(0x07 << (3 * !adev->devno));
+ if (pio)
+- rt |= (1 + 3 * pio) << (3 * adev->devno);
++ rt |= (1 + 3 * pio) << (3 * !adev->devno);
++ outb(rt, 0x1F3);
+
+ udelay(100);
+ outb(inb(0x1F2) | 0x01, 0x1F2);
+diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
+index 076e4942a3f0e..612f10456849f 100644
+--- a/drivers/bluetooth/virtio_bt.c
++++ b/drivers/bluetooth/virtio_bt.c
+@@ -219,7 +219,7 @@ static void virtbt_rx_work(struct work_struct *work)
+ if (!skb)
+ return;
+
+- skb->len = len;
++ skb_put(skb, len);
+ virtbt_rx_handle(vbt, skb);
+
+ if (virtbt_add_inbuf(vbt) < 0)
+diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
+index ce7c5ba2b9b7a..d10efbf260b7a 100644
+--- a/drivers/clk/qcom/gcc-sc7280.c
++++ b/drivers/clk/qcom/gcc-sc7280.c
+@@ -3571,6 +3571,7 @@ static int gcc_sc7280_probe(struct platform_device *pdev)
+ regmap_update_bits(regmap, 0x28004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x28014, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
++ regmap_update_bits(regmap, 0x7100C, BIT(13), BIT(13));
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+diff --git a/drivers/clk/qcom/gpucc-sc7280.c b/drivers/clk/qcom/gpucc-sc7280.c
+index 9a832f2bcf491..1490cd45a654a 100644
+--- a/drivers/clk/qcom/gpucc-sc7280.c
++++ b/drivers/clk/qcom/gpucc-sc7280.c
+@@ -463,6 +463,7 @@ static int gpu_cc_sc7280_probe(struct platform_device *pdev)
+ */
+ regmap_update_bits(regmap, 0x1170, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x1098, BIT(0), BIT(0));
++ regmap_update_bits(regmap, 0x1098, BIT(13), BIT(13));
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sc7280_desc, regmap);
+ }
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index e815b8f987393..16569af4a2ba8 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -1516,8 +1516,12 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
+ {
+ int ret = scmi_chan_setup(info, dev, prot_id, true);
+
+- if (!ret) /* Rx is optional, hence no error check */
+- scmi_chan_setup(info, dev, prot_id, false);
++ if (!ret) {
++ /* Rx is optional, report only memory errors */
++ ret = scmi_chan_setup(info, dev, prot_id, false);
++ if (ret && ret != -ENOMEM)
++ ret = 0;
++ }
+
+ return ret;
+ }
+@@ -2009,6 +2013,7 @@ MODULE_DEVICE_TABLE(of, scmi_of_match);
+ static struct platform_driver scmi_driver = {
+ .driver = {
+ .name = "arm-scmi",
++ .suppress_bind_attrs = true,
+ .of_match_table = scmi_of_match,
+ .dev_groups = versions_groups,
+ },
+diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
+index 87039c5c03fdb..0c351eeee7463 100644
+--- a/drivers/firmware/arm_scmi/virtio.c
++++ b/drivers/firmware/arm_scmi/virtio.c
+@@ -247,19 +247,19 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ for (i = 0; i < vioch->max_msg; i++) {
+ struct scmi_vio_msg *msg;
+
+- msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL);
++ msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ if (tx) {
+- msg->request = devm_kzalloc(cinfo->dev,
++ msg->request = devm_kzalloc(dev,
+ VIRTIO_SCMI_MAX_PDU_SIZE,
+ GFP_KERNEL);
+ if (!msg->request)
+ return -ENOMEM;
+ }
+
+- msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE,
++ msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE,
+ GFP_KERNEL);
+ if (!msg->input)
+ return -ENOMEM;
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index e3df82d5d37a8..70be9c87fb673 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -590,7 +590,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
+
+ seed = early_memremap(efi_rng_seed, sizeof(*seed));
+ if (seed != NULL) {
+- size = READ_ONCE(seed->size);
++ size = min(seed->size, EFI_RANDOM_SEED_SIZE);
+ early_memunmap(seed, sizeof(*seed));
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
+index 24aa375353724..33ab567695951 100644
+--- a/drivers/firmware/efi/libstub/random.c
++++ b/drivers/firmware/efi/libstub/random.c
+@@ -75,7 +75,12 @@ efi_status_t efi_random_get_seed(void)
+ if (status != EFI_SUCCESS)
+ return status;
+
+- status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
++ /*
++ * Use EFI_ACPI_RECLAIM_MEMORY here so that it is guaranteed that the
++ * allocation will survive a kexec reboot (although we refresh the seed
++ * beforehand)
++ */
++ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
+ sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
+ (void **)&seed);
+ if (status != EFI_SUCCESS)
+diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
+index 8f665678e9e39..e8d69bd548f3f 100644
+--- a/drivers/firmware/efi/tpm.c
++++ b/drivers/firmware/efi/tpm.c
+@@ -97,7 +97,7 @@ int __init efi_tpm_eventlog_init(void)
+ goto out_calc;
+ }
+
+- memblock_reserve((unsigned long)final_tbl,
++ memblock_reserve(efi.tpm_final_log,
+ tbl_size + sizeof(*final_tbl));
+ efi_tpm_final_log_size = tbl_size;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index a0803425b4566..b508126a9738f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -706,6 +706,12 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
+ adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
+ }
+
++ if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
++ /* VF MMIO access (except mailbox range) from CPU
++ * will be blocked during sriov runtime
++ */
++ adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
++
+ /* we have the ability to check now */
+ if (amdgpu_sriov_vf(adev)) {
+ switch (adev->asic_type) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+index 9adfb8d63280a..ce31d4fdee935 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+@@ -31,6 +31,7 @@
+ #define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
+ #define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */
+ #define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */
++#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5) /* MMIO write access is not allowed in sriov runtime */
+
+ /* all asic after AI use this offset */
+ #define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
+@@ -278,6 +279,9 @@ struct amdgpu_video_codec_info;
+ #define amdgpu_passthrough(adev) \
+ ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
+
++#define amdgpu_sriov_vf_mmio_access_protection(adev) \
++((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)
++
+ static inline bool is_virtual_machine(void)
+ {
+ #ifdef CONFIG_X86
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index fd37bb39774c8..01710cd0d9727 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -3224,7 +3224,11 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
+ */
+ #ifdef CONFIG_X86_64
+ if (amdgpu_vm_update_mode == -1) {
+- if (amdgpu_gmc_vram_full_visible(&adev->gmc))
++ /* For asic with VF MMIO access protection
++ * avoid using CPU for VM table updates
++ */
++ if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
++ !amdgpu_sriov_vf_mmio_access_protection(adev))
+ adev->vm_manager.vm_update_mode =
+ AMDGPU_VM_USE_CPU_FOR_COMPUTE;
+ else
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+index 7072fb2ec07fa..278ff281a1bd5 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+@@ -36,10 +36,14 @@ void amdgpu_dm_set_psr_caps(struct dc_link *link)
+ {
+ uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
+
+- if (!(link->connector_signal & SIGNAL_TYPE_EDP))
++ if (!(link->connector_signal & SIGNAL_TYPE_EDP)) {
++ link->psr_settings.psr_feature_enabled = false;
+ return;
+- if (link->type == dc_connection_none)
++ }
++ if (link->type == dc_connection_none) {
++ link->psr_settings.psr_feature_enabled = false;
+ return;
++ }
+ if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
+ dpcd_data, sizeof(dpcd_data))) {
+ link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
+diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
+index 6cb27599ea030..adb1693b15758 100644
+--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
+@@ -2762,13 +2762,10 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+ if (!intel_sdvo_connector)
+ return false;
+
+- if (device == 0) {
+- intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
++ if (device == 0)
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+- } else if (device == 1) {
+- intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
++ else if (device == 1)
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+- }
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+@@ -2823,7 +2820,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+- intel_sdvo->controlled_output |= type;
+ intel_sdvo_connector->output_flag = type;
+
+ if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+@@ -2864,13 +2860,10 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+- if (device == 0) {
+- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
++ if (device == 0)
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+- } else if (device == 1) {
+- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
++ else if (device == 1)
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+- }
+
+ if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+ kfree(intel_sdvo_connector);
+@@ -2900,13 +2893,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+- if (device == 0) {
+- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
++ if (device == 0)
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+- } else if (device == 1) {
+- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
++ else if (device == 1)
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+- }
+
+ if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+ kfree(intel_sdvo_connector);
+@@ -2939,16 +2929,39 @@ err:
+ return false;
+ }
+
++static u16 intel_sdvo_filter_output_flags(u16 flags)
++{
++ flags &= SDVO_OUTPUT_MASK;
++
++ /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
++ if (!(flags & SDVO_OUTPUT_TMDS0))
++ flags &= ~SDVO_OUTPUT_TMDS1;
++
++ if (!(flags & SDVO_OUTPUT_RGB0))
++ flags &= ~SDVO_OUTPUT_RGB1;
++
++ if (!(flags & SDVO_OUTPUT_LVDS0))
++ flags &= ~SDVO_OUTPUT_LVDS1;
++
++ return flags;
++}
++
+ static bool
+ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
+ {
+- /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
++ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
++
++ flags = intel_sdvo_filter_output_flags(flags);
++
++ intel_sdvo->controlled_output = flags;
++
++ intel_sdvo_select_ddc_bus(i915, intel_sdvo);
+
+ if (flags & SDVO_OUTPUT_TMDS0)
+ if (!intel_sdvo_dvi_init(intel_sdvo, 0))
+ return false;
+
+- if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
++ if (flags & SDVO_OUTPUT_TMDS1)
+ if (!intel_sdvo_dvi_init(intel_sdvo, 1))
+ return false;
+
+@@ -2969,7 +2982,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
+ if (!intel_sdvo_analog_init(intel_sdvo, 0))
+ return false;
+
+- if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
++ if (flags & SDVO_OUTPUT_RGB1)
+ if (!intel_sdvo_analog_init(intel_sdvo, 1))
+ return false;
+
+@@ -2977,14 +2990,13 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
+ if (!intel_sdvo_lvds_init(intel_sdvo, 0))
+ return false;
+
+- if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
++ if (flags & SDVO_OUTPUT_LVDS1)
+ if (!intel_sdvo_lvds_init(intel_sdvo, 1))
+ return false;
+
+- if ((flags & SDVO_OUTPUT_MASK) == 0) {
++ if (flags == 0) {
+ unsigned char bytes[2];
+
+- intel_sdvo->controlled_output = 0;
+ memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
+ DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+ SDVO_NAME(intel_sdvo),
+@@ -3396,8 +3408,6 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
+ */
+ intel_sdvo->base.cloneable = 0;
+
+- intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo);
+-
+ /* Set the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ goto err_output;
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
+index e082e01b5e8d1..f6b09e8eca672 100644
+--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
++++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
+@@ -330,8 +330,8 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
+ goto fail;
+ }
+
+- ret = devm_request_irq(&pdev->dev, hdmi->irq,
+- msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
++ ret = devm_request_irq(dev->dev, hdmi->irq,
++ msm_hdmi_irq, IRQF_TRIGGER_HIGH,
+ "hdmi_isr", hdmi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
+diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+index 59c3d8ef6bf9a..1129f98fe7f94 100644
+--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
++++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+@@ -1027,23 +1027,31 @@ static int dw_mipi_dsi_rockchip_host_attach(void *priv_data,
+ if (ret) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to register component: %d\n",
+ ret);
+- return ret;
++ goto out;
+ }
+
+ second = dw_mipi_dsi_rockchip_find_second(dsi);
+- if (IS_ERR(second))
+- return PTR_ERR(second);
++ if (IS_ERR(second)) {
++ ret = PTR_ERR(second);
++ goto out;
++ }
+ if (second) {
+ ret = component_add(second, &dw_mipi_dsi_rockchip_ops);
+ if (ret) {
+ DRM_DEV_ERROR(second,
+ "Failed to register component: %d\n",
+ ret);
+- return ret;
++ goto out;
+ }
+ }
+
+ return 0;
++
++out:
++ mutex_lock(&dsi->usage_mutex);
++ dsi->usage_mode = DW_DSI_USAGE_IDLE;
++ mutex_unlock(&dsi->usage_mutex);
++ return ret;
+ }
+
+ static int dw_mipi_dsi_rockchip_host_detach(void *priv_data,
+@@ -1630,5 +1638,11 @@ struct platform_driver dw_mipi_dsi_rockchip_driver = {
+ .of_match_table = dw_mipi_dsi_rockchip_dt_ids,
+ .pm = &dw_mipi_dsi_rockchip_pm_ops,
+ .name = "dw-mipi-dsi-rockchip",
++ /*
++ * For dual-DSI display, one DSI pokes at the other DSI's
++ * drvdata in dw_mipi_dsi_rockchip_find_second(). This is not
++ * safe for asynchronous probe.
++ */
++ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+ },
+ };
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index cb2b48d6915ee..c8a313c84a57d 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -845,6 +845,7 @@
+ #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
+ #define USB_DEVICE_ID_MADCATZ_RAT5 0x1705
+ #define USB_DEVICE_ID_MADCATZ_RAT9 0x1709
++#define USB_DEVICE_ID_MADCATZ_MMO7 0x1713
+
+ #define USB_VENDOR_ID_MCC 0x09db
+ #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
+@@ -1109,6 +1110,7 @@
+ #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
+ #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
+ #define USB_DEVICE_ID_SONY_PS5_CONTROLLER 0x0ce6
++#define USB_DEVICE_ID_SONY_PS5_CONTROLLER_2 0x0df2
+ #define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5
+ #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
+ #define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
+diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c
+index ab7c82c2e8867..bd0e0fe2f6279 100644
+--- a/drivers/hid/hid-playstation.c
++++ b/drivers/hid/hid-playstation.c
+@@ -1282,7 +1282,8 @@ static int ps_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ goto err_stop;
+ }
+
+- if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER) {
++ if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER ||
++ hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) {
+ dev = dualsense_create(hdev);
+ if (IS_ERR(dev)) {
+ hid_err(hdev, "Failed to create dualsense.\n");
+@@ -1320,6 +1321,8 @@ static void ps_remove(struct hid_device *hdev)
+ static const struct hid_device_id ps_devices[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, ps_devices);
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 544d1197aca48..8d36cb7551cf1 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -608,6 +608,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7) },
+ #endif
+ #if IS_ENABLED(CONFIG_HID_SAMSUNG)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
+index c7bf14c019605..b84e975977c42 100644
+--- a/drivers/hid/hid-saitek.c
++++ b/drivers/hid/hid-saitek.c
+@@ -187,6 +187,8 @@ static const struct hid_device_id saitek_devices[] = {
+ .driver_data = SAITEK_RELEASE_MODE_RAT7 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7),
+ .driver_data = SAITEK_RELEASE_MODE_MMO7 },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7),
++ .driver_data = SAITEK_RELEASE_MODE_MMO7 },
+ { }
+ };
+
+diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
+index 39cb1b7bb8656..809fbd014cd68 100644
+--- a/drivers/i2c/busses/i2c-piix4.c
++++ b/drivers/i2c/busses/i2c-piix4.c
+@@ -1080,6 +1080,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ "", &piix4_main_adapters[0]);
+ if (retval < 0)
+ return retval;
++ piix4_adapter_count = 1;
+ }
+
+ /* Check for auxiliary SMBus on some AMD chipsets */
+diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
+index 612343771ce25..34b8da949462a 100644
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -934,6 +934,7 @@ static struct platform_driver xiic_i2c_driver = {
+
+ module_platform_driver(xiic_i2c_driver);
+
++MODULE_ALIAS("platform:" DRIVER_NAME);
+ MODULE_AUTHOR("info@mocean-labs.com");
+ MODULE_DESCRIPTION("Xilinx I2C bus driver");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 0da66dd40d6a8..fd192104fd8d3 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1433,7 +1433,7 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
+ return false;
+
+ memset(&fl4, 0, sizeof(fl4));
+- fl4.flowi4_iif = net_dev->ifindex;
++ fl4.flowi4_oif = net_dev->ifindex;
+ fl4.daddr = daddr;
+ fl4.saddr = saddr;
+
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index 6ab46648af909..1519379b116e2 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -2814,10 +2814,18 @@ static int __init ib_core_init(void)
+
+ nldev_init();
+ rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
+- roce_gid_mgmt_init();
++ ret = roce_gid_mgmt_init();
++ if (ret) {
++ pr_warn("Couldn't init RoCE GID management\n");
++ goto err_parent;
++ }
+
+ return 0;
+
++err_parent:
++ rdma_nl_unregister(RDMA_NL_LS);
++ nldev_exit();
++ unregister_pernet_device(&rdma_dev_net_ops);
+ err_compat:
+ unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
+ err_sa:
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
+index e9b4b2cccaa0f..cd89e59cbe332 100644
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -2349,7 +2349,7 @@ void __init nldev_init(void)
+ rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
+ }
+
+-void __exit nldev_exit(void)
++void nldev_exit(void)
+ {
+ rdma_nl_unregister(RDMA_NL_NLDEV);
+ }
+diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
+index 3d42bd2b36bd4..51ae58c02b15c 100644
+--- a/drivers/infiniband/hw/hfi1/pio.c
++++ b/drivers/infiniband/hw/hfi1/pio.c
+@@ -913,8 +913,7 @@ void sc_disable(struct send_context *sc)
+ spin_unlock(&sc->release_lock);
+
+ write_seqlock(&sc->waitlock);
+- if (!list_empty(&sc->piowait))
+- list_move(&sc->piowait, &wake_list);
++ list_splice_init(&sc->piowait, &wake_list);
+ write_sequnlock(&sc->waitlock);
+ while (!list_empty(&wake_list)) {
+ struct iowait *wait;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 1dbad159f3792..1421896abaf09 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -82,7 +82,6 @@ static const u32 hns_roce_op_code[] = {
+ HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP),
+ HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD),
+ HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
+- HR_OPC_MAP(LOCAL_INV, LOCAL_INV),
+ HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP),
+ HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
+ HR_OPC_MAP(REG_MR, FAST_REG_PMR),
+@@ -149,8 +148,7 @@ static void set_atomic_seg(const struct ib_send_wr *wr,
+ aseg->cmp_data = 0;
+ }
+
+- roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
+ }
+
+ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
+@@ -271,8 +269,7 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
+ dseg += sizeof(struct hns_roce_v2_rc_send_wqe);
+
+ if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
+- roce_set_bit(rc_sq_wqe->byte_20,
+- V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0);
++ hr_reg_clear(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);
+
+ for (i = 0; i < wr->num_sge; i++) {
+ memcpy(dseg, ((void *)wr->sg_list[i].addr),
+@@ -280,17 +277,13 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
+ dseg += wr->sg_list[i].length;
+ }
+ } else {
+- roce_set_bit(rc_sq_wqe->byte_20,
+- V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 1);
++ hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);
+
+ ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
+ if (ret)
+ return ret;
+
+- roce_set_field(rc_sq_wqe->byte_16,
+- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
+- curr_idx - *sge_idx);
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, curr_idx - *sge_idx);
+ }
+
+ *sge_idx = curr_idx;
+@@ -309,12 +302,10 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ int j = 0;
+ int i;
+
+- roce_set_field(rc_sq_wqe->byte_20,
+- V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+- V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+- (*sge_ind) & (qp->sge.sge_cnt - 1));
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
++ (*sge_ind) & (qp->sge.sge_cnt - 1));
+
+- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
+ !!(wr->send_flags & IB_SEND_INLINE));
+ if (wr->send_flags & IB_SEND_INLINE)
+ return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);
+@@ -339,9 +330,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ valid_num_sge - HNS_ROCE_SGE_IN_WQE);
+ }
+
+- roce_set_field(rc_sq_wqe->byte_16,
+- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
+
+ return 0;
+ }
+@@ -412,8 +401,7 @@ static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
+
+ ud_sq_wqe->immtdata = get_immtdata(wr);
+
+- roce_set_field(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
+- V2_UD_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));
++ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OPCODE, to_hr_opcode(ib_op));
+
+ return 0;
+ }
+@@ -424,21 +412,15 @@ static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
+ struct ib_device *ib_dev = ah->ibah.device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+
+- roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
+- V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, ah->av.udp_sport);
+-
+- roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
+- V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit);
+- roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
+- V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass);
+- roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
+- V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel);
++ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_UDPSPN, ah->av.udp_sport);
++ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit);
++ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass);
++ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel);
+
+ if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
+ return -EINVAL;
+
+- roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M,
+- V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl);
++ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl);
+
+ ud_sq_wqe->sgid_index = ah->av.gid_index;
+
+@@ -448,10 +430,8 @@ static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return 0;
+
+- roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
+- ah->av.vlan_en);
+- roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M,
+- V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id);
++ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN_EN, ah->av.vlan_en);
++ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN, ah->av.vlan_id);
+
+ return 0;
+ }
+@@ -476,27 +456,19 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
+
+ ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
+
+- roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S,
++ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_CQE,
+ !!(wr->send_flags & IB_SEND_SIGNALED));
+-
+- roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S,
++ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SE,
+ !!(wr->send_flags & IB_SEND_SOLICITED));
+
+- roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M,
+- V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn);
+-
+- roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
+- V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
+-
+- roce_set_field(ud_sq_wqe->byte_20,
+- V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+- V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+- curr_idx & (qp->sge.sge_cnt - 1));
++ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn);
++ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SGE_NUM, valid_num_sge);
++ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_MSG_START_SGE_IDX,
++ curr_idx & (qp->sge.sge_cnt - 1));
+
+ ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
+ qp->qkey : ud_wr(wr)->remote_qkey);
+- roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M,
+- V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn);
++ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_DQPN, ud_wr(wr)->remote_qpn);
+
+ ret = fill_ud_av(ud_sq_wqe, ah);
+ if (ret)
+@@ -516,8 +488,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
+ dma_wmb();
+
+ *sge_idx = curr_idx;
+- roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S,
+- owner_bit);
++ hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OWNER, owner_bit);
+
+ return 0;
+ }
+@@ -552,9 +523,6 @@ static int set_rc_opcode(struct hns_roce_dev *hr_dev,
+ else
+ ret = -EOPNOTSUPP;
+ break;
+- case IB_WR_LOCAL_INV:
+- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
+- fallthrough;
+ case IB_WR_SEND_WITH_INV:
+ rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
+ break;
+@@ -565,11 +533,11 @@ static int set_rc_opcode(struct hns_roce_dev *hr_dev,
+ if (unlikely(ret))
+ return ret;
+
+- roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+- V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OPCODE, to_hr_opcode(ib_op));
+
+ return ret;
+ }
++
+ static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ const struct ib_send_wr *wr,
+ void *wqe, unsigned int *sge_idx,
+@@ -590,13 +558,13 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ if (WARN_ON(ret))
+ return ret;
+
+- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_FENCE,
+ (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+
+- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SE,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+
+- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+@@ -616,8 +584,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ dma_wmb();
+
+ *sge_idx = curr_idx;
+- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
+- owner_bit);
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OWNER, owner_bit);
+
+ return ret;
+ }
+@@ -678,16 +645,15 @@ static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
+ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+ void *wqe)
+ {
++#define HNS_ROCE_SL_SHIFT 2
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
+
+ /* All kinds of DirectWQE have the same header field layout */
+- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FLAG_S, 1);
+- roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M,
+- V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl);
+- roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M,
+- V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, qp->sl >> 2);
+- roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M,
+- V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);
++ hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_H,
++ qp->sl >> HNS_ROCE_SL_SHIFT);
++ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_WQE_INDEX, qp->sq.head);
+
+ hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
+ }
+@@ -1783,17 +1749,16 @@ static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
+ swt = (struct hns_roce_vf_switch *)desc.data;
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
+ swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
+- roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M,
+- VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id);
++ hr_reg_write(swt, VF_SWITCH_VF_ID, vf_id);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ return ret;
+
+ desc.flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
+ desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
+- roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
+- roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
+- roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);
++ hr_reg_enable(swt, VF_SWITCH_ALW_LPBK);
++ hr_reg_clear(swt, VF_SWITCH_ALW_LCL_LPBK);
++ hr_reg_enable(swt, VF_SWITCH_ALW_DST_OVRD);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+ }
+@@ -2942,10 +2907,8 @@ static int config_sgid_table(struct hns_roce_dev *hr_dev,
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
+
+- roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M,
+- CFG_SGID_TB_TABLE_IDX_S, gid_index);
+- roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M,
+- CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
++ hr_reg_write(sgid_tb, CFG_SGID_TB_TABLE_IDX, gid_index);
++ hr_reg_write(sgid_tb, CFG_SGID_TB_VF_SGID_TYPE, sgid_type);
+
+ copy_gid(&sgid_tb->vf_sgid_l, gid);
+
+@@ -2980,19 +2943,14 @@ static int config_gmv_table(struct hns_roce_dev *hr_dev,
+
+ copy_gid(&tb_a->vf_sgid_l, gid);
+
+- roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_SGID_TYPE_M,
+- CFG_GMV_TB_VF_SGID_TYPE_S, sgid_type);
+- roce_set_bit(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_EN_S,
+- vlan_id < VLAN_CFI_MASK);
+- roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_ID_M,
+- CFG_GMV_TB_VF_VLAN_ID_S, vlan_id);
++ hr_reg_write(tb_a, GMV_TB_A_VF_SGID_TYPE, sgid_type);
++ hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_EN, vlan_id < VLAN_CFI_MASK);
++ hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_ID, vlan_id);
+
+ tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac);
+- roce_set_field(tb_b->vf_smac_h, CFG_GMV_TB_SMAC_H_M,
+- CFG_GMV_TB_SMAC_H_S, *(u16 *)&mac[4]);
+
+- roce_set_field(tb_b->table_idx_rsv, CFG_GMV_TB_SGID_IDX_M,
+- CFG_GMV_TB_SGID_IDX_S, gid_index);
++ hr_reg_write(tb_b, GMV_TB_B_SMAC_H, *(u16 *)&mac[4]);
++ hr_reg_write(tb_b, GMV_TB_B_SGID_IDX, gid_index);
+
+ return hns_roce_cmq_send(hr_dev, desc, 2);
+ }
+@@ -3041,10 +2999,8 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
+ reg_smac_l = *(u32 *)(&addr[0]);
+ reg_smac_h = *(u16 *)(&addr[4]);
+
+- roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M,
+- CFG_SMAC_TB_IDX_S, phy_port);
+- roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M,
+- CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
++ hr_reg_write(smac_tb, CFG_SMAC_TB_IDX, phy_port);
++ hr_reg_write(smac_tb, CFG_SMAC_TB_VF_SMAC_H, reg_smac_h);
+ smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+@@ -3073,21 +3029,15 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
+
+ mpt_entry->pbl_size = cpu_to_le32(mr->npages);
+ mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
+- roce_set_field(mpt_entry->byte_48_mode_ba,
+- V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
+- upper_32_bits(pbl_ba >> 3));
++ hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3));
+
+ mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
+- roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
+- V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
++ hr_reg_write(mpt_entry, MPT_PA0_H, upper_32_bits(pages[0]));
+
+ mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
+- roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
+- V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
+- roce_set_field(mpt_entry->byte_64_buf_pa1,
+- V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+- V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
+- to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
++ hr_reg_write(mpt_entry, MPT_PA1_H, upper_32_bits(pages[1]));
++ hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
++ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
+
+ return 0;
+ }
+@@ -3104,7 +3054,6 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
+
+ hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
+ hr_reg_write(mpt_entry, MPT_PD, mr->pd);
+- hr_reg_enable(mpt_entry, MPT_L_INV_EN);
+
+ hr_reg_write_bool(mpt_entry, MPT_BIND_EN,
+ mr->access & IB_ACCESS_MW_BIND);
+@@ -3149,24 +3098,19 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
+ u32 mr_access_flags = mr->access;
+ int ret = 0;
+
+- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
+- V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
+-
+- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+- V2_MPT_BYTE_4_PD_S, mr->pd);
++ hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
++ hr_reg_write(mpt_entry, MPT_PD, mr->pd);
+
+ if (flags & IB_MR_REREG_ACCESS) {
+- roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
+- V2_MPT_BYTE_8_BIND_EN_S,
++ hr_reg_write(mpt_entry, MPT_BIND_EN,
+ (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
+- roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
+- V2_MPT_BYTE_8_ATOMIC_EN_S,
++ hr_reg_write(mpt_entry, MPT_ATOMIC_EN,
+ mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
+- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
++ hr_reg_write(mpt_entry, MPT_RR_EN,
+ mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
+- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
++ hr_reg_write(mpt_entry, MPT_RW_EN,
+ mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
+- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
++ hr_reg_write(mpt_entry, MPT_LW_EN,
+ mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
+ }
+
+@@ -3197,37 +3141,27 @@ static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
+ return -ENOBUFS;
+ }
+
+- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
+- V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
+- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
+- V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
+- roce_set_field(mpt_entry->byte_4_pd_hop_st,
+- V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
+- V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
+- to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
+- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+- V2_MPT_BYTE_4_PD_S, mr->pd);
++ hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
++ hr_reg_write(mpt_entry, MPT_PD, mr->pd);
+
+- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
+- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
+- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
++ hr_reg_enable(mpt_entry, MPT_RA_EN);
++ hr_reg_enable(mpt_entry, MPT_R_INV_EN);
+
+- roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
+- roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
+- roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
+- roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
++ hr_reg_enable(mpt_entry, MPT_FRE);
++ hr_reg_clear(mpt_entry, MPT_MR_MW);
++ hr_reg_enable(mpt_entry, MPT_BPD);
++ hr_reg_clear(mpt_entry, MPT_PA);
++
++ hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 1);
++ hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
++ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
++ hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
++ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
+
+ mpt_entry->pbl_size = cpu_to_le32(mr->npages);
+
+ mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
+- roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
+- V2_MPT_BYTE_48_PBL_BA_H_S,
+- upper_32_bits(pbl_ba >> 3));
+-
+- roce_set_field(mpt_entry->byte_64_buf_pa1,
+- V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+- V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
+- to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
++ hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3));
+
+ return 0;
+ }
+@@ -3239,36 +3173,28 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
+ mpt_entry = mb_buf;
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
+- V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
+- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+- V2_MPT_BYTE_4_PD_S, mw->pdn);
+- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
+- V2_MPT_BYTE_4_PBL_HOP_NUM_S,
+- mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
+- mw->pbl_hop_num);
+- roce_set_field(mpt_entry->byte_4_pd_hop_st,
+- V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
+- V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
+- mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
+-
+- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
+- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
+- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);
+-
+- roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
+- roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
+- roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
+- roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
+- mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
++ hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
++ hr_reg_write(mpt_entry, MPT_PD, mw->pdn);
+
+- roce_set_field(mpt_entry->byte_64_buf_pa1,
+- V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+- V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
+- mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
++ hr_reg_enable(mpt_entry, MPT_R_INV_EN);
++ hr_reg_enable(mpt_entry, MPT_LW_EN);
++
++ hr_reg_enable(mpt_entry, MPT_MR_MW);
++ hr_reg_enable(mpt_entry, MPT_BPD);
++ hr_reg_clear(mpt_entry, MPT_PA);
++ hr_reg_write(mpt_entry, MPT_BQP,
++ mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
+
+ mpt_entry->lkey = cpu_to_le32(mw->rkey);
+
++ hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM,
++ mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
++ mw->pbl_hop_num);
++ hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
++ mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
++ hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
++ mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
++
+ return 0;
+ }
+
+@@ -3607,7 +3533,6 @@ static const u32 wc_send_op_map[] = {
+ HR_WC_OP_MAP(RDMA_READ, RDMA_READ),
+ HR_WC_OP_MAP(RDMA_WRITE, RDMA_WRITE),
+ HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE),
+- HR_WC_OP_MAP(LOCAL_INV, LOCAL_INV),
+ HR_WC_OP_MAP(ATOM_CMP_AND_SWAP, COMP_SWAP),
+ HR_WC_OP_MAP(ATOM_FETCH_AND_ADD, FETCH_ADD),
+ HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP),
+@@ -3657,9 +3582,6 @@ static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
+ case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ break;
+- case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
+- wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+- break;
+ case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
+ case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
+ case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index d3d5b5f57052c..2f4a0019a716d 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -184,7 +184,6 @@ enum {
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP = 0x8,
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD = 0x9,
+ HNS_ROCE_V2_WQE_OP_FAST_REG_PMR = 0xa,
+- HNS_ROCE_V2_WQE_OP_LOCAL_INV = 0xb,
+ HNS_ROCE_V2_WQE_OP_BIND_MW = 0xc,
+ HNS_ROCE_V2_WQE_OP_MASK = 0x1f,
+ };
+@@ -790,12 +789,15 @@ struct hns_roce_v2_mpt_entry {
+ #define MPT_LKEY MPT_FIELD_LOC(223, 192)
+ #define MPT_VA MPT_FIELD_LOC(287, 224)
+ #define MPT_PBL_SIZE MPT_FIELD_LOC(319, 288)
+-#define MPT_PBL_BA MPT_FIELD_LOC(380, 320)
++#define MPT_PBL_BA_L MPT_FIELD_LOC(351, 320)
++#define MPT_PBL_BA_H MPT_FIELD_LOC(380, 352)
+ #define MPT_BLK_MODE MPT_FIELD_LOC(381, 381)
+ #define MPT_RSV0 MPT_FIELD_LOC(383, 382)
+-#define MPT_PA0 MPT_FIELD_LOC(441, 384)
++#define MPT_PA0_L MPT_FIELD_LOC(415, 384)
++#define MPT_PA0_H MPT_FIELD_LOC(441, 416)
+ #define MPT_BOUND_VA MPT_FIELD_LOC(447, 442)
+-#define MPT_PA1 MPT_FIELD_LOC(505, 448)
++#define MPT_PA1_L MPT_FIELD_LOC(479, 448)
++#define MPT_PA1_H MPT_FIELD_LOC(505, 480)
+ #define MPT_PERSIST_EN MPT_FIELD_LOC(506, 506)
+ #define MPT_RSV2 MPT_FIELD_LOC(507, 507)
+ #define MPT_PBL_BUF_PG_SZ MPT_FIELD_LOC(511, 508)
+@@ -901,48 +903,24 @@ struct hns_roce_v2_ud_send_wqe {
+ u8 dgid[GID_LEN_V2];
+ };
+
+-#define V2_UD_SEND_WQE_BYTE_4_OPCODE_S 0
+-#define V2_UD_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0)
+-
+-#define V2_UD_SEND_WQE_BYTE_4_OWNER_S 7
+-
+-#define V2_UD_SEND_WQE_BYTE_4_CQE_S 8
+-
+-#define V2_UD_SEND_WQE_BYTE_4_SE_S 11
+-
+-#define V2_UD_SEND_WQE_BYTE_16_PD_S 0
+-#define V2_UD_SEND_WQE_BYTE_16_PD_M GENMASK(23, 0)
+-
+-#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S 24
+-#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24)
+-
+-#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
+-#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
+-
+-#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_S 16
+-#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_M GENMASK(31, 16)
+-
+-#define V2_UD_SEND_WQE_BYTE_32_DQPN_S 0
+-#define V2_UD_SEND_WQE_BYTE_32_DQPN_M GENMASK(23, 0)
+-
+-#define V2_UD_SEND_WQE_BYTE_36_VLAN_S 0
+-#define V2_UD_SEND_WQE_BYTE_36_VLAN_M GENMASK(15, 0)
+-
+-#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S 16
+-#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M GENMASK(23, 16)
+-
+-#define V2_UD_SEND_WQE_BYTE_36_TCLASS_S 24
+-#define V2_UD_SEND_WQE_BYTE_36_TCLASS_M GENMASK(31, 24)
+-
+-#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S 0
+-#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M GENMASK(19, 0)
+-
+-#define V2_UD_SEND_WQE_BYTE_40_SL_S 20
+-#define V2_UD_SEND_WQE_BYTE_40_SL_M GENMASK(23, 20)
+-
+-#define V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S 30
+-
+-#define V2_UD_SEND_WQE_BYTE_40_LBI_S 31
++#define UD_SEND_WQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_ud_send_wqe, h, l)
++
++#define UD_SEND_WQE_OPCODE UD_SEND_WQE_FIELD_LOC(4, 0)
++#define UD_SEND_WQE_OWNER UD_SEND_WQE_FIELD_LOC(7, 7)
++#define UD_SEND_WQE_CQE UD_SEND_WQE_FIELD_LOC(8, 8)
++#define UD_SEND_WQE_SE UD_SEND_WQE_FIELD_LOC(11, 11)
++#define UD_SEND_WQE_PD UD_SEND_WQE_FIELD_LOC(119, 96)
++#define UD_SEND_WQE_SGE_NUM UD_SEND_WQE_FIELD_LOC(127, 120)
++#define UD_SEND_WQE_MSG_START_SGE_IDX UD_SEND_WQE_FIELD_LOC(151, 128)
++#define UD_SEND_WQE_UDPSPN UD_SEND_WQE_FIELD_LOC(191, 176)
++#define UD_SEND_WQE_DQPN UD_SEND_WQE_FIELD_LOC(247, 224)
++#define UD_SEND_WQE_VLAN UD_SEND_WQE_FIELD_LOC(271, 256)
++#define UD_SEND_WQE_HOPLIMIT UD_SEND_WQE_FIELD_LOC(279, 272)
++#define UD_SEND_WQE_TCLASS UD_SEND_WQE_FIELD_LOC(287, 280)
++#define UD_SEND_WQE_FLOW_LABEL UD_SEND_WQE_FIELD_LOC(307, 288)
++#define UD_SEND_WQE_SL UD_SEND_WQE_FIELD_LOC(311, 308)
++#define UD_SEND_WQE_VLAN_EN UD_SEND_WQE_FIELD_LOC(318, 318)
++#define UD_SEND_WQE_LBI UD_SEND_WQE_FIELD_LOC(319, 319)
+
+ struct hns_roce_v2_rc_send_wqe {
+ __le32 byte_4;
+@@ -957,42 +935,22 @@ struct hns_roce_v2_rc_send_wqe {
+ __le64 va;
+ };
+
+-#define V2_RC_SEND_WQE_BYTE_4_OPCODE_S 0
+-#define V2_RC_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0)
+-
+-#define V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S 5
+-#define V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M GENMASK(6, 5)
+-
+-#define V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S 13
+-#define V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M GENMASK(14, 13)
+-
+-#define V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S 15
+-#define V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M GENMASK(30, 15)
+-
+-#define V2_RC_SEND_WQE_BYTE_4_OWNER_S 7
+-
+-#define V2_RC_SEND_WQE_BYTE_4_CQE_S 8
+-
+-#define V2_RC_SEND_WQE_BYTE_4_FENCE_S 9
+-
+-#define V2_RC_SEND_WQE_BYTE_4_SO_S 10
+-
+-#define V2_RC_SEND_WQE_BYTE_4_SE_S 11
+-
+-#define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12
+-
+-#define V2_RC_SEND_WQE_BYTE_4_FLAG_S 31
+-
+-#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0
+-#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0)
+-
+-#define V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S 24
+-#define V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24)
+-
+-#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
+-#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
+-
+-#define V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S 31
++#define RC_SEND_WQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_rc_send_wqe, h, l)
++
++#define RC_SEND_WQE_OPCODE RC_SEND_WQE_FIELD_LOC(4, 0)
++#define RC_SEND_WQE_DB_SL_L RC_SEND_WQE_FIELD_LOC(6, 5)
++#define RC_SEND_WQE_DB_SL_H RC_SEND_WQE_FIELD_LOC(14, 13)
++#define RC_SEND_WQE_OWNER RC_SEND_WQE_FIELD_LOC(7, 7)
++#define RC_SEND_WQE_CQE RC_SEND_WQE_FIELD_LOC(8, 8)
++#define RC_SEND_WQE_FENCE RC_SEND_WQE_FIELD_LOC(9, 9)
++#define RC_SEND_WQE_SE RC_SEND_WQE_FIELD_LOC(11, 11)
++#define RC_SEND_WQE_INLINE RC_SEND_WQE_FIELD_LOC(12, 12)
++#define RC_SEND_WQE_WQE_INDEX RC_SEND_WQE_FIELD_LOC(30, 15)
++#define RC_SEND_WQE_FLAG RC_SEND_WQE_FIELD_LOC(31, 31)
++#define RC_SEND_WQE_XRC_SRQN RC_SEND_WQE_FIELD_LOC(119, 96)
++#define RC_SEND_WQE_SGE_NUM RC_SEND_WQE_FIELD_LOC(127, 120)
++#define RC_SEND_WQE_MSG_START_SGE_IDX RC_SEND_WQE_FIELD_LOC(151, 128)
++#define RC_SEND_WQE_INL_TYPE RC_SEND_WQE_FIELD_LOC(159, 159)
+
+ struct hns_roce_wqe_frmr_seg {
+ __le32 pbl_size;
+@@ -1114,12 +1072,12 @@ struct hns_roce_vf_switch {
+ __le32 resv3;
+ };
+
+-#define VF_SWITCH_DATA_FUN_ID_VF_ID_S 3
+-#define VF_SWITCH_DATA_FUN_ID_VF_ID_M GENMASK(10, 3)
++#define VF_SWITCH_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_vf_switch, h, l)
+
+-#define VF_SWITCH_DATA_CFG_ALW_LPBK_S 1
+-#define VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S 2
+-#define VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S 3
++#define VF_SWITCH_VF_ID VF_SWITCH_FIELD_LOC(42, 35)
++#define VF_SWITCH_ALW_LPBK VF_SWITCH_FIELD_LOC(65, 65)
++#define VF_SWITCH_ALW_LCL_LPBK VF_SWITCH_FIELD_LOC(66, 66)
++#define VF_SWITCH_ALW_DST_OVRD VF_SWITCH_FIELD_LOC(67, 67)
+
+ struct hns_roce_post_mbox {
+ __le32 in_param_l;
+@@ -1182,11 +1140,10 @@ struct hns_roce_cfg_sgid_tb {
+ __le32 vf_sgid_type_rsv;
+ };
+
+-#define CFG_SGID_TB_TABLE_IDX_S 0
+-#define CFG_SGID_TB_TABLE_IDX_M GENMASK(7, 0)
++#define SGID_TB_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_sgid_tb, h, l)
+
+-#define CFG_SGID_TB_VF_SGID_TYPE_S 0
+-#define CFG_SGID_TB_VF_SGID_TYPE_M GENMASK(1, 0)
++#define CFG_SGID_TB_TABLE_IDX SGID_TB_FIELD_LOC(7, 0)
++#define CFG_SGID_TB_VF_SGID_TYPE SGID_TB_FIELD_LOC(161, 160)
+
+ struct hns_roce_cfg_smac_tb {
+ __le32 tb_idx_rsv;
+@@ -1194,11 +1151,11 @@ struct hns_roce_cfg_smac_tb {
+ __le32 vf_smac_h_rsv;
+ __le32 rsv[3];
+ };
+-#define CFG_SMAC_TB_IDX_S 0
+-#define CFG_SMAC_TB_IDX_M GENMASK(7, 0)
+
+-#define CFG_SMAC_TB_VF_SMAC_H_S 0
+-#define CFG_SMAC_TB_VF_SMAC_H_M GENMASK(15, 0)
++#define SMAC_TB_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_smac_tb, h, l)
++
++#define CFG_SMAC_TB_IDX SMAC_TB_FIELD_LOC(7, 0)
++#define CFG_SMAC_TB_VF_SMAC_H SMAC_TB_FIELD_LOC(79, 64)
+
+ struct hns_roce_cfg_gmv_tb_a {
+ __le32 vf_sgid_l;
+@@ -1209,16 +1166,11 @@ struct hns_roce_cfg_gmv_tb_a {
+ __le32 resv;
+ };
+
+-#define CFG_GMV_TB_SGID_IDX_S 0
+-#define CFG_GMV_TB_SGID_IDX_M GENMASK(7, 0)
+-
+-#define CFG_GMV_TB_VF_SGID_TYPE_S 0
+-#define CFG_GMV_TB_VF_SGID_TYPE_M GENMASK(1, 0)
++#define GMV_TB_A_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_gmv_tb_a, h, l)
+
+-#define CFG_GMV_TB_VF_VLAN_EN_S 2
+-
+-#define CFG_GMV_TB_VF_VLAN_ID_S 16
+-#define CFG_GMV_TB_VF_VLAN_ID_M GENMASK(27, 16)
++#define GMV_TB_A_VF_SGID_TYPE GMV_TB_A_FIELD_LOC(129, 128)
++#define GMV_TB_A_VF_VLAN_EN GMV_TB_A_FIELD_LOC(130, 130)
++#define GMV_TB_A_VF_VLAN_ID GMV_TB_A_FIELD_LOC(155, 144)
+
+ struct hns_roce_cfg_gmv_tb_b {
+ __le32 vf_smac_l;
+@@ -1227,8 +1179,10 @@ struct hns_roce_cfg_gmv_tb_b {
+ __le32 resv[3];
+ };
+
+-#define CFG_GMV_TB_SMAC_H_S 0
+-#define CFG_GMV_TB_SMAC_H_M GENMASK(15, 0)
++#define GMV_TB_B_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_gmv_tb_b, h, l)
++
++#define GMV_TB_B_SMAC_H GMV_TB_B_FIELD_LOC(47, 32)
++#define GMV_TB_B_SGID_IDX GMV_TB_B_FIELD_LOC(71, 64)
+
+ #define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 5
+ struct hns_roce_query_pf_caps_a {
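/*
 * [Editor's annotation, not part of the 5.15.78 patch] The header rework
 * above replaces per-dword mask/shift #define pairs with FIELD_LOC(high,
 * low) positions counted from bit 0 of the whole descriptor, which is
 * what the hr_reg_write()/hr_reg_clear()/hr_reg_enable() helpers consume.
 * A simplified stand-alone model of that idiom (the real driver also
 * folds in sizeof() checks and __le32 byte-order conversion, omitted
 * here; fields are assumed not to cross a 32-bit boundary, as in these
 * descriptors):
 */
#include <stdint.h>
#include <stdio.h>

static void reg_write(uint32_t *regs, unsigned int h, unsigned int l,
		      uint32_t val)
{
	uint32_t mask = (uint32_t)(((1ULL << (h - l + 1)) - 1) << (l % 32));

	regs[l / 32] = (regs[l / 32] & ~mask) | ((val << (l % 32)) & mask);
}

int main(void)
{
	uint32_t wqe[16] = { 0 };

	reg_write(wqe, 127, 120, 3);	/* RC_SEND_WQE_SGE_NUM: bits 127..120 */
	printf("dword 3 = 0x%08x\n", wqe[3]);	/* 0x03000000 */
	return 0;
}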
+diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
+index 755930be01b8e..6b59f97a182b0 100644
+--- a/drivers/infiniband/hw/qedr/main.c
++++ b/drivers/infiniband/hw/qedr/main.c
+@@ -345,6 +345,10 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
+ if (IS_IWARP(dev)) {
+ xa_init(&dev->qps);
+ dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
++ if (!dev->iwarp_wq) {
++ rc = -ENOMEM;
++ goto err1;
++ }
+ }
+
+ /* Allocate Status blocks for CNQ */
+@@ -352,7 +356,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
+ GFP_KERNEL);
+ if (!dev->sb_array) {
+ rc = -ENOMEM;
+- goto err1;
++ goto err_destroy_wq;
+ }
+
+ dev->cnq_array = kcalloc(dev->num_cnq,
+@@ -403,6 +407,9 @@ err3:
+ kfree(dev->cnq_array);
+ err2:
+ kfree(dev->sb_array);
++err_destroy_wq:
++ if (IS_IWARP(dev))
++ destroy_workqueue(dev->iwarp_wq);
+ err1:
+ kfree(dev->sgid_tbl);
+ return rc;
+diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
+index a52f275f82634..f8447135a9022 100644
+--- a/drivers/isdn/hardware/mISDN/netjet.c
++++ b/drivers/isdn/hardware/mISDN/netjet.c
+@@ -956,7 +956,7 @@ nj_release(struct tiger_hw *card)
+ }
+ if (card->irq > 0)
+ free_irq(card->irq, card);
+- if (card->isac.dch.dev.dev.class)
++ if (device_is_registered(&card->isac.dch.dev.dev))
+ mISDN_unregister_device(&card->isac.dch.dev);
+
+ for (i = 0; i < 2; i++) {
+diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
+index a41b4b2645941..7ea0100f218a0 100644
+--- a/drivers/isdn/mISDN/core.c
++++ b/drivers/isdn/mISDN/core.c
+@@ -233,11 +233,12 @@ mISDN_register_device(struct mISDNdevice *dev,
+ if (debug & DEBUG_CORE)
+ printk(KERN_DEBUG "mISDN_register %s %d\n",
+ dev_name(&dev->dev), dev->id);
++ dev->dev.class = &mISDN_class;
++
+ err = create_stack(dev);
+ if (err)
+ goto error1;
+
+- dev->dev.class = &mISDN_class;
+ dev->dev.platform_data = dev;
+ dev->dev.parent = parent;
+ dev_set_drvdata(&dev->dev, dev);
+@@ -249,8 +250,8 @@ mISDN_register_device(struct mISDNdevice *dev,
+
+ error3:
+ delete_stack(dev);
+- return err;
+ error1:
++ put_device(&dev->dev);
+ return err;
+
+ }
+diff --git a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
+index 2d95e16cd2489..f66699d5dc66e 100644
+--- a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
++++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
+@@ -44,6 +44,8 @@ static void handle_cec_message(struct cros_ec_cec *cros_ec_cec)
+ uint8_t *cec_message = cros_ec->event_data.data.cec_message;
+ unsigned int len = cros_ec->event_size;
+
++ if (len > CEC_MAX_MSG_SIZE)
++ len = CEC_MAX_MSG_SIZE;
+ cros_ec_cec->rx_msg.len = len;
+ memcpy(cros_ec_cec->rx_msg.msg, cec_message, len);
+
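/*
 * [Editor's annotation, not part of the 5.15.78 patch] This fix, like the
 * s5p one just below, clamps a length reported by the device to the fixed
 * size of the destination buffer before the memcpy(), so a misbehaving EC
 * cannot overflow rx_msg.msg. CEC_MAX_MSG_SIZE is 16
 * (include/uapi/linux/cec.h). A minimal stand-alone sketch of the pattern:
 */
#include <stdio.h>
#include <string.h>

#define CEC_MAX_MSG_SIZE 16

struct rx_msg {
	unsigned char msg[CEC_MAX_MSG_SIZE];
	unsigned int len;
};

static void copy_rx(struct rx_msg *dst, const unsigned char *src,
		    unsigned int len)
{
	if (len > CEC_MAX_MSG_SIZE)	/* never trust a device-supplied length */
		len = CEC_MAX_MSG_SIZE;
	dst->len = len;
	memcpy(dst->msg, src, len);
}

int main(void)
{
	unsigned char evil[64] = { 0xde, 0xad };
	struct rx_msg m;

	copy_rx(&m, evil, 64);		/* claimed 64 bytes, stored 16 */
	printf("stored len = %u\n", m.len);
	return 0;
}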
+diff --git a/drivers/media/cec/platform/s5p/s5p_cec.c b/drivers/media/cec/platform/s5p/s5p_cec.c
+index 028a09a7531ef..102f1af01000a 100644
+--- a/drivers/media/cec/platform/s5p/s5p_cec.c
++++ b/drivers/media/cec/platform/s5p/s5p_cec.c
+@@ -115,6 +115,8 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
+ dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n");
+ cec->rx = STATE_BUSY;
+ cec->msg.len = status >> 24;
++ if (cec->msg.len > CEC_MAX_MSG_SIZE)
++ cec->msg.len = CEC_MAX_MSG_SIZE;
+ cec->msg.rx_status = CEC_RX_STATUS_OK;
+ s5p_cec_get_rx_buf(cec, cec->msg.len,
+ cec->msg.msg);
+diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
+index d7fc2595f15b8..efe92eef67db6 100644
+--- a/drivers/media/dvb-frontends/drxk_hard.c
++++ b/drivers/media/dvb-frontends/drxk_hard.c
+@@ -6673,7 +6673,7 @@ static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr)
+ static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+ {
+ struct drxk_state *state = fe->demodulator_priv;
+- u16 err;
++ u16 err = 0;
+
+ dprintk(1, "\n");
+
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+index 41988eb0ec0a5..0f980f68058c0 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+@@ -1270,11 +1270,12 @@ static int rkisp1_capture_link_validate(struct media_link *link)
+ struct rkisp1_capture *cap = video_get_drvdata(vdev);
+ const struct rkisp1_capture_fmt_cfg *fmt =
+ rkisp1_find_fmt_cfg(cap, cap->pix.fmt.pixelformat);
+- struct v4l2_subdev_format sd_fmt;
++ struct v4l2_subdev_format sd_fmt = {
++ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
++ .pad = link->source->index,
++ };
+ int ret;
+
+- sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+- sd_fmt.pad = link->source->index;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
+ if (ret)
+ return ret;
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
+index 8fa5b0abf1f9c..e0e7d0b4ea047 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
+@@ -275,7 +275,7 @@ static void rkisp1_lsc_config(struct rkisp1_params *params,
+ RKISP1_CIF_ISP_LSC_XSIZE_01 + i * 4);
+
+ /* program x grad tables */
+- data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->x_grad_tbl[i * 2],
++ data = RKISP1_CIF_ISP_LSC_SECT_GRAD(arg->x_grad_tbl[i * 2],
+ arg->x_grad_tbl[i * 2 + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_XGRAD_01 + i * 4);
+@@ -287,7 +287,7 @@ static void rkisp1_lsc_config(struct rkisp1_params *params,
+ RKISP1_CIF_ISP_LSC_YSIZE_01 + i * 4);
+
+ /* program y grad tables */
+- data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->y_grad_tbl[i * 2],
++ data = RKISP1_CIF_ISP_LSC_SECT_GRAD(arg->y_grad_tbl[i * 2],
+ arg->y_grad_tbl[i * 2 + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_YGRAD_01 + i * 4);
+@@ -751,7 +751,7 @@ static void rkisp1_ie_enable(struct rkisp1_params *params, bool en)
+ }
+ }
+
+-static void rkisp1_csm_config(struct rkisp1_params *params, bool full_range)
++static void rkisp1_csm_config(struct rkisp1_params *params)
+ {
+ static const u16 full_range_coeff[] = {
+ 0x0026, 0x004b, 0x000f,
+@@ -765,7 +765,7 @@ static void rkisp1_csm_config(struct rkisp1_params *params, bool full_range)
+ };
+ unsigned int i;
+
+- if (full_range) {
++ if (params->quantization == V4L2_QUANTIZATION_FULL_RANGE) {
+ for (i = 0; i < ARRAY_SIZE(full_range_coeff); i++)
+ rkisp1_write(params->rkisp1, full_range_coeff[i],
+ RKISP1_CIF_ISP_CC_COEFF_0 + i * 4);
+@@ -1235,11 +1235,7 @@ static void rkisp1_params_config_parameter(struct rkisp1_params *params)
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP,
+ rkisp1_hst_params_default_config.mode);
+
+- /* set the range */
+- if (params->quantization == V4L2_QUANTIZATION_FULL_RANGE)
+- rkisp1_csm_config(params, true);
+- else
+- rkisp1_csm_config(params, false);
++ rkisp1_csm_config(params);
+
+ spin_lock_irq(&params->config_lock);
+
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+index fa33080f51db5..f584ccfe0286f 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+@@ -480,7 +480,7 @@
+ (((v0) & 0xFFF) | (((v1) & 0xFFF) << 12))
+ #define RKISP1_CIF_ISP_LSC_SECT_SIZE(v0, v1) \
+ (((v0) & 0xFFF) | (((v1) & 0xFFF) << 16))
+-#define RKISP1_CIF_ISP_LSC_GRAD_SIZE(v0, v1) \
++#define RKISP1_CIF_ISP_LSC_SECT_GRAD(v0, v1) \
+ (((v0) & 0xFFF) | (((v1) & 0xFFF) << 16))
+
+ /* LSC: ISP_LSC_TABLE_SEL */
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+index 2070f4b067059..a166ede409675 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+@@ -510,6 +510,10 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
+ sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
+ sink_fmt->field = V4L2_FIELD_NONE;
+ sink_fmt->code = RKISP1_DEF_FMT;
++ sink_fmt->colorspace = V4L2_COLORSPACE_SRGB;
++ sink_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
++ sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
++ sink_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+
+ sink_crop = v4l2_subdev_get_try_crop(sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
+diff --git a/drivers/mtd/parsers/bcm47xxpart.c b/drivers/mtd/parsers/bcm47xxpart.c
+index 6012a10f10c83..13daf9bffd081 100644
+--- a/drivers/mtd/parsers/bcm47xxpart.c
++++ b/drivers/mtd/parsers/bcm47xxpart.c
+@@ -233,11 +233,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
+ }
+
+ /* Read middle of the block */
+- err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read,
++ err = mtd_read(master, offset + (blocksize / 2), 0x4, &bytes_read,
+ (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+- offset, err);
++ offset + (blocksize / 2), err);
+ continue;
+ }
+
+diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
+index e638e3eea9112..e6e1054339b59 100644
+--- a/drivers/net/dsa/dsa_loop.c
++++ b/drivers/net/dsa/dsa_loop.c
+@@ -376,6 +376,17 @@ static struct mdio_driver dsa_loop_drv = {
+
+ #define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
+
++static void dsa_loop_phydevs_unregister(void)
++{
++ unsigned int i;
++
++ for (i = 0; i < NUM_FIXED_PHYS; i++)
++ if (!IS_ERR(phydevs[i])) {
++ fixed_phy_unregister(phydevs[i]);
++ phy_device_free(phydevs[i]);
++ }
++}
++
+ static int __init dsa_loop_init(void)
+ {
+ struct fixed_phy_status status = {
+@@ -383,23 +394,23 @@ static int __init dsa_loop_init(void)
+ .speed = SPEED_100,
+ .duplex = DUPLEX_FULL,
+ };
+- unsigned int i;
++ unsigned int i, ret;
+
+ for (i = 0; i < NUM_FIXED_PHYS; i++)
+ phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
+
+- return mdio_driver_register(&dsa_loop_drv);
++ ret = mdio_driver_register(&dsa_loop_drv);
++ if (ret)
++ dsa_loop_phydevs_unregister();
++
++ return ret;
+ }
+ module_init(dsa_loop_init);
+
+ static void __exit dsa_loop_exit(void)
+ {
+- unsigned int i;
+-
+ mdio_driver_unregister(&dsa_loop_drv);
+- for (i = 0; i < NUM_FIXED_PHYS; i++)
+- if (!IS_ERR(phydevs[i]))
+- fixed_phy_unregister(phydevs[i]);
++ dsa_loop_phydevs_unregister();
+ }
+ module_exit(dsa_loop_exit);
+
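/*
 * [Editor's annotation, not part of the 5.15.78 patch] Two details worth
 * noting in the dsa_loop fix: fixed_phy_register() reports failure through
 * an error-encoded pointer rather than NULL (hence the IS_ERR() test), and
 * the teardown loop is factored into a helper so the module-init error
 * path and module-exit share a single unwind. A stand-alone sketch, with
 * ERR_PTR()/IS_ERR() simplified from include/linux/err.h and hypothetical
 * register/unregister helpers:
 */
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

#define NUM_SLOTS 4
static void *slots[NUM_SLOTS];

static void *slot_register(int i)
{
	static int storage[NUM_SLOTS];

	return (i == 2) ? ERR_PTR(-12) : &storage[i];	/* slot 2 fails */
}

static void slots_unregister(void)
{
	for (int i = 0; i < NUM_SLOTS; i++)
		if (!IS_ERR(slots[i]))			/* skip failed slots */
			printf("unregister slot %d\n", i);
}

int main(void)
{
	for (int i = 0; i < NUM_SLOTS; i++)
		slots[i] = slot_register(i);

	/* ...and if a later setup step failed, init would invoke the same
	 * helper that the exit path uses: */
	slots_unregister();
	return 0;
}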
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 313ae81120679..a829ba128b9d5 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -656,7 +656,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+- return NETDEV_TX_BUSY;
++ return NETDEV_TX_OK;
+ }
+
+ bdp->cbd_datlen = cpu_to_fec16(size);
+@@ -718,7 +718,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+- return NETDEV_TX_BUSY;
++ return NETDEV_TX_OK;
+ }
+ }
+
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 4a070724a8fb6..8a92c6a6e764f 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -2621,19 +2621,19 @@ static void __ibmvnic_reset(struct work_struct *work)
+ rwi = get_next_rwi(adapter);
+
+ /*
+- * If there is another reset queued, free the previous rwi
+- * and process the new reset even if previous reset failed
+- * (the previous reset could have failed because of a fail
+- * over for instance, so process the fail over).
+- *
+ * If there are no resets queued and the previous reset failed,
+ * the adapter would be in an undefined state. So retry the
+ * previous reset as a hard reset.
++ *
++ * Else, free the previous rwi and, if there is another reset
++ * queued, process the new reset even if previous reset failed
++ * (the previous reset could have failed because of a fail
++ * over for instance, so process the fail over).
+ */
+- if (rwi)
+- kfree(tmprwi);
+- else if (rc)
++ if (!rwi && rc)
+ rwi = tmprwi;
++ else
++ kfree(tmprwi);
+
+ if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
+ rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+index ecf759ee1c9f5..220bb454626ce 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+@@ -51,7 +51,6 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
+ struct stmmac_resources res;
+ struct device_node *np;
+ int ret, i, phy_mode;
+- bool mdio = false;
+
+ np = dev_of_node(&pdev->dev);
+
+@@ -69,12 +68,10 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
+ if (!plat)
+ return -ENOMEM;
+
++ plat->mdio_node = of_get_child_by_name(np, "mdio");
+ if (plat->mdio_node) {
+- dev_err(&pdev->dev, "Found MDIO subnode\n");
+- mdio = true;
+- }
++ dev_info(&pdev->dev, "Found MDIO subnode\n");
+
+- if (mdio) {
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index 2c0216fe58def..dd7739b5f7918 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -577,7 +577,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+- if ((bus->phy_mask & (1 << i)) == 0) {
++ if ((bus->phy_mask & BIT(i)) == 0) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan(bus, i);
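/*
 * [Editor's annotation, not part of the 5.15.78 patch] BIT(nr) from
 * include/linux/bits.h expands to (1UL << (nr)). Besides reading better,
 * it sidesteps the undefined behaviour of shifting a signed int into its
 * sign bit: here i runs up to PHY_MAX_ADDR - 1 = 31, and (1 << 31)
 * overflows a 32-bit int while BIT(31) stays unsigned long. Stand-alone
 * illustration:
 */
#include <stdio.h>

#define BIT(nr) (1UL << (nr))

int main(void)
{
	unsigned long mask = BIT(31) | BIT(0);

	printf("mask = 0x%lx\n", mask);	/* 0x80000001 */
	return 0;
}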
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index f92d6a12831fe..9909f430d723f 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1445,7 +1445,8 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
+ int err;
+ int i;
+
+- if (it->nr_segs > MAX_SKB_FRAGS + 1)
++ if (it->nr_segs > MAX_SKB_FRAGS + 1 ||
++ len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN))
+ return ERR_PTR(-EMSGSIZE);
+
+ local_bh_disable();
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+index bc3f4e4edcdf9..dac7eb77799bd 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+@@ -228,6 +228,10 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
+ brcmf_fweh_event_name(event->code), event->code,
+ event->emsg.ifidx, event->emsg.bsscfgidx,
+ event->emsg.addr);
++ if (event->emsg.bsscfgidx >= BRCMF_MAX_IFS) {
++ bphy_err(drvr, "invalid bsscfg index: %u\n", event->emsg.bsscfgidx);
++ goto event_free;
++ }
+
+ /* convert event message */
+ emsg_be = &event->emsg;
+diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
+index c6b3334f24c9e..f12f903a9dd13 100644
+--- a/drivers/nfc/fdp/fdp.c
++++ b/drivers/nfc/fdp/fdp.c
+@@ -249,11 +249,19 @@ static int fdp_nci_close(struct nci_dev *ndev)
+ static int fdp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+ {
+ struct fdp_nci_info *info = nci_get_drvdata(ndev);
++ int ret;
+
+ if (atomic_dec_and_test(&info->data_pkt_counter))
+ info->data_pkt_counter_cb(ndev);
+
+- return info->phy_ops->write(info->phy, skb);
++ ret = info->phy_ops->write(info->phy, skb);
++ if (ret < 0) {
++ kfree_skb(skb);
++ return ret;
++ }
++
++ consume_skb(skb);
++ return 0;
+ }
+
+ static int fdp_nci_request_firmware(struct nci_dev *ndev)
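/*
 * [Editor's annotation, not part of the 5.15.78 patch] This fdp fix and
 * the nfcmrvl, nxp-nci and s3fwrn5 fixes below all enforce the same NCI
 * send contract: the driver's ->send() takes ownership of the skb and
 * must free it exactly once on every path. On failure the skb is released
 * with kfree_skb(), which is accounted as a packet drop; on success it is
 * released with consume_skb(), which is not. Illustrative kernel-style
 * sketch (xmit_hw() stands in for the driver's real transport write and
 * is hypothetical):
 */
static int example_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
{
	int ret;

	ret = xmit_hw(skb);		/* hypothetical transport hook */
	if (ret < 0) {
		kfree_skb(skb);		/* error path: counts as a drop */
		return ret;
	}

	consume_skb(skb);		/* success: quietly release */
	return 0;
}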
+diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
+index 01329b91d59d5..a902720cd8493 100644
+--- a/drivers/nfc/nfcmrvl/i2c.c
++++ b/drivers/nfc/nfcmrvl/i2c.c
+@@ -132,10 +132,15 @@ static int nfcmrvl_i2c_nci_send(struct nfcmrvl_private *priv,
+ ret = -EREMOTEIO;
+ } else
+ ret = 0;
++ }
++
++ if (ret) {
+ kfree_skb(skb);
++ return ret;
+ }
+
+- return ret;
++ consume_skb(skb);
++ return 0;
+ }
+
+ static void nfcmrvl_i2c_nci_update_config(struct nfcmrvl_private *priv,
+diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c
+index 518e2afb43a8d..13c433eb694dc 100644
+--- a/drivers/nfc/nxp-nci/core.c
++++ b/drivers/nfc/nxp-nci/core.c
+@@ -77,10 +77,13 @@ static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+ return -EINVAL;
+
+ r = info->phy_ops->write(info->phy_id, skb);
+- if (r < 0)
++ if (r < 0) {
+ kfree_skb(skb);
++ return r;
++ }
+
+- return r;
++ consume_skb(skb);
++ return 0;
+ }
+
+ static const struct nci_ops nxp_nci_ops = {
+diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
+index 1c412007fabb6..0270e05b68dff 100644
+--- a/drivers/nfc/s3fwrn5/core.c
++++ b/drivers/nfc/s3fwrn5/core.c
+@@ -110,11 +110,15 @@ static int s3fwrn5_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+ }
+
+ ret = s3fwrn5_write(info, skb);
+- if (ret < 0)
++ if (ret < 0) {
+ kfree_skb(skb);
++ mutex_unlock(&info->mutex);
++ return ret;
++ }
+
++ consume_skb(skb);
+ mutex_unlock(&info->mutex);
+- return ret;
++ return 0;
+ }
+
+ static int s3fwrn5_nci_post_setup(struct nci_dev *ndev)
+diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
+index 8a3b0c3a1e92b..fd99735dca3e6 100644
+--- a/drivers/parisc/iosapic.c
++++ b/drivers/parisc/iosapic.c
+@@ -875,6 +875,7 @@ int iosapic_serial_irq(struct parisc_device *dev)
+
+ return vi->txn_irq;
+ }
++EXPORT_SYMBOL(iosapic_serial_irq);
+ #endif
+
+
+diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
+index c278097926093..2ba9e01355659 100644
+--- a/drivers/s390/cio/css.c
++++ b/drivers/s390/cio/css.c
+@@ -792,9 +792,8 @@ static int __unset_online(struct device *dev, void *data)
+ {
+ struct idset *set = data;
+ struct subchannel *sch = to_subchannel(dev);
+- struct ccw_device *cdev = sch_get_cdev(sch);
+
+- if (cdev && cdev->online)
++ if (sch->st == SUBCHANNEL_TYPE_IO && sch->config.ena)
+ idset_sch_del(set, sch->schid);
+
+ return 0;
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index b2508a00bafdf..457ff86e02b30 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -1558,10 +1558,7 @@ struct lpfc_hba {
+ u32 cgn_acqe_cnt;
+
+ /* RX monitor handling for CMF */
+- struct rxtable_entry *rxtable; /* RX_monitor information */
+- atomic_t rxtable_idx_head;
+-#define LPFC_RXMONITOR_TABLE_IN_USE (LPFC_MAX_RXMONITOR_ENTRY + 73)
+- atomic_t rxtable_idx_tail;
++ struct lpfc_rx_info_monitor *rx_monitor;
+ atomic_t rx_max_read_cnt; /* Maximum read bytes */
+ uint64_t rx_block_cnt;
+
+@@ -1610,7 +1607,8 @@ struct lpfc_hba {
+
+ #define LPFC_MAX_RXMONITOR_ENTRY 800
+ #define LPFC_MAX_RXMONITOR_DUMP 32
+-struct rxtable_entry {
++struct rx_info_entry {
++ uint64_t cmf_bytes; /* Total no of read bytes for CMF_SYNC_WQE */
+ uint64_t total_bytes; /* Total no of read bytes requested */
+ uint64_t rcv_bytes; /* Total no of read bytes completed */
+ uint64_t avg_io_size;
+@@ -1624,6 +1622,13 @@ struct rxtable_entry {
+ uint32_t timer_interval;
+ };
+
++struct lpfc_rx_info_monitor {
++ struct rx_info_entry *ring; /* info organized in a circular buffer */
++ u32 head_idx, tail_idx; /* index to head/tail of ring */
++ spinlock_t lock; /* spinlock for ring */
++ u32 entries; /* storing number entries/size of ring */
++};
++
+ static inline struct Scsi_Host *
+ lpfc_shost_from_vport(struct lpfc_vport *vport)
+ {
+diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
+index c9770b1d2366a..470239394e649 100644
+--- a/drivers/scsi/lpfc/lpfc_crtn.h
++++ b/drivers/scsi/lpfc/lpfc_crtn.h
+@@ -90,6 +90,14 @@ void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba);
+ void lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag);
+ void lpfc_unblock_requests(struct lpfc_hba *phba);
+ void lpfc_block_requests(struct lpfc_hba *phba);
++int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
++ u32 entries);
++void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor);
++void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
++ struct rx_info_entry *entry);
++u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
++ struct lpfc_rx_info_monitor *rx_monitor, char *buf,
++ u32 buf_len, u32 max_read_entries);
+
+ void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+ void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index 79bc86ba59b35..8e8bbe734e875 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -5520,7 +5520,7 @@ lpfc_rx_monitor_open(struct inode *inode, struct file *file)
+ if (!debug)
+ goto out;
+
+- debug->buffer = vmalloc(MAX_DEBUGFS_RX_TABLE_SIZE);
++ debug->buffer = vmalloc(MAX_DEBUGFS_RX_INFO_SIZE);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+@@ -5541,55 +5541,18 @@ lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes,
+ struct lpfc_rx_monitor_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ char *buffer = debug->buffer;
+- struct rxtable_entry *entry;
+- int i, len = 0, head, tail, last, start;
+-
+- head = atomic_read(&phba->rxtable_idx_head);
+- while (head == LPFC_RXMONITOR_TABLE_IN_USE) {
+- /* Table is getting updated */
+- msleep(20);
+- head = atomic_read(&phba->rxtable_idx_head);
+- }
+
+- tail = atomic_xchg(&phba->rxtable_idx_tail, head);
+- if (!phba->rxtable || head == tail) {
+- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+- "Rxtable is empty\n");
+- goto out;
+- }
+- last = (head > tail) ? head : LPFC_MAX_RXMONITOR_ENTRY;
+- start = tail;
+-
+- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+- " MaxBPI\t Total Data Cmd Total Data Cmpl "
+- " Latency(us) Avg IO Size\tMax IO Size IO cnt "
+- "Info BWutil(ms)\n");
+-get_table:
+- for (i = start; i < last; i++) {
+- entry = &phba->rxtable[i];
+- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+- "%3d:%12lld %12lld\t%12lld\t"
+- "%8lldus\t%8lld\t%10lld "
+- "%8d %2d %2d(%2d)\n",
+- i, entry->max_bytes_per_interval,
+- entry->total_bytes,
+- entry->rcv_bytes,
+- entry->avg_io_latency,
+- entry->avg_io_size,
+- entry->max_read_cnt,
+- entry->io_cnt,
+- entry->cmf_info,
+- entry->timer_utilization,
+- entry->timer_interval);
++ if (!phba->rx_monitor) {
++ scnprintf(buffer, MAX_DEBUGFS_RX_INFO_SIZE,
++ "Rx Monitor Info is empty.\n");
++ } else {
++ lpfc_rx_monitor_report(phba, phba->rx_monitor, buffer,
++ MAX_DEBUGFS_RX_INFO_SIZE,
++ LPFC_MAX_RXMONITOR_ENTRY);
+ }
+
+- if (head != last) {
+- start = 0;
+- last = head;
+- goto get_table;
+- }
+-out:
+- return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
++ return simple_read_from_buffer(buf, nbytes, ppos, buffer,
++ strlen(buffer));
+ }
+
+ static int
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
+index a5bf71b349720..f71e5b6311ac0 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.h
++++ b/drivers/scsi/lpfc/lpfc_debugfs.h
+@@ -282,7 +282,7 @@ struct lpfc_idiag {
+ void *ptr_private;
+ };
+
+-#define MAX_DEBUGFS_RX_TABLE_SIZE (100 * LPFC_MAX_RXMONITOR_ENTRY)
++#define MAX_DEBUGFS_RX_INFO_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY)
+ struct lpfc_rx_monitor_debug {
+ char *i_private;
+ char *buffer;
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index fe8b4258cdc05..855817f6fe671 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -5428,38 +5428,12 @@ lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
+ void
+ lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
+ {
+- struct rxtable_entry *entry;
+- int cnt = 0, head, tail, last, start;
+-
+- head = atomic_read(&phba->rxtable_idx_head);
+- tail = atomic_read(&phba->rxtable_idx_tail);
+- if (!phba->rxtable || head == tail) {
+- lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
+- "4411 Rxtable is empty\n");
+- return;
+- }
+- last = tail;
+- start = head;
+-
+- /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
+- while (start != last) {
+- if (start)
+- start--;
+- else
+- start = LPFC_MAX_RXMONITOR_ENTRY - 1;
+- entry = &phba->rxtable[start];
++ if (!phba->rx_monitor) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+- "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
+- "Lat %lld ASz %lld Info %02d BWUtil %d "
+- "Int %d slot %d\n",
+- cnt, entry->max_bytes_per_interval,
+- entry->total_bytes, entry->rcv_bytes,
+- entry->avg_io_latency, entry->avg_io_size,
+- entry->cmf_info, entry->timer_utilization,
+- entry->timer_interval, start);
+- cnt++;
+- if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
+- return;
++ "4411 Rx Monitor Info is empty.\n");
++ } else {
++ lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
++ LPFC_MAX_RXMONITOR_DUMP);
+ }
+ }
+
+@@ -5866,11 +5840,10 @@ lpfc_cmf_timer(struct hrtimer *timer)
+ {
+ struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
+ cmf_timer);
+- struct rxtable_entry *entry;
++ struct rx_info_entry entry;
+ uint32_t io_cnt;
+- uint32_t head, tail;
+ uint32_t busy, max_read;
+- uint64_t total, rcv, lat, mbpi;
++ uint64_t total, rcv, lat, mbpi, extra, cnt;
+ int timer_interval = LPFC_CMF_INTERVAL;
+ uint32_t ms;
+ struct lpfc_cgn_stat *cgs;
+@@ -5937,12 +5910,27 @@ lpfc_cmf_timer(struct hrtimer *timer)
+ phba->hba_flag & HBA_SETUP) {
+ mbpi = phba->cmf_last_sync_bw;
+ phba->cmf_last_sync_bw = 0;
+- lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total);
++ extra = 0;
++
++ /* Calculate any extra bytes needed to account for the
++ * timer accuracy. If we are less than LPFC_CMF_INTERVAL
++ * calculate the adjustment needed for total to reflect
++ * a full LPFC_CMF_INTERVAL.
++ */
++ if (ms && ms < LPFC_CMF_INTERVAL) {
++ cnt = div_u64(total, ms); /* bytes per ms */
++ cnt *= LPFC_CMF_INTERVAL; /* what total should be */
++ if (cnt > mbpi)
++ cnt = mbpi;
++ extra = cnt - total;
++ }
++ lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
+ } else {
+ /* For Monitor mode or link down we want mbpi
+ * to be the full link speed
+ */
+ mbpi = phba->cmf_link_byte_count;
++ extra = 0;
+ }
+ phba->cmf_timer_cnt++;
+
+@@ -5968,39 +5956,30 @@ lpfc_cmf_timer(struct hrtimer *timer)
+ }
+
+ /* Save rxmonitor information for debug */
+- if (phba->rxtable) {
+- head = atomic_xchg(&phba->rxtable_idx_head,
+- LPFC_RXMONITOR_TABLE_IN_USE);
+- entry = &phba->rxtable[head];
+- entry->total_bytes = total;
+- entry->rcv_bytes = rcv;
+- entry->cmf_busy = busy;
+- entry->cmf_info = phba->cmf_active_info;
++ if (phba->rx_monitor) {
++ entry.total_bytes = total;
++ entry.cmf_bytes = total + extra;
++ entry.rcv_bytes = rcv;
++ entry.cmf_busy = busy;
++ entry.cmf_info = phba->cmf_active_info;
+ if (io_cnt) {
+- entry->avg_io_latency = div_u64(lat, io_cnt);
+- entry->avg_io_size = div_u64(rcv, io_cnt);
++ entry.avg_io_latency = div_u64(lat, io_cnt);
++ entry.avg_io_size = div_u64(rcv, io_cnt);
+ } else {
+- entry->avg_io_latency = 0;
+- entry->avg_io_size = 0;
++ entry.avg_io_latency = 0;
++ entry.avg_io_size = 0;
+ }
+- entry->max_read_cnt = max_read;
+- entry->io_cnt = io_cnt;
+- entry->max_bytes_per_interval = mbpi;
++ entry.max_read_cnt = max_read;
++ entry.io_cnt = io_cnt;
++ entry.max_bytes_per_interval = mbpi;
+ if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
+- entry->timer_utilization = phba->cmf_last_ts;
++ entry.timer_utilization = phba->cmf_last_ts;
+ else
+- entry->timer_utilization = ms;
+- entry->timer_interval = ms;
++ entry.timer_utilization = ms;
++ entry.timer_interval = ms;
+ phba->cmf_last_ts = 0;
+
+- /* Increment rxtable index */
+- head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
+- tail = atomic_read(&phba->rxtable_idx_tail);
+- if (head == tail) {
+- tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
+- atomic_set(&phba->rxtable_idx_tail, tail);
+- }
+- atomic_set(&phba->rxtable_idx_head, head);
++ lpfc_rx_monitor_record(phba->rx_monitor, &entry);
+ }
+
+ if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
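/*
 * [Editor's annotation, not part of the 5.15.78 patch] The
 * timer-compensation hunk in lpfc_cmf_timer() above scales the byte count
 * seen in a short interval (ms < LPFC_CMF_INTERVAL) up to what a full
 * interval would carry, capped at the max-bytes-per-interval budget, and
 * feeds total + extra to the CMF_SYNC_WQE. Stand-alone arithmetic sketch
 * (the kernel uses div_u64() for the 64-bit division; the interval value
 * below is illustrative):
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t extra_bytes(uint64_t total, uint64_t mbpi,
			    uint32_t ms, uint32_t interval)
{
	uint64_t cnt;

	if (!ms || ms >= interval)
		return 0;		/* full interval measured: no scaling */

	cnt = total / ms;		/* bytes per ms actually observed */
	cnt *= interval;		/* projected over a full interval */
	if (cnt > mbpi)
		cnt = mbpi;		/* never project past the budget */
	return cnt - total;
}

int main(void)
{
	/* 45 MiB seen in 45 ms of a 90 ms interval -> 45 MiB of "extra" */
	uint64_t extra = extra_bytes(45ULL << 20, 1ULL << 30, 45, 90);

	printf("extra = %llu MiB\n", (unsigned long long)(extra >> 20));
	return 0;
}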
+diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
+index 870e53b8f81dd..5d36b35148646 100644
+--- a/drivers/scsi/lpfc/lpfc_mem.c
++++ b/drivers/scsi/lpfc/lpfc_mem.c
+@@ -344,9 +344,12 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
+ phba->cgn_i = NULL;
+ }
+
+- /* Free RX table */
+- kfree(phba->rxtable);
+- phba->rxtable = NULL;
++ /* Free RX Monitor */
++ if (phba->rx_monitor) {
++ lpfc_rx_monitor_destroy_ring(phba->rx_monitor);
++ kfree(phba->rx_monitor);
++ phba->rx_monitor = NULL;
++ }
+
+ /* Free the iocb lookup array */
+ kfree(psli->iocbq_lookup);
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 0024c0e0afd30..d6e761adf1f1d 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -7878,6 +7878,172 @@ static void lpfc_sli4_dip(struct lpfc_hba *phba)
+ }
+ }
+
++/**
++ * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
++ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
++ * @entries: Number of rx_info_entry objects to allocate in ring
++ *
++ * Return:
++ * 0 - Success
++ * -ENOMEM - Failure to kmalloc
++ **/
++int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
++ u32 entries)
++{
++ rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
++ GFP_KERNEL);
++ if (!rx_monitor->ring)
++ return -ENOMEM;
++
++ rx_monitor->head_idx = 0;
++ rx_monitor->tail_idx = 0;
++ spin_lock_init(&rx_monitor->lock);
++ rx_monitor->entries = entries;
++
++ return 0;
++}
++
++/**
++ * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
++ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
++ **/
++void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
++{
++ spin_lock(&rx_monitor->lock);
++ kfree(rx_monitor->ring);
++ rx_monitor->ring = NULL;
++ rx_monitor->entries = 0;
++ rx_monitor->head_idx = 0;
++ rx_monitor->tail_idx = 0;
++ spin_unlock(&rx_monitor->lock);
++}
++
++/**
++ * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
++ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
++ * @entry: Pointer to rx_info_entry
++ *
++ * Used to insert an rx_info_entry into rx_monitor's ring. Note that this
++ * makes a deep copy of the rx_info_entry, not a shallow copy of the pointer.
++ *
++ * This is called from lpfc_cmf_timer, which is in timer/softirq context.
++ *
++ * If the ring overflows, old data is overwritten on a best-effort FIFO basis.
++ **/
++void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
++ struct rx_info_entry *entry)
++{
++ struct rx_info_entry *ring = rx_monitor->ring;
++ u32 *head_idx = &rx_monitor->head_idx;
++ u32 *tail_idx = &rx_monitor->tail_idx;
++ spinlock_t *ring_lock = &rx_monitor->lock;
++ u32 ring_size = rx_monitor->entries;
++
++ spin_lock(ring_lock);
++ memcpy(&ring[*tail_idx], entry, sizeof(*entry));
++ *tail_idx = (*tail_idx + 1) % ring_size;
++
++	/* Ring is full: drop the oldest entry (best-effort FIFO) */
++ if (*tail_idx == *head_idx)
++ *head_idx = (*head_idx + 1) % ring_size;
++
++ spin_unlock(ring_lock);
++}
++
++/**
++ * lpfc_rx_monitor_report - Read out rx_monitor's ring
++ * @phba: Pointer to lpfc_hba object
++ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
++ * @buf: Pointer to char buffer that will contain rx monitor info data
++ * @buf_len: Length of buf, including the terminating null character
++ * @max_read_entries: Maximum number of entries to read out of ring
++ *
++ * Used to dump/read what's in rx_monitor's ring buffer.
++ *
++ * If buf is NULL or buf_len is 0, the information is logged to kmsg
++ * instead of being written to buf.
++ *
++ * Return:
++ * Number of entries read out of the ring
++ **/
++u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
++ struct lpfc_rx_info_monitor *rx_monitor, char *buf,
++ u32 buf_len, u32 max_read_entries)
++{
++ struct rx_info_entry *ring = rx_monitor->ring;
++ struct rx_info_entry *entry;
++ u32 *head_idx = &rx_monitor->head_idx;
++ u32 *tail_idx = &rx_monitor->tail_idx;
++ spinlock_t *ring_lock = &rx_monitor->lock;
++ u32 ring_size = rx_monitor->entries;
++ u32 cnt = 0;
++ char tmp[DBG_LOG_STR_SZ] = {0};
++	bool log_to_kmsg = !buf || !buf_len;
++
++ if (!log_to_kmsg) {
++ /* clear the buffer to be sure */
++ memset(buf, 0, buf_len);
++
++ scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
++ "%-8s%-8s%-8s%-16s\n",
++ "MaxBPI", "Tot_Data_CMF",
++ "Tot_Data_Cmd", "Tot_Data_Cmpl",
++ "Lat(us)", "Avg_IO", "Max_IO", "Bsy",
++ "IO_cnt", "Info", "BWutil(ms)");
++ }
++
++	/* Needs to be _bh because lpfc_rx_monitor_record() is called from
++	 * timer (softirq) context.
++	 */
++ spin_lock_bh(ring_lock);
++ while (*head_idx != *tail_idx) {
++ entry = &ring[*head_idx];
++
++ /* Read out this entry's data. */
++ if (!log_to_kmsg) {
++ /* If !log_to_kmsg, then store to buf. */
++ scnprintf(tmp, sizeof(tmp),
++ "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
++ "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
++ *head_idx, entry->max_bytes_per_interval,
++ entry->cmf_bytes, entry->total_bytes,
++ entry->rcv_bytes, entry->avg_io_latency,
++ entry->avg_io_size, entry->max_read_cnt,
++ entry->cmf_busy, entry->io_cnt,
++ entry->cmf_info, entry->timer_utilization,
++ entry->timer_interval);
++
++ /* Check for buffer overflow */
++ if ((strlen(buf) + strlen(tmp)) >= buf_len)
++ break;
++
++ /* Append entry's data to buffer */
++ strlcat(buf, tmp, buf_len);
++ } else {
++ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
++ "4410 %02u: MBPI %llu Xmit %llu "
++ "Cmpl %llu Lat %llu ASz %llu Info %02u "
++ "BWUtil %u Int %u slot %u\n",
++ cnt, entry->max_bytes_per_interval,
++ entry->total_bytes, entry->rcv_bytes,
++ entry->avg_io_latency,
++ entry->avg_io_size, entry->cmf_info,
++ entry->timer_utilization,
++ entry->timer_interval, *head_idx);
++ }
++
++ *head_idx = (*head_idx + 1) % ring_size;
++
++		/* Don't report more than max_read_entries */
++ cnt++;
++ if (cnt >= max_read_entries)
++ break;
++ }
++ spin_unlock_bh(ring_lock);
++
++ return cnt;
++}
++
+ /**
+ * lpfc_cmf_setup - Initialize idle_stat tracking
+ * @phba: Pointer to HBA context object.
+@@ -8069,19 +8235,29 @@ no_cmf:
+ phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
+
+ /* Allocate RX Monitor Buffer */
+- if (!phba->rxtable) {
+- phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
+- sizeof(struct rxtable_entry),
+- GFP_KERNEL);
+- if (!phba->rxtable) {
++ if (!phba->rx_monitor) {
++ phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor),
++ GFP_KERNEL);
++
++ if (!phba->rx_monitor) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2644 Failed to alloc memory "
+ "for RX Monitor Buffer\n");
+ return -ENOMEM;
+ }
++
++ /* Instruct the rx_monitor object to instantiate its ring */
++ if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
++ LPFC_MAX_RXMONITOR_ENTRY)) {
++ kfree(phba->rx_monitor);
++ phba->rx_monitor = NULL;
++ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
++ "2645 Failed to alloc memory "
++ "for RX Monitor's Ring\n");
++ return -ENOMEM;
++ }
+ }
+- atomic_set(&phba->rxtable_idx_head, 0);
+- atomic_set(&phba->rxtable_idx_tail, 0);
++
+ return 0;
+ }
+
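Stripped of the driver specifics, the new rx_monitor record path is a classic overwrite-on-full ring: a deep copy under a lock, with head advanced past the oldest slot whenever tail catches it. A self-contained sketch of that policy, assuming a pthreads mutex in place of the kernel spinlock and invented entry/ring types:

#include <pthread.h>
#include <string.h>

#define RING_SIZE 8	/* illustrative; the driver sizes this at create time */

struct entry {
	unsigned long long total_bytes;	/* ... more stats fields ... */
};

struct ring {
	struct entry slot[RING_SIZE];
	unsigned int head;		/* oldest valid entry */
	unsigned int tail;		/* next write position */
	pthread_mutex_t lock;
};

/* Deep-copy *e into the ring; when full, advance head to drop the oldest. */
static void ring_record(struct ring *r, const struct entry *e)
{
	pthread_mutex_lock(&r->lock);
	memcpy(&r->slot[r->tail], e, sizeof(*e));
	r->tail = (r->tail + 1) % RING_SIZE;
	if (r->tail == r->head)			/* write wrapped onto head */
		r->head = (r->head + 1) % RING_SIZE;	/* best-effort FIFO */
	pthread_mutex_unlock(&r->lock);
}

int main(void)
{
	struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct entry e = { .total_bytes = 42 };

	ring_record(&r, &e);
	return 0;
}

The kernel version takes plain spin_lock() in lpfc_rx_monitor_record() but spin_lock_bh() in lpfc_rx_monitor_report(), since only the reader can be interrupted by the softirq-context producer.
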
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 920aae661c5b2..774864b54b97c 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -816,6 +816,14 @@ store_state_field(struct device *dev, struct device_attribute *attr,
+ }
+
+ mutex_lock(&sdev->state_mutex);
++ switch (sdev->sdev_state) {
++ case SDEV_RUNNING:
++ case SDEV_OFFLINE:
++ break;
++ default:
++ mutex_unlock(&sdev->state_mutex);
++ return -EINVAL;
++ }
+ if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
+ ret = 0;
+ } else {
+diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
+index e51d69c4729df..040ed56eb24f3 100644
+--- a/drivers/staging/media/meson/vdec/vdec.c
++++ b/drivers/staging/media/meson/vdec/vdec.c
+@@ -1105,6 +1105,7 @@ static int vdec_probe(struct platform_device *pdev)
+
+ err_vdev_release:
+ video_device_release(vdev);
++ v4l2_device_unregister(&core->v4l2_dev);
+ return ret;
+ }
+
+@@ -1113,6 +1114,7 @@ static int vdec_remove(struct platform_device *pdev)
+ struct amvdec_core *core = platform_get_drvdata(pdev);
+
+ video_unregister_device(core->vdev_dec);
++ v4l2_device_unregister(&core->v4l2_dev);
+
+ return 0;
+ }
+diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
+index 3fc426dad2df3..a44e5b53e7a91 100644
+--- a/drivers/tee/tee_core.c
++++ b/drivers/tee/tee_core.c
+@@ -334,6 +334,9 @@ tee_ioctl_shm_register(struct tee_context *ctx,
+ if (data.flags)
+ return -EINVAL;
+
++ if (!access_ok((void __user *)(unsigned long)data.addr, data.length))
++ return -EFAULT;
++
+ shm = tee_shm_register(ctx, data.addr, data.length,
+ TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
+ if (IS_ERR(shm))
+diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
+index bd96ebb82c8ec..6fb4400333fb4 100644
+--- a/drivers/tee/tee_shm.c
++++ b/drivers/tee/tee_shm.c
+@@ -223,9 +223,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
+ goto err;
+ }
+
+- if (!access_ok((void __user *)addr, length))
+- return ERR_PTR(-EFAULT);
+-
+ mutex_lock(&teedev->mutex);
+ shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
+ mutex_unlock(&teedev->mutex);
+diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
+index 39fc96dc2531c..da63e76c7530c 100644
+--- a/drivers/tty/serial/8250/Kconfig
++++ b/drivers/tty/serial/8250/Kconfig
+@@ -118,7 +118,7 @@ config SERIAL_8250_CONSOLE
+
+ config SERIAL_8250_GSC
+ tristate
+- depends on SERIAL_8250 && GSC
++ depends on SERIAL_8250 && PARISC
+ default SERIAL_8250
+
+ config SERIAL_8250_DMA
+diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
+index 4379ca4842ae7..0f2677695b521 100644
+--- a/drivers/tty/serial/ar933x_uart.c
++++ b/drivers/tty/serial/ar933x_uart.c
+@@ -591,6 +591,11 @@ static int ar933x_config_rs485(struct uart_port *port,
+ dev_err(port->dev, "RS485 needs rts-gpio\n");
+ return 1;
+ }
++
++ if (rs485conf->flags & SER_RS485_ENABLED)
++ gpiod_set_value(up->rts_gpiod,
++ !!(rs485conf->flags & SER_RS485_RTS_AFTER_SEND));
++
+ port->rs485 = *rs485conf;
+ return 0;
+ }
+diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
+index b0470f4f595ee..3feb6e40d56d8 100644
+--- a/drivers/video/fbdev/stifb.c
++++ b/drivers/video/fbdev/stifb.c
+@@ -1041,6 +1041,48 @@ stifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+ SETUP_FB(fb);
+ }
+
++#define ARTIST_VRAM_SIZE 0x000804
++#define ARTIST_VRAM_SRC 0x000808
++#define ARTIST_VRAM_SIZE_TRIGGER_WINFILL 0x000a04
++#define ARTIST_VRAM_DEST_TRIGGER_BLOCKMOVE 0x000b00
++#define ARTIST_SRC_BM_ACCESS 0x018008
++#define ARTIST_FGCOLOR 0x018010
++#define ARTIST_BGCOLOR 0x018014
++#define ARTIST_BITMAP_OP 0x01801c
++
++static void
++stifb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
++{
++ struct stifb_info *fb = container_of(info, struct stifb_info, info);
++
++ if (rect->rop != ROP_COPY ||
++ (fb->id == S9000_ID_HCRX && fb->info.var.bits_per_pixel == 32))
++ return cfb_fillrect(info, rect);
++
++ SETUP_HW(fb);
++
++ if (fb->info.var.bits_per_pixel == 32) {
++ WRITE_WORD(0xBBA0A000, fb, REG_10);
++
++ NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff);
++ } else {
++ WRITE_WORD(fb->id == S9000_ID_HCRX ? 0x13a02000 : 0x13a01000, fb, REG_10);
++
++ NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xff);
++ }
++
++ WRITE_WORD(0x03000300, fb, ARTIST_BITMAP_OP);
++ WRITE_WORD(0x2ea01000, fb, ARTIST_SRC_BM_ACCESS);
++ NGLE_QUICK_SET_DST_BM_ACCESS(fb, 0x2ea01000);
++ NGLE_REALLY_SET_IMAGE_FG_COLOR(fb, rect->color);
++ WRITE_WORD(0, fb, ARTIST_BGCOLOR);
++
++ NGLE_SET_DSTXY(fb, (rect->dx << 16) | (rect->dy));
++ SET_LENXY_START_RECFILL(fb, (rect->width << 16) | (rect->height));
++
++ SETUP_FB(fb);
++}
++
+ static void __init
+ stifb_init_display(struct stifb_info *fb)
+ {
+@@ -1105,7 +1147,7 @@ static const struct fb_ops stifb_ops = {
+ .owner = THIS_MODULE,
+ .fb_setcolreg = stifb_setcolreg,
+ .fb_blank = stifb_blank,
+- .fb_fillrect = cfb_fillrect,
++ .fb_fillrect = stifb_fillrect,
+ .fb_copyarea = stifb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ };
+@@ -1297,7 +1339,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
+ goto out_err0;
+ }
+ info->screen_size = fix->smem_len;
+- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA;
++ info->flags = FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
+ info->pseudo_palette = &fb->pseudo_palette;
+
+ /* This has to be done !!! */
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 2e7c3e48bc9ce..60bec5a108fa3 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -289,8 +289,10 @@ static void prelim_release(struct preftree *preftree)
+ struct prelim_ref *ref, *next_ref;
+
+ rbtree_postorder_for_each_entry_safe(ref, next_ref,
+- &preftree->root.rb_root, rbnode)
++ &preftree->root.rb_root, rbnode) {
++ free_inode_elem_list(ref->inode_list);
+ free_pref(ref);
++ }
+
+ preftree->root = RB_ROOT_CACHED;
+ preftree->count = 0;
+@@ -648,6 +650,18 @@ unode_aux_to_inode_list(struct ulist_node *node)
+ return (struct extent_inode_elem *)(uintptr_t)node->aux;
+ }
+
++static void free_leaf_list(struct ulist *ulist)
++{
++ struct ulist_node *node;
++ struct ulist_iterator uiter;
++
++ ULIST_ITER_INIT(&uiter);
++ while ((node = ulist_next(ulist, &uiter)))
++ free_inode_elem_list(unode_aux_to_inode_list(node));
++
++ ulist_free(ulist);
++}
++
+ /*
+ * We maintain three separate rbtrees: one for direct refs, one for
+ * indirect refs which have a key, and one for indirect refs which do not
+@@ -762,7 +776,11 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
+ cond_resched();
+ }
+ out:
+- ulist_free(parents);
++ /*
++ * We may have inode lists attached to refs in the parents ulist, so we
++ * must free them before freeing the ulist and its refs.
++ */
++ free_leaf_list(parents);
+ return ret;
+ }
+
+@@ -1371,6 +1389,12 @@ again:
+ if (ret < 0)
+ goto out;
+ ref->inode_list = eie;
++ /*
++			 * We transferred the list ownership to the ref,
++			 * so set eie to NULL to avoid a double free in
++			 * case an error happens after this.
++ */
++ eie = NULL;
+ }
+ ret = ulist_add_merge_ptr(refs, ref->parent,
+ ref->inode_list,
+@@ -1396,6 +1420,14 @@ again:
+ eie->next = ref->inode_list;
+ }
+ eie = NULL;
++ /*
++ * We have transferred the inode list ownership from
++ * this ref to the ref we added to the 'refs' ulist.
++ * So set this ref's inode list to NULL to avoid
++ * use-after-free when our caller uses it or double
++ * frees in case an error happens before we return.
++ */
++ ref->inode_list = NULL;
+ }
+ cond_resched();
+ }
+@@ -1412,24 +1444,6 @@ out:
+ return ret;
+ }
+
+-static void free_leaf_list(struct ulist *blocks)
+-{
+- struct ulist_node *node = NULL;
+- struct extent_inode_elem *eie;
+- struct ulist_iterator uiter;
+-
+- ULIST_ITER_INIT(&uiter);
+- while ((node = ulist_next(blocks, &uiter))) {
+- if (!node->aux)
+- continue;
+- eie = unode_aux_to_inode_list(node);
+- free_inode_elem_list(eie);
+- node->aux = 0;
+- }
+-
+- ulist_free(blocks);
+-}
+-
+ /*
+ * Finds all leafs with a reference to the specified combination of bytenr and
+ * offset. key_list_head will point to a list of corresponding keys (caller must
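The common thread in the backref fixes above is ownership transfer: once an inode list has been handed to a ref (or to the 'refs' ulist), the old pointer must be cleared so no later error path frees the list a second time. A minimal sketch of the idiom, with invented types and plain malloc/free standing in for the btrfs allocators:

#include <stdlib.h>

struct elem { struct elem *next; };
struct ref  { struct elem *inode_list; };

/* Hand *list over to ref; the caller's pointer is cleared so later error
 * paths can free it unconditionally without a double free. */
static void attach_list(struct ref *ref, struct elem **list)
{
	ref->inode_list = *list;
	*list = NULL;
}

int main(void)
{
	struct ref r = { 0 };
	struct elem *eie = calloc(1, sizeof(*eie));

	if (!eie)
		return 1;
	attach_list(&r, &eie);
	free(eie);		/* no-op: eie is NULL after the transfer */
	free(r.inode_list);	/* the single real free, by the new owner */
	return 0;
}
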
+diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
+index 1d4c2397d0d62..fab7eb76e53b2 100644
+--- a/fs/btrfs/export.c
++++ b/fs/btrfs/export.c
+@@ -58,7 +58,7 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+ }
+
+ struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
+- u64 root_objectid, u32 generation,
++ u64 root_objectid, u64 generation,
+ int check_generation)
+ {
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h
+index f32f4113c976a..5afb7ca428289 100644
+--- a/fs/btrfs/export.h
++++ b/fs/btrfs/export.h
+@@ -19,7 +19,7 @@ struct btrfs_fid {
+ } __attribute__ ((packed));
+
+ struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
+- u64 root_objectid, u32 generation,
++ u64 root_objectid, u64 generation,
+ int check_generation);
+ struct dentry *btrfs_get_parent(struct dentry *child);
+
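Widening the generation parameter from u32 to u64 matters because btrfs generation numbers are 64-bit on disk; squeezing one through a u32 silently drops the high bits, so btrfs_get_dentry()'s check_generation comparison could misfire. A two-line illustration with a hypothetical value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t generation = 0x100000001ULL;	/* hypothetical on-disk value */
	uint32_t truncated  = (uint32_t)generation;

	/* Prints "4294967297 vs 1": a comparison made through the
	 * narrow type sees a completely different generation. */
	printf("%llu vs %u\n", (unsigned long long)generation, truncated);
	return 0;
}
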
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index b90e9aa24005a..e72973f7c2cd3 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3307,21 +3307,22 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ }
+
+ /*
+- * If this is a leaf and there are tree mod log users, we may
+- * have recorded mod log operations that point to this leaf.
+- * So we must make sure no one reuses this leaf's extent before
+- * mod log operations are applied to a node, otherwise after
+- * rewinding a node using the mod log operations we get an
+- * inconsistent btree, as the leaf's extent may now be used as
+- * a node or leaf for another different btree.
++ * If there are tree mod log users we may have recorded mod log
++ * operations for this node. If we re-allocate this node we
++ * could replay operations on this node that happened when it
++ * existed in a completely different root. For example if it
++ * was part of root A, then was reallocated to root B, and we
++ * are doing a btrfs_old_search_slot(root b), we could replay
++ * operations that happened when the block was part of root A,
++ * giving us an inconsistent view of the btree.
++ *
+ * We are safe from races here because at this point no other
+ * node or root points to this extent buffer, so if after this
+- * check a new tree mod log user joins, it will not be able to
+- * find a node pointing to this leaf and record operations that
+- * point to this leaf.
++ * check a new tree mod log user joins we will not have an
++ * existing log of operations on this node that we have to
++ * contend with.
+ */
+- if (btrfs_header_level(buf) == 0 &&
+- test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
++ if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
+ must_pin = true;
+
+ if (must_pin || btrfs_is_zoned(fs_info)) {
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 1c597cd6c0247..90934711dcf0f 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1906,7 +1906,6 @@ static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
+
+ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
+ {
+- const bool is_sync_write = (iocb->ki_flags & IOCB_DSYNC);
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+@@ -1917,6 +1916,7 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
+ loff_t endbyte;
+ ssize_t err;
+ unsigned int ilock_flags = 0;
++ struct iomap_dio *dio;
+
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ ilock_flags |= BTRFS_ILOCK_TRY;
+@@ -1959,15 +1959,6 @@ relock:
+ goto buffered;
+ }
+
+- /*
+- * We remove IOCB_DSYNC so that we don't deadlock when iomap_dio_rw()
+- * calls generic_write_sync() (through iomap_dio_complete()), because
+- * that results in calling fsync (btrfs_sync_file()) which will try to
+- * lock the inode in exclusive/write mode.
+- */
+- if (is_sync_write)
+- iocb->ki_flags &= ~IOCB_DSYNC;
+-
+ /*
+ * The iov_iter can be mapped to the same file range we are writing to.
+ * If that's the case, then we will deadlock in the iomap code, because
+@@ -1986,12 +1977,23 @@ relock:
+ * So here we disable page faults in the iov_iter and then retry if we
+ * got -EFAULT, faulting in the pages before the retry.
+ */
+-again:
+ from->nofault = true;
+- err = iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
+- IOMAP_DIO_PARTIAL, written);
++ dio = __iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
++ IOMAP_DIO_PARTIAL, written);
+ from->nofault = false;
+
++ /*
++ * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync
++ * iocb, and that needs to lock the inode. So unlock it before calling
++ * iomap_dio_complete() to avoid a deadlock.
++ */
++ btrfs_inode_unlock(inode, ilock_flags);
++
++ if (IS_ERR_OR_NULL(dio))
++ err = PTR_ERR_OR_ZERO(dio);
++ else
++ err = iomap_dio_complete(dio);
++
+ /* No increment (+=) because iomap returns a cumulative value. */
+ if (err > 0)
+ written = err;
+@@ -2017,19 +2019,10 @@ again:
+ } else {
+ fault_in_iov_iter_readable(from, left);
+ prev_left = left;
+- goto again;
++ goto relock;
+ }
+ }
+
+- btrfs_inode_unlock(inode, ilock_flags);
+-
+- /*
+- * Add back IOCB_DSYNC. Our caller, btrfs_file_write_iter(), will do
+- * the fsync (call generic_write_sync()).
+- */
+- if (is_sync_write)
+- iocb->ki_flags |= IOCB_DSYNC;
+-
+ /* If 'err' is -ENOTBLK then it means we must fallback to buffered IO. */
+ if ((err < 0 && err != -ENOTBLK) || !iov_iter_count(from))
+ goto out;
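The rework above splits iomap_dio_rw() into __iomap_dio_rw() plus an explicit iomap_dio_complete() call so the inode lock can be dropped in between: completion may run btrfs_sync_file() for a dsync iocb, and that takes the same lock. Reduced to a toy, the deadlock shape looks like this (pthreads, invented names; a non-recursive mutex stands in for the inode lock):

#include <pthread.h>

static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for iomap_dio_complete() -> generic_write_sync() -> fsync():
 * completion wants the inode lock for itself. */
static void complete_io(void)
{
	pthread_mutex_lock(&inode_lock);
	/* ... sync the file ... */
	pthread_mutex_unlock(&inode_lock);
}

static void direct_write(void)
{
	pthread_mutex_lock(&inode_lock);
	/* ... submit the I/O (the __iomap_dio_rw() half) ... */
	pthread_mutex_unlock(&inode_lock);	/* drop before completing */
	complete_io();	/* would self-deadlock with the lock still held */
}

int main(void)
{
	direct_write();
	return 0;
}
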
+diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
+index 19ba7d5b7d8ff..a374b62c9de9e 100644
+--- a/fs/btrfs/tests/qgroup-tests.c
++++ b/fs/btrfs/tests/qgroup-tests.c
+@@ -232,8 +232,10 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
+
+ ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+ BTRFS_FS_TREE_OBJECTID);
+- if (ret)
++ if (ret) {
++ ulist_free(old_roots);
+ return ret;
++ }
+
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false);
+ if (ret) {
+@@ -266,8 +268,10 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
+ }
+
+ ret = remove_extent_item(root, nodesize, nodesize);
+- if (ret)
++ if (ret) {
++ ulist_free(old_roots);
+ return -EINVAL;
++ }
+
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false);
+ if (ret) {
+@@ -329,8 +333,10 @@ static int test_multiple_refs(struct btrfs_root *root,
+
+ ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+ BTRFS_FS_TREE_OBJECTID);
+- if (ret)
++ if (ret) {
++ ulist_free(old_roots);
+ return ret;
++ }
+
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false);
+ if (ret) {
+@@ -362,8 +368,10 @@ static int test_multiple_refs(struct btrfs_root *root,
+
+ ret = add_tree_ref(root, nodesize, nodesize, 0,
+ BTRFS_FIRST_FREE_OBJECTID);
+- if (ret)
++ if (ret) {
++ ulist_free(old_roots);
+ return ret;
++ }
+
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false);
+ if (ret) {
+@@ -401,8 +409,10 @@ static int test_multiple_refs(struct btrfs_root *root,
+
+ ret = remove_extent_ref(root, nodesize, nodesize, 0,
+ BTRFS_FIRST_FREE_OBJECTID);
+- if (ret)
++ if (ret) {
++ ulist_free(old_roots);
+ return ret;
++ }
+
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false);
+ if (ret) {
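All four hunks in this test file fix the same leak: old_roots was allocated by an earlier btrfs_find_all_roots() call, and the bare early returns skipped freeing it. The shape of the bug and the fix, as a sketch with malloc/free standing in for the ulist helpers:

#include <stdlib.h>

static int next_step(void) { return -1; }	/* a helper that fails */

static int fixed(void)
{
	void *old_roots = malloc(64);	/* stands in for the first
					 * btrfs_find_all_roots() result */
	int ret;

	if (!old_roots)
		return -1;
	ret = next_step();		/* e.g. insert_normal_tree_ref() */
	if (ret) {
		free(old_roots);	/* the fix: release on this path too */
		return ret;
	}
	/* ... use old_roots ... */
	free(old_roots);
	return 0;
}

int main(void)
{
	return fixed() ? 0 : 1;	/* next_step() fails, but nothing leaks */
}
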
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 278634a63895d..ad5c935f7f069 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3711,12 +3711,11 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
+ pSMB->AndXCommand = 0xFF;
+ pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
+ bcc_ptr = &pSMB->Password[0];
+- if (tcon->pipe || (ses->server->sec_mode & SECMODE_USER)) {
+- pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
+- *bcc_ptr = 0; /* password is null byte */
+- bcc_ptr++; /* skip password */
+- /* already aligned so no need to do it below */
+- }
++
++ pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
++ *bcc_ptr = 0; /* password is null byte */
++ bcc_ptr++; /* skip password */
++ /* already aligned so no need to do it below */
+
+ if (ses->server->sign)
+ smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
+index cb25ef0cdf1f3..373c434b375c0 100644
+--- a/fs/crypto/fscrypt_private.h
++++ b/fs/crypto/fscrypt_private.h
+@@ -220,7 +220,7 @@ struct fscrypt_info {
+ * will be NULL if the master key was found in a process-subscribed
+ * keyring rather than in the filesystem-level keyring.
+ */
+- struct key *ci_master_key;
++ struct fscrypt_master_key *ci_master_key;
+
+ /*
+ * Link in list of inodes that were unlocked with the master key.
+@@ -430,6 +430,40 @@ struct fscrypt_master_key_secret {
+ */
+ struct fscrypt_master_key {
+
++ /*
++ * Back-pointer to the super_block of the filesystem to which this
++ * master key has been added. Only valid if ->mk_active_refs > 0.
++ */
++ struct super_block *mk_sb;
++
++ /*
++ * Link in ->mk_sb->s_master_keys->key_hashtable.
++ * Only valid if ->mk_active_refs > 0.
++ */
++ struct hlist_node mk_node;
++
++ /* Semaphore that protects ->mk_secret and ->mk_users */
++ struct rw_semaphore mk_sem;
++
++ /*
++ * Active and structural reference counts. An active ref guarantees
++ * that the struct continues to exist, continues to be in the keyring
++ * ->mk_sb->s_master_keys, and that any embedded subkeys (e.g.
++ * ->mk_direct_keys) that have been prepared continue to exist.
++ * A structural ref only guarantees that the struct continues to exist.
++ *
++ * There is one active ref associated with ->mk_secret being present,
++ * and one active ref for each inode in ->mk_decrypted_inodes.
++ *
++ * There is one structural ref associated with the active refcount being
++ * nonzero. Finding a key in the keyring also takes a structural ref,
++ * which is then held temporarily while the key is operated on.
++ */
++ refcount_t mk_active_refs;
++ refcount_t mk_struct_refs;
++
++ struct rcu_head mk_rcu_head;
++
+ /*
+ * The secret key material. After FS_IOC_REMOVE_ENCRYPTION_KEY is
+ * executed, this is wiped and no new inodes can be unlocked with this
+@@ -438,7 +472,10 @@ struct fscrypt_master_key {
+ * FS_IOC_REMOVE_ENCRYPTION_KEY can be retried, or
+ * FS_IOC_ADD_ENCRYPTION_KEY can add the secret again.
+ *
+- * Locking: protected by this master key's key->sem.
++ * While ->mk_secret is present, one ref in ->mk_active_refs is held.
++ *
++ * Locking: protected by ->mk_sem. The manipulation of ->mk_active_refs
++ * associated with this field is protected by ->mk_sem as well.
+ */
+ struct fscrypt_master_key_secret mk_secret;
+
+@@ -459,22 +496,12 @@ struct fscrypt_master_key {
+ *
+ * This is NULL for v1 policy keys; those can only be added by root.
+ *
+- * Locking: in addition to this keyring's own semaphore, this is
+- * protected by this master key's key->sem, so we can do atomic
+- * search+insert. It can also be searched without taking any locks, but
+- * in that case the returned key may have already been removed.
++ * Locking: protected by ->mk_sem. (We don't just rely on the keyrings
++ * subsystem semaphore ->mk_users->sem, as we need support for atomic
++ * search+insert along with proper synchronization with ->mk_secret.)
+ */
+ struct key *mk_users;
+
+- /*
+- * Length of ->mk_decrypted_inodes, plus one if mk_secret is present.
+- * Once this goes to 0, the master key is removed from ->s_master_keys.
+- * The 'struct fscrypt_master_key' will continue to live as long as the
+- * 'struct key' whose payload it is, but we won't let this reference
+- * count rise again.
+- */
+- refcount_t mk_refcount;
+-
+ /*
+ * List of inodes that were unlocked using this key. This allows the
+ * inodes to be evicted efficiently if the key is removed.
+@@ -500,10 +527,10 @@ static inline bool
+ is_master_key_secret_present(const struct fscrypt_master_key_secret *secret)
+ {
+ /*
+- * The READ_ONCE() is only necessary for fscrypt_drop_inode() and
+- * fscrypt_key_describe(). These run in atomic context, so they can't
+- * take the key semaphore and thus 'secret' can change concurrently
+- * which would be a data race. But they only need to know whether the
++ * The READ_ONCE() is only necessary for fscrypt_drop_inode().
++ * fscrypt_drop_inode() runs in atomic context, so it can't take the key
++ * semaphore, and thus 'secret' can change concurrently, which would be a
++ * data race. But fscrypt_drop_inode() only needs to know whether the
+ * secret *was* present at the time of check, so READ_ONCE() suffices.
+ */
+ return READ_ONCE(secret->size) != 0;
+@@ -532,7 +559,11 @@ static inline int master_key_spec_len(const struct fscrypt_key_specifier *spec)
+ return 0;
+ }
+
+-struct key *
++void fscrypt_put_master_key(struct fscrypt_master_key *mk);
++
++void fscrypt_put_master_key_activeref(struct fscrypt_master_key *mk);
++
++struct fscrypt_master_key *
+ fscrypt_find_master_key(struct super_block *sb,
+ const struct fscrypt_key_specifier *mk_spec);
+
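The comment block above describes a two-level refcount: structural refs only keep the struct allocated, active refs additionally keep it findable in the keyring with its subkeys intact, and the active count itself owns one structural ref. A self-contained model of that invariant (illustrative only; it omits the RCU deferral and hash table of the real code):

#include <stdio.h>
#include <stdlib.h>

struct mk {
	int active_refs;	/* secret present + one per unlocked inode */
	int struct_refs;	/* one owned by the active count, plus lookups */
	int in_keyring;
};

static void put_struct(struct mk *k)
{
	if (--k->struct_refs == 0)
		free(k);	/* the kernel defers this with call_rcu() */
}

static void put_active(struct mk *k)
{
	if (--k->active_refs)
		return;
	k->in_keyring = 0;	/* unhash: lookups can no longer find it */
	put_struct(k);		/* drop the struct ref the active count held */
}

int main(void)
{
	struct mk *k = calloc(1, sizeof(*k));

	if (!k)
		return 1;
	k->struct_refs = 1;	/* ref owned by active_refs being nonzero */
	k->active_refs = 1;	/* the secret is present */
	k->in_keyring = 1;

	k->struct_refs++;	/* a lookup takes a structural ref... */
	put_active(k);		/* ...secret removed: leaves the keyring */
	printf("in_keyring=%d\n", k->in_keyring);	/* 0, but still valid */
	put_struct(k);		/* lookup's ref dropped: now freed */
	return 0;
}
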
+diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
+index af74599ae1cf0..be5c650e49573 100644
+--- a/fs/crypto/hooks.c
++++ b/fs/crypto/hooks.c
+@@ -5,8 +5,6 @@
+ * Encryption hooks for higher-level filesystem operations.
+ */
+
+-#include <linux/key.h>
+-
+ #include "fscrypt_private.h"
+
+ /**
+@@ -142,7 +140,6 @@ int fscrypt_prepare_setflags(struct inode *inode,
+ unsigned int oldflags, unsigned int flags)
+ {
+ struct fscrypt_info *ci;
+- struct key *key;
+ struct fscrypt_master_key *mk;
+ int err;
+
+@@ -158,14 +155,13 @@ int fscrypt_prepare_setflags(struct inode *inode,
+ ci = inode->i_crypt_info;
+ if (ci->ci_policy.version != FSCRYPT_POLICY_V2)
+ return -EINVAL;
+- key = ci->ci_master_key;
+- mk = key->payload.data[0];
+- down_read(&key->sem);
++ mk = ci->ci_master_key;
++ down_read(&mk->mk_sem);
+ if (is_master_key_secret_present(&mk->mk_secret))
+ err = fscrypt_derive_dirhash_key(ci, mk);
+ else
+ err = -ENOKEY;
+- up_read(&key->sem);
++ up_read(&mk->mk_sem);
+ return err;
+ }
+ return 0;
+diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c
+index 0b3ffbb4faf4a..02f8bf8bd54da 100644
+--- a/fs/crypto/keyring.c
++++ b/fs/crypto/keyring.c
+@@ -18,6 +18,7 @@
+ * information about these ioctls.
+ */
+
++#include <asm/unaligned.h>
+ #include <crypto/skcipher.h>
+ #include <linux/key-type.h>
+ #include <linux/random.h>
+@@ -25,6 +26,18 @@
+
+ #include "fscrypt_private.h"
+
++/* The master encryption keys for a filesystem (->s_master_keys) */
++struct fscrypt_keyring {
++ /*
++ * Lock that protects ->key_hashtable. It does *not* protect the
++ * fscrypt_master_key structs themselves.
++ */
++ spinlock_t lock;
++
++ /* Hash table that maps fscrypt_key_specifier to fscrypt_master_key */
++ struct hlist_head key_hashtable[128];
++};
++
+ static void wipe_master_key_secret(struct fscrypt_master_key_secret *secret)
+ {
+ fscrypt_destroy_hkdf(&secret->hkdf);
+@@ -38,20 +51,70 @@ static void move_master_key_secret(struct fscrypt_master_key_secret *dst,
+ memzero_explicit(src, sizeof(*src));
+ }
+
+-static void free_master_key(struct fscrypt_master_key *mk)
++static void fscrypt_free_master_key(struct rcu_head *head)
++{
++ struct fscrypt_master_key *mk =
++ container_of(head, struct fscrypt_master_key, mk_rcu_head);
++ /*
++ * The master key secret and any embedded subkeys should have already
++ * been wiped when the last active reference to the fscrypt_master_key
++ * struct was dropped; doing it here would be unnecessarily late.
++ * Nevertheless, use kfree_sensitive() in case anything was missed.
++ */
++ kfree_sensitive(mk);
++}
++
++void fscrypt_put_master_key(struct fscrypt_master_key *mk)
++{
++ if (!refcount_dec_and_test(&mk->mk_struct_refs))
++ return;
++ /*
++ * No structural references left, so free ->mk_users, and also free the
++ * fscrypt_master_key struct itself after an RCU grace period ensures
++ * that concurrent keyring lookups can no longer find it.
++ */
++ WARN_ON(refcount_read(&mk->mk_active_refs) != 0);
++ key_put(mk->mk_users);
++ mk->mk_users = NULL;
++ call_rcu(&mk->mk_rcu_head, fscrypt_free_master_key);
++}
++
++void fscrypt_put_master_key_activeref(struct fscrypt_master_key *mk)
+ {
++ struct super_block *sb = mk->mk_sb;
++ struct fscrypt_keyring *keyring = sb->s_master_keys;
+ size_t i;
+
+- wipe_master_key_secret(&mk->mk_secret);
++ if (!refcount_dec_and_test(&mk->mk_active_refs))
++ return;
++ /*
++ * No active references left, so complete the full removal of this
++ * fscrypt_master_key struct by removing it from the keyring and
++ * destroying any subkeys embedded in it.
++ */
++
++ spin_lock(&keyring->lock);
++ hlist_del_rcu(&mk->mk_node);
++ spin_unlock(&keyring->lock);
++
++ /*
++ * ->mk_active_refs == 0 implies that ->mk_secret is not present and
++ * that ->mk_decrypted_inodes is empty.
++ */
++ WARN_ON(is_master_key_secret_present(&mk->mk_secret));
++ WARN_ON(!list_empty(&mk->mk_decrypted_inodes));
+
+ for (i = 0; i <= FSCRYPT_MODE_MAX; i++) {
+ fscrypt_destroy_prepared_key(&mk->mk_direct_keys[i]);
+ fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_64_keys[i]);
+ fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_32_keys[i]);
+ }
++ memzero_explicit(&mk->mk_ino_hash_key,
++ sizeof(mk->mk_ino_hash_key));
++ mk->mk_ino_hash_key_initialized = false;
+
+- key_put(mk->mk_users);
+- kfree_sensitive(mk);
++ /* Drop the structural ref associated with the active refs. */
++ fscrypt_put_master_key(mk);
+ }
+
+ static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec)
+@@ -61,44 +124,6 @@ static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec)
+ return master_key_spec_len(spec) != 0;
+ }
+
+-static int fscrypt_key_instantiate(struct key *key,
+- struct key_preparsed_payload *prep)
+-{
+- key->payload.data[0] = (struct fscrypt_master_key *)prep->data;
+- return 0;
+-}
+-
+-static void fscrypt_key_destroy(struct key *key)
+-{
+- free_master_key(key->payload.data[0]);
+-}
+-
+-static void fscrypt_key_describe(const struct key *key, struct seq_file *m)
+-{
+- seq_puts(m, key->description);
+-
+- if (key_is_positive(key)) {
+- const struct fscrypt_master_key *mk = key->payload.data[0];
+-
+- if (!is_master_key_secret_present(&mk->mk_secret))
+- seq_puts(m, ": secret removed");
+- }
+-}
+-
+-/*
+- * Type of key in ->s_master_keys. Each key of this type represents a master
+- * key which has been added to the filesystem. Its payload is a
+- * 'struct fscrypt_master_key'. The "." prefix in the key type name prevents
+- * users from adding keys of this type via the keyrings syscalls rather than via
+- * the intended method of FS_IOC_ADD_ENCRYPTION_KEY.
+- */
+-static struct key_type key_type_fscrypt = {
+- .name = "._fscrypt",
+- .instantiate = fscrypt_key_instantiate,
+- .destroy = fscrypt_key_destroy,
+- .describe = fscrypt_key_describe,
+-};
+-
+ static int fscrypt_user_key_instantiate(struct key *key,
+ struct key_preparsed_payload *prep)
+ {
+@@ -131,32 +156,6 @@ static struct key_type key_type_fscrypt_user = {
+ .describe = fscrypt_user_key_describe,
+ };
+
+-/* Search ->s_master_keys or ->mk_users */
+-static struct key *search_fscrypt_keyring(struct key *keyring,
+- struct key_type *type,
+- const char *description)
+-{
+- /*
+- * We need to mark the keyring reference as "possessed" so that we
+- * acquire permission to search it, via the KEY_POS_SEARCH permission.
+- */
+- key_ref_t keyref = make_key_ref(keyring, true /* possessed */);
+-
+- keyref = keyring_search(keyref, type, description, false);
+- if (IS_ERR(keyref)) {
+- if (PTR_ERR(keyref) == -EAGAIN || /* not found */
+- PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */
+- keyref = ERR_PTR(-ENOKEY);
+- return ERR_CAST(keyref);
+- }
+- return key_ref_to_ptr(keyref);
+-}
+-
+-#define FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE \
+- (CONST_STRLEN("fscrypt-") + sizeof_field(struct super_block, s_id))
+-
+-#define FSCRYPT_MK_DESCRIPTION_SIZE (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + 1)
+-
+ #define FSCRYPT_MK_USERS_DESCRIPTION_SIZE \
+ (CONST_STRLEN("fscrypt-") + 2 * FSCRYPT_KEY_IDENTIFIER_SIZE + \
+ CONST_STRLEN("-users") + 1)
+@@ -164,21 +163,6 @@ static struct key *search_fscrypt_keyring(struct key *keyring,
+ #define FSCRYPT_MK_USER_DESCRIPTION_SIZE \
+ (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + CONST_STRLEN(".uid.") + 10 + 1)
+
+-static void format_fs_keyring_description(
+- char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE],
+- const struct super_block *sb)
+-{
+- sprintf(description, "fscrypt-%s", sb->s_id);
+-}
+-
+-static void format_mk_description(
+- char description[FSCRYPT_MK_DESCRIPTION_SIZE],
+- const struct fscrypt_key_specifier *mk_spec)
+-{
+- sprintf(description, "%*phN",
+- master_key_spec_len(mk_spec), (u8 *)&mk_spec->u);
+-}
+-
+ static void format_mk_users_keyring_description(
+ char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE],
+ const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE])
+@@ -199,20 +183,15 @@ static void format_mk_user_description(
+ /* Create ->s_master_keys if needed. Synchronized by fscrypt_add_key_mutex. */
+ static int allocate_filesystem_keyring(struct super_block *sb)
+ {
+- char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE];
+- struct key *keyring;
++ struct fscrypt_keyring *keyring;
+
+ if (sb->s_master_keys)
+ return 0;
+
+- format_fs_keyring_description(description, sb);
+- keyring = keyring_alloc(description, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+- current_cred(), KEY_POS_SEARCH |
+- KEY_USR_SEARCH | KEY_USR_READ | KEY_USR_VIEW,
+- KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
+- if (IS_ERR(keyring))
+- return PTR_ERR(keyring);
+-
++ keyring = kzalloc(sizeof(*keyring), GFP_KERNEL);
++ if (!keyring)
++ return -ENOMEM;
++ spin_lock_init(&keyring->lock);
+ /*
+ * Pairs with the smp_load_acquire() in fscrypt_find_master_key().
+ * I.e., here we publish ->s_master_keys with a RELEASE barrier so that
+@@ -222,21 +201,80 @@ static int allocate_filesystem_keyring(struct super_block *sb)
+ return 0;
+ }
+
+-void fscrypt_sb_free(struct super_block *sb)
++/*
++ * Release all encryption keys that have been added to the filesystem, along
++ * with the keyring that contains them.
++ *
++ * This is called at unmount time. The filesystem's underlying block device(s)
++ * are still available at this time; this is important because after user file
++ * accesses have been allowed, this function may need to evict keys from the
++ * keyslots of an inline crypto engine, which requires the block device(s).
++ *
++ * This is also called when the super_block is being freed. This is needed to
++ * avoid a memory leak if mounting fails after the "test_dummy_encryption"
++ * option was processed, as in that case the unmount-time call isn't made.
++ */
++void fscrypt_destroy_keyring(struct super_block *sb)
+ {
+- key_put(sb->s_master_keys);
++ struct fscrypt_keyring *keyring = sb->s_master_keys;
++ size_t i;
++
++ if (!keyring)
++ return;
++
++ for (i = 0; i < ARRAY_SIZE(keyring->key_hashtable); i++) {
++ struct hlist_head *bucket = &keyring->key_hashtable[i];
++ struct fscrypt_master_key *mk;
++ struct hlist_node *tmp;
++
++ hlist_for_each_entry_safe(mk, tmp, bucket, mk_node) {
++ /*
++ * Since all inodes were already evicted, every key
++ * remaining in the keyring should have an empty inode
++ * list, and should only still be in the keyring due to
++ * the single active ref associated with ->mk_secret.
++ * There should be no structural refs beyond the one
++ * associated with the active ref.
++ */
++ WARN_ON(refcount_read(&mk->mk_active_refs) != 1);
++ WARN_ON(refcount_read(&mk->mk_struct_refs) != 1);
++ WARN_ON(!is_master_key_secret_present(&mk->mk_secret));
++ wipe_master_key_secret(&mk->mk_secret);
++ fscrypt_put_master_key_activeref(mk);
++ }
++ }
++ kfree_sensitive(keyring);
+ sb->s_master_keys = NULL;
+ }
+
++static struct hlist_head *
++fscrypt_mk_hash_bucket(struct fscrypt_keyring *keyring,
++ const struct fscrypt_key_specifier *mk_spec)
++{
++ /*
++ * Since key specifiers should be "random" values, it is sufficient to
++ * use a trivial hash function that just takes the first several bits of
++ * the key specifier.
++ */
++ unsigned long i = get_unaligned((unsigned long *)&mk_spec->u);
++
++ return &keyring->key_hashtable[i % ARRAY_SIZE(keyring->key_hashtable)];
++}
++
+ /*
+- * Find the specified master key in ->s_master_keys.
+- * Returns ERR_PTR(-ENOKEY) if not found.
++ * Find the specified master key struct in ->s_master_keys and take a structural
++ * ref to it. The structural ref guarantees that the key struct continues to
++ * exist, but it does *not* guarantee that ->s_master_keys continues to contain
++ * the key struct. The structural ref needs to be dropped by
++ * fscrypt_put_master_key(). Returns NULL if the key struct is not found.
+ */
+-struct key *fscrypt_find_master_key(struct super_block *sb,
+- const struct fscrypt_key_specifier *mk_spec)
++struct fscrypt_master_key *
++fscrypt_find_master_key(struct super_block *sb,
++ const struct fscrypt_key_specifier *mk_spec)
+ {
+- struct key *keyring;
+- char description[FSCRYPT_MK_DESCRIPTION_SIZE];
++ struct fscrypt_keyring *keyring;
++ struct hlist_head *bucket;
++ struct fscrypt_master_key *mk;
+
+ /*
+ * Pairs with the smp_store_release() in allocate_filesystem_keyring().
+@@ -246,10 +284,38 @@ struct key *fscrypt_find_master_key(struct super_block *sb,
+ */
+ keyring = smp_load_acquire(&sb->s_master_keys);
+ if (keyring == NULL)
+- return ERR_PTR(-ENOKEY); /* No keyring yet, so no keys yet. */
+-
+- format_mk_description(description, mk_spec);
+- return search_fscrypt_keyring(keyring, &key_type_fscrypt, description);
++ return NULL; /* No keyring yet, so no keys yet. */
++
++ bucket = fscrypt_mk_hash_bucket(keyring, mk_spec);
++ rcu_read_lock();
++ switch (mk_spec->type) {
++ case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR:
++ hlist_for_each_entry_rcu(mk, bucket, mk_node) {
++ if (mk->mk_spec.type ==
++ FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR &&
++ memcmp(mk->mk_spec.u.descriptor,
++ mk_spec->u.descriptor,
++ FSCRYPT_KEY_DESCRIPTOR_SIZE) == 0 &&
++ refcount_inc_not_zero(&mk->mk_struct_refs))
++ goto out;
++ }
++ break;
++ case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER:
++ hlist_for_each_entry_rcu(mk, bucket, mk_node) {
++ if (mk->mk_spec.type ==
++ FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER &&
++ memcmp(mk->mk_spec.u.identifier,
++ mk_spec->u.identifier,
++ FSCRYPT_KEY_IDENTIFIER_SIZE) == 0 &&
++ refcount_inc_not_zero(&mk->mk_struct_refs))
++ goto out;
++ }
++ break;
++ }
++ mk = NULL;
++out:
++ rcu_read_unlock();
++ return mk;
+ }
+
+ static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk)
+@@ -277,17 +343,30 @@ static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk)
+ static struct key *find_master_key_user(struct fscrypt_master_key *mk)
+ {
+ char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE];
++ key_ref_t keyref;
+
+ format_mk_user_description(description, mk->mk_spec.u.identifier);
+- return search_fscrypt_keyring(mk->mk_users, &key_type_fscrypt_user,
+- description);
++
++ /*
++ * We need to mark the keyring reference as "possessed" so that we
++ * acquire permission to search it, via the KEY_POS_SEARCH permission.
++ */
++ keyref = keyring_search(make_key_ref(mk->mk_users, true /*possessed*/),
++ &key_type_fscrypt_user, description, false);
++ if (IS_ERR(keyref)) {
++ if (PTR_ERR(keyref) == -EAGAIN || /* not found */
++ PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */
++ keyref = ERR_PTR(-ENOKEY);
++ return ERR_CAST(keyref);
++ }
++ return key_ref_to_ptr(keyref);
+ }
+
+ /*
+ * Give the current user a "key" in ->mk_users. This charges the user's quota
+ * and marks the master key as added by the current user, so that it cannot be
+- * removed by another user with the key. Either the master key's key->sem must
+- * be held for write, or the master key must be still undergoing initialization.
++ * removed by another user with the key. Either ->mk_sem must be held for
++ * write, or the master key must still be undergoing initialization.
+ */
+ static int add_master_key_user(struct fscrypt_master_key *mk)
+ {
+@@ -309,7 +388,7 @@ static int add_master_key_user(struct fscrypt_master_key *mk)
+
+ /*
+ * Remove the current user's "key" from ->mk_users.
+- * The master key's key->sem must be held for write.
++ * ->mk_sem must be held for write.
+ *
+ * Returns 0 if removed, -ENOKEY if not found, or another -errno code.
+ */
+@@ -327,63 +406,49 @@ static int remove_master_key_user(struct fscrypt_master_key *mk)
+ }
+
+ /*
+- * Allocate a new fscrypt_master_key which contains the given secret, set it as
+- * the payload of a new 'struct key' of type fscrypt, and link the 'struct key'
+- * into the given keyring. Synchronized by fscrypt_add_key_mutex.
++ * Allocate a new fscrypt_master_key, transfer the given secret over to it, and
++ * insert it into sb->s_master_keys.
+ */
+-static int add_new_master_key(struct fscrypt_master_key_secret *secret,
+- const struct fscrypt_key_specifier *mk_spec,
+- struct key *keyring)
++static int add_new_master_key(struct super_block *sb,
++ struct fscrypt_master_key_secret *secret,
++ const struct fscrypt_key_specifier *mk_spec)
+ {
++ struct fscrypt_keyring *keyring = sb->s_master_keys;
+ struct fscrypt_master_key *mk;
+- char description[FSCRYPT_MK_DESCRIPTION_SIZE];
+- struct key *key;
+ int err;
+
+ mk = kzalloc(sizeof(*mk), GFP_KERNEL);
+ if (!mk)
+ return -ENOMEM;
+
++ mk->mk_sb = sb;
++ init_rwsem(&mk->mk_sem);
++ refcount_set(&mk->mk_struct_refs, 1);
+ mk->mk_spec = *mk_spec;
+
+- move_master_key_secret(&mk->mk_secret, secret);
+-
+- refcount_set(&mk->mk_refcount, 1); /* secret is present */
+ INIT_LIST_HEAD(&mk->mk_decrypted_inodes);
+ spin_lock_init(&mk->mk_decrypted_inodes_lock);
+
+ if (mk_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) {
+ err = allocate_master_key_users_keyring(mk);
+ if (err)
+- goto out_free_mk;
++ goto out_put;
+ err = add_master_key_user(mk);
+ if (err)
+- goto out_free_mk;
++ goto out_put;
+ }
+
+- /*
+- * Note that we don't charge this key to anyone's quota, since when
+- * ->mk_users is in use those keys are charged instead, and otherwise
+- * (when ->mk_users isn't in use) only root can add these keys.
+- */
+- format_mk_description(description, mk_spec);
+- key = key_alloc(&key_type_fscrypt, description,
+- GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
+- KEY_POS_SEARCH | KEY_USR_SEARCH | KEY_USR_VIEW,
+- KEY_ALLOC_NOT_IN_QUOTA, NULL);
+- if (IS_ERR(key)) {
+- err = PTR_ERR(key);
+- goto out_free_mk;
+- }
+- err = key_instantiate_and_link(key, mk, sizeof(*mk), keyring, NULL);
+- key_put(key);
+- if (err)
+- goto out_free_mk;
++ move_master_key_secret(&mk->mk_secret, secret);
++ refcount_set(&mk->mk_active_refs, 1); /* ->mk_secret is present */
+
++ spin_lock(&keyring->lock);
++ hlist_add_head_rcu(&mk->mk_node,
++ fscrypt_mk_hash_bucket(keyring, mk_spec));
++ spin_unlock(&keyring->lock);
+ return 0;
+
+-out_free_mk:
+- free_master_key(mk);
++out_put:
++ fscrypt_put_master_key(mk);
+ return err;
+ }
+
+@@ -392,42 +457,34 @@ out_free_mk:
+ static int add_existing_master_key(struct fscrypt_master_key *mk,
+ struct fscrypt_master_key_secret *secret)
+ {
+- struct key *mk_user;
+- bool rekey;
+ int err;
+
+ /*
+ * If the current user is already in ->mk_users, then there's nothing to
+- * do. (Not applicable for v1 policy keys, which have NULL ->mk_users.)
++ * do. Otherwise, we need to add the user to ->mk_users. (Neither is
++ * applicable for v1 policy keys, which have NULL ->mk_users.)
+ */
+ if (mk->mk_users) {
+- mk_user = find_master_key_user(mk);
++ struct key *mk_user = find_master_key_user(mk);
++
+ if (mk_user != ERR_PTR(-ENOKEY)) {
+ if (IS_ERR(mk_user))
+ return PTR_ERR(mk_user);
+ key_put(mk_user);
+ return 0;
+ }
+- }
+-
+- /* If we'll be re-adding ->mk_secret, try to take the reference. */
+- rekey = !is_master_key_secret_present(&mk->mk_secret);
+- if (rekey && !refcount_inc_not_zero(&mk->mk_refcount))
+- return KEY_DEAD;
+-
+- /* Add the current user to ->mk_users, if applicable. */
+- if (mk->mk_users) {
+ err = add_master_key_user(mk);
+- if (err) {
+- if (rekey && refcount_dec_and_test(&mk->mk_refcount))
+- return KEY_DEAD;
++ if (err)
+ return err;
+- }
+ }
+
+ /* Re-add the secret if needed. */
+- if (rekey)
++ if (!is_master_key_secret_present(&mk->mk_secret)) {
++ if (!refcount_inc_not_zero(&mk->mk_active_refs))
++ return KEY_DEAD;
+ move_master_key_secret(&mk->mk_secret, secret);
++ }
++
+ return 0;
+ }
+
+@@ -436,38 +493,36 @@ static int do_add_master_key(struct super_block *sb,
+ const struct fscrypt_key_specifier *mk_spec)
+ {
+ static DEFINE_MUTEX(fscrypt_add_key_mutex);
+- struct key *key;
++ struct fscrypt_master_key *mk;
+ int err;
+
+ mutex_lock(&fscrypt_add_key_mutex); /* serialize find + link */
+-retry:
+- key = fscrypt_find_master_key(sb, mk_spec);
+- if (IS_ERR(key)) {
+- err = PTR_ERR(key);
+- if (err != -ENOKEY)
+- goto out_unlock;
++
++ mk = fscrypt_find_master_key(sb, mk_spec);
++ if (!mk) {
+ /* Didn't find the key in ->s_master_keys. Add it. */
+ err = allocate_filesystem_keyring(sb);
+- if (err)
+- goto out_unlock;
+- err = add_new_master_key(secret, mk_spec, sb->s_master_keys);
++ if (!err)
++ err = add_new_master_key(sb, secret, mk_spec);
+ } else {
+ /*
+ * Found the key in ->s_master_keys. Re-add the secret if
+ * needed, and add the user to ->mk_users if needed.
+ */
+- down_write(&key->sem);
+- err = add_existing_master_key(key->payload.data[0], secret);
+- up_write(&key->sem);
++ down_write(&mk->mk_sem);
++ err = add_existing_master_key(mk, secret);
++ up_write(&mk->mk_sem);
+ if (err == KEY_DEAD) {
+- /* Key being removed or needs to be removed */
+- key_invalidate(key);
+- key_put(key);
+- goto retry;
++ /*
++ * We found a key struct, but it's already been fully
++ * removed. Ignore the old struct and add a new one.
++ * fscrypt_add_key_mutex means we don't need to worry
++ * about concurrent adds.
++ */
++ err = add_new_master_key(sb, secret, mk_spec);
+ }
+- key_put(key);
++ fscrypt_put_master_key(mk);
+ }
+-out_unlock:
+ mutex_unlock(&fscrypt_add_key_mutex);
+ return err;
+ }
+@@ -731,19 +786,19 @@ int fscrypt_verify_key_added(struct super_block *sb,
+ const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE])
+ {
+ struct fscrypt_key_specifier mk_spec;
+- struct key *key, *mk_user;
+ struct fscrypt_master_key *mk;
++ struct key *mk_user;
+ int err;
+
+ mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
+ memcpy(mk_spec.u.identifier, identifier, FSCRYPT_KEY_IDENTIFIER_SIZE);
+
+- key = fscrypt_find_master_key(sb, &mk_spec);
+- if (IS_ERR(key)) {
+- err = PTR_ERR(key);
++ mk = fscrypt_find_master_key(sb, &mk_spec);
++ if (!mk) {
++ err = -ENOKEY;
+ goto out;
+ }
+- mk = key->payload.data[0];
++ down_read(&mk->mk_sem);
+ mk_user = find_master_key_user(mk);
+ if (IS_ERR(mk_user)) {
+ err = PTR_ERR(mk_user);
+@@ -751,7 +806,8 @@ int fscrypt_verify_key_added(struct super_block *sb,
+ key_put(mk_user);
+ err = 0;
+ }
+- key_put(key);
++ up_read(&mk->mk_sem);
++ fscrypt_put_master_key(mk);
+ out:
+ if (err == -ENOKEY && capable(CAP_FOWNER))
+ err = 0;
+@@ -913,11 +969,10 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
+ struct super_block *sb = file_inode(filp)->i_sb;
+ struct fscrypt_remove_key_arg __user *uarg = _uarg;
+ struct fscrypt_remove_key_arg arg;
+- struct key *key;
+ struct fscrypt_master_key *mk;
+ u32 status_flags = 0;
+ int err;
+- bool dead;
++ bool inodes_remain;
+
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+@@ -937,12 +992,10 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
+ return -EACCES;
+
+ /* Find the key being removed. */
+- key = fscrypt_find_master_key(sb, &arg.key_spec);
+- if (IS_ERR(key))
+- return PTR_ERR(key);
+- mk = key->payload.data[0];
+-
+- down_write(&key->sem);
++ mk = fscrypt_find_master_key(sb, &arg.key_spec);
++ if (!mk)
++ return -ENOKEY;
++ down_write(&mk->mk_sem);
+
+ /* If relevant, remove current user's (or all users) claim to the key */
+ if (mk->mk_users && mk->mk_users->keys.nr_leaves_on_tree != 0) {
+@@ -951,7 +1004,7 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
+ else
+ err = remove_master_key_user(mk);
+ if (err) {
+- up_write(&key->sem);
++ up_write(&mk->mk_sem);
+ goto out_put_key;
+ }
+ if (mk->mk_users->keys.nr_leaves_on_tree != 0) {
+@@ -963,26 +1016,22 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
+ status_flags |=
+ FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS;
+ err = 0;
+- up_write(&key->sem);
++ up_write(&mk->mk_sem);
+ goto out_put_key;
+ }
+ }
+
+ /* No user claims remaining. Go ahead and wipe the secret. */
+- dead = false;
++ err = -ENOKEY;
+ if (is_master_key_secret_present(&mk->mk_secret)) {
+ wipe_master_key_secret(&mk->mk_secret);
+- dead = refcount_dec_and_test(&mk->mk_refcount);
+- }
+- up_write(&key->sem);
+- if (dead) {
+- /*
+- * No inodes reference the key, and we wiped the secret, so the
+- * key object is free to be removed from the keyring.
+- */
+- key_invalidate(key);
++ fscrypt_put_master_key_activeref(mk);
+ err = 0;
+- } else {
++ }
++ inodes_remain = refcount_read(&mk->mk_active_refs) > 0;
++ up_write(&mk->mk_sem);
++
++ if (inodes_remain) {
+ /* Some inodes still reference this key; try to evict them. */
+ err = try_to_lock_encrypted_files(sb, mk);
+ if (err == -EBUSY) {
+@@ -998,7 +1047,7 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
+ * has been fully removed including all files locked.
+ */
+ out_put_key:
+- key_put(key);
++ fscrypt_put_master_key(mk);
+ if (err == 0)
+ err = put_user(status_flags, &uarg->removal_status_flags);
+ return err;
+@@ -1045,7 +1094,6 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg)
+ {
+ struct super_block *sb = file_inode(filp)->i_sb;
+ struct fscrypt_get_key_status_arg arg;
+- struct key *key;
+ struct fscrypt_master_key *mk;
+ int err;
+
+@@ -1062,19 +1110,18 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg)
+ arg.user_count = 0;
+ memset(arg.__out_reserved, 0, sizeof(arg.__out_reserved));
+
+- key = fscrypt_find_master_key(sb, &arg.key_spec);
+- if (IS_ERR(key)) {
+- if (key != ERR_PTR(-ENOKEY))
+- return PTR_ERR(key);
++ mk = fscrypt_find_master_key(sb, &arg.key_spec);
++ if (!mk) {
+ arg.status = FSCRYPT_KEY_STATUS_ABSENT;
+ err = 0;
+ goto out;
+ }
+- mk = key->payload.data[0];
+- down_read(&key->sem);
++ down_read(&mk->mk_sem);
+
+ if (!is_master_key_secret_present(&mk->mk_secret)) {
+- arg.status = FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED;
++ arg.status = refcount_read(&mk->mk_active_refs) > 0 ?
++ FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED :
++ FSCRYPT_KEY_STATUS_ABSENT /* raced with full removal */;
+ err = 0;
+ goto out_release_key;
+ }
+@@ -1096,8 +1143,8 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg)
+ }
+ err = 0;
+ out_release_key:
+- up_read(&key->sem);
+- key_put(key);
++ up_read(&mk->mk_sem);
++ fscrypt_put_master_key(mk);
+ out:
+ if (!err && copy_to_user(uarg, &arg, sizeof(arg)))
+ err = -EFAULT;
+@@ -1109,13 +1156,9 @@ int __init fscrypt_init_keyring(void)
+ {
+ int err;
+
+- err = register_key_type(&key_type_fscrypt);
+- if (err)
+- return err;
+-
+ err = register_key_type(&key_type_fscrypt_user);
+ if (err)
+- goto err_unregister_fscrypt;
++ return err;
+
+ err = register_key_type(&key_type_fscrypt_provisioning);
+ if (err)
+@@ -1125,7 +1168,5 @@ int __init fscrypt_init_keyring(void)
+
+ err_unregister_fscrypt_user:
+ unregister_key_type(&key_type_fscrypt_user);
+-err_unregister_fscrypt:
+- unregister_key_type(&key_type_fscrypt);
+ return err;
+ }
+diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
+index 89cd533a88bff..c3fbd594cc79e 100644
+--- a/fs/crypto/keysetup.c
++++ b/fs/crypto/keysetup.c
+@@ -9,7 +9,6 @@
+ */
+
+ #include <crypto/skcipher.h>
+-#include <linux/key.h>
+ #include <linux/random.h>
+
+ #include "fscrypt_private.h"
+@@ -151,6 +150,7 @@ void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key)
+ {
+ crypto_free_skcipher(prep_key->tfm);
+ fscrypt_destroy_inline_crypt_key(prep_key);
++ memzero_explicit(prep_key, sizeof(*prep_key));
+ }
+
+ /* Given a per-file encryption key, set up the file's crypto transform object */
+@@ -404,20 +404,18 @@ static bool fscrypt_valid_master_key_size(const struct fscrypt_master_key *mk,
+ /*
+ * Find the master key, then set up the inode's actual encryption key.
+ *
+- * If the master key is found in the filesystem-level keyring, then the
+- * corresponding 'struct key' is returned in *master_key_ret with its semaphore
+- * read-locked. This is needed to ensure that only one task links the
+- * fscrypt_info into ->mk_decrypted_inodes (as multiple tasks may race to create
+- * an fscrypt_info for the same inode), and to synchronize the master key being
+- * removed with a new inode starting to use it.
++ * If the master key is found in the filesystem-level keyring, then it is
++ * returned in *mk_ret with its semaphore read-locked. This is needed to ensure
++ * that only one task links the fscrypt_info into ->mk_decrypted_inodes (as
++ * multiple tasks may race to create an fscrypt_info for the same inode), and to
++ * synchronize the master key being removed with a new inode starting to use it.
+ */
+ static int setup_file_encryption_key(struct fscrypt_info *ci,
+ bool need_dirhash_key,
+- struct key **master_key_ret)
++ struct fscrypt_master_key **mk_ret)
+ {
+- struct key *key;
+- struct fscrypt_master_key *mk = NULL;
+ struct fscrypt_key_specifier mk_spec;
++ struct fscrypt_master_key *mk;
+ int err;
+
+ err = fscrypt_select_encryption_impl(ci);
+@@ -442,11 +440,10 @@ static int setup_file_encryption_key(struct fscrypt_info *ci,
+ return -EINVAL;
+ }
+
+- key = fscrypt_find_master_key(ci->ci_inode->i_sb, &mk_spec);
+- if (IS_ERR(key)) {
+- if (key != ERR_PTR(-ENOKEY) ||
+- ci->ci_policy.version != FSCRYPT_POLICY_V1)
+- return PTR_ERR(key);
++ mk = fscrypt_find_master_key(ci->ci_inode->i_sb, &mk_spec);
++ if (!mk) {
++ if (ci->ci_policy.version != FSCRYPT_POLICY_V1)
++ return -ENOKEY;
+
+ /*
+ * As a legacy fallback for v1 policies, search for the key in
+@@ -456,9 +453,7 @@ static int setup_file_encryption_key(struct fscrypt_info *ci,
+ */
+ return fscrypt_setup_v1_file_key_via_subscribed_keyrings(ci);
+ }
+-
+- mk = key->payload.data[0];
+- down_read(&key->sem);
++ down_read(&mk->mk_sem);
+
+ /* Has the secret been removed (via FS_IOC_REMOVE_ENCRYPTION_KEY)? */
+ if (!is_master_key_secret_present(&mk->mk_secret)) {
+@@ -486,18 +481,18 @@ static int setup_file_encryption_key(struct fscrypt_info *ci,
+ if (err)
+ goto out_release_key;
+
+- *master_key_ret = key;
++ *mk_ret = mk;
+ return 0;
+
+ out_release_key:
+- up_read(&key->sem);
+- key_put(key);
++ up_read(&mk->mk_sem);
++ fscrypt_put_master_key(mk);
+ return err;
+ }
+
+ static void put_crypt_info(struct fscrypt_info *ci)
+ {
+- struct key *key;
++ struct fscrypt_master_key *mk;
+
+ if (!ci)
+ return;
+@@ -507,24 +502,18 @@ static void put_crypt_info(struct fscrypt_info *ci)
+ else if (ci->ci_owns_key)
+ fscrypt_destroy_prepared_key(&ci->ci_enc_key);
+
+- key = ci->ci_master_key;
+- if (key) {
+- struct fscrypt_master_key *mk = key->payload.data[0];
+-
++ mk = ci->ci_master_key;
++ if (mk) {
+ /*
+ * Remove this inode from the list of inodes that were unlocked
+- * with the master key.
+- *
+- * In addition, if we're removing the last inode from a key that
+- * already had its secret removed, invalidate the key so that it
+- * gets removed from ->s_master_keys.
++ * with the master key. In addition, if we're removing the last
++ * inode from a master key struct that already had its secret
++ * removed, then complete the full removal of the struct.
+ */
+ spin_lock(&mk->mk_decrypted_inodes_lock);
+ list_del(&ci->ci_master_key_link);
+ spin_unlock(&mk->mk_decrypted_inodes_lock);
+- if (refcount_dec_and_test(&mk->mk_refcount))
+- key_invalidate(key);
+- key_put(key);
++ fscrypt_put_master_key_activeref(mk);
+ }
+ memzero_explicit(ci, sizeof(*ci));
+ kmem_cache_free(fscrypt_info_cachep, ci);
+@@ -538,7 +527,7 @@ fscrypt_setup_encryption_info(struct inode *inode,
+ {
+ struct fscrypt_info *crypt_info;
+ struct fscrypt_mode *mode;
+- struct key *master_key = NULL;
++ struct fscrypt_master_key *mk = NULL;
+ int res;
+
+ res = fscrypt_initialize(inode->i_sb->s_cop->flags);
+@@ -561,8 +550,7 @@ fscrypt_setup_encryption_info(struct inode *inode,
+ WARN_ON(mode->ivsize > FSCRYPT_MAX_IV_SIZE);
+ crypt_info->ci_mode = mode;
+
+- res = setup_file_encryption_key(crypt_info, need_dirhash_key,
+- &master_key);
++ res = setup_file_encryption_key(crypt_info, need_dirhash_key, &mk);
+ if (res)
+ goto out;
+
+@@ -577,12 +565,9 @@ fscrypt_setup_encryption_info(struct inode *inode,
+ * We won the race and set ->i_crypt_info to our crypt_info.
+ * Now link it into the master key's inode list.
+ */
+- if (master_key) {
+- struct fscrypt_master_key *mk =
+- master_key->payload.data[0];
+-
+- refcount_inc(&mk->mk_refcount);
+- crypt_info->ci_master_key = key_get(master_key);
++ if (mk) {
++ crypt_info->ci_master_key = mk;
++ refcount_inc(&mk->mk_active_refs);
+ spin_lock(&mk->mk_decrypted_inodes_lock);
+ list_add(&crypt_info->ci_master_key_link,
+ &mk->mk_decrypted_inodes);
+@@ -592,9 +577,9 @@ fscrypt_setup_encryption_info(struct inode *inode,
+ }
+ res = 0;
+ out:
+- if (master_key) {
+- up_read(&master_key->sem);
+- key_put(master_key);
++ if (mk) {
++ up_read(&mk->mk_sem);
++ fscrypt_put_master_key(mk);
+ }
+ put_crypt_info(crypt_info);
+ return res;
+@@ -759,7 +744,6 @@ EXPORT_SYMBOL(fscrypt_free_inode);
+ int fscrypt_drop_inode(struct inode *inode)
+ {
+ const struct fscrypt_info *ci = fscrypt_get_info(inode);
+- const struct fscrypt_master_key *mk;
+
+ /*
+ * If ci is NULL, then the inode doesn't have an encryption key set up
+@@ -769,7 +753,6 @@ int fscrypt_drop_inode(struct inode *inode)
+ */
+ if (!ci || !ci->ci_master_key)
+ return 0;
+- mk = ci->ci_master_key->payload.data[0];
+
+ /*
+ * With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes
+@@ -788,6 +771,6 @@ int fscrypt_drop_inode(struct inode *inode)
+ * then the thread removing the key will either evict the inode itself
+ * or will correctly detect that it wasn't evicted due to the race.
+ */
+- return !is_master_key_secret_present(&mk->mk_secret);
++ return !is_master_key_secret_present(&ci->ci_master_key->mk_secret);
+ }
+ EXPORT_SYMBOL_GPL(fscrypt_drop_inode);
+diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
+index ed3d623724cdd..cad34dbe8e298 100644
+--- a/fs/crypto/policy.c
++++ b/fs/crypto/policy.c
+@@ -692,12 +692,8 @@ int fscrypt_set_context(struct inode *inode, void *fs_data)
+ * delayed key setup that requires the inode number.
+ */
+ if (ci->ci_policy.version == FSCRYPT_POLICY_V2 &&
+- (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) {
+- const struct fscrypt_master_key *mk =
+- ci->ci_master_key->payload.data[0];
+-
+- fscrypt_hash_inode_number(ci, mk);
+- }
++ (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
++ fscrypt_hash_inode_number(ci, ci->ci_master_key);
+
+ return inode->i_sb->s_cop->set_context(inode, &ctx, ctxsize, fs_data);
+ }
+diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
+index af5a75a89e6e1..b0ea646454ac8 100644
+--- a/fs/ext4/migrate.c
++++ b/fs/ext4/migrate.c
+@@ -425,7 +425,8 @@ int ext4_ext_migrate(struct inode *inode)
+ * already is extent-based, error out.
+ */
+ if (!ext4_has_feature_extents(inode->i_sb) ||
+- (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
++ ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
++ ext4_has_inline_data(inode))
+ return -EINVAL;
+
+ if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 971a08d947f42..b8cf3e986b000 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2259,8 +2259,16 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
+ memset(de, 0, len); /* wipe old data */
+ de = (struct ext4_dir_entry_2 *) data2;
+ top = data2 + len;
+- while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top)
++ while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) {
++ if (ext4_check_dir_entry(dir, NULL, de, bh2, data2, len,
++ (data2 + (blocksize - csum_size) -
++ (char *) de))) {
++ brelse(bh2);
++ brelse(bh);
++ return -EFSCORRUPTED;
++ }
+ de = de2;
++ }
+ de->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) -
+ (char *) de, blocksize);
+
+diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
+index eacbd489e3bf1..840639e7a7caa 100644
+--- a/fs/ext4/verity.c
++++ b/fs/ext4/verity.c
+@@ -364,13 +364,14 @@ static struct page *ext4_read_merkle_tree_page(struct inode *inode,
+ pgoff_t index,
+ unsigned long num_ra_pages)
+ {
+- DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
+ struct page *page;
+
+ index += ext4_verity_metadata_pos(inode) >> PAGE_SHIFT;
+
+ page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
+ if (!page || !PageUptodate(page)) {
++ DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
++
+ if (page)
+ put_page(page);
+ else if (num_ra_pages > 1)
+diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
+index fe5acdccaae19..a28968bb56e62 100644
+--- a/fs/f2fs/verity.c
++++ b/fs/f2fs/verity.c
+@@ -261,13 +261,14 @@ static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
+ pgoff_t index,
+ unsigned long num_ra_pages)
+ {
+- DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
+ struct page *page;
+
+ index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT;
+
+ page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
+ if (!page || !PageUptodate(page)) {
++ DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
++
+ if (page)
+ put_page(page);
+ else if (num_ra_pages > 1)
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 71e9e301e569d..307e7dae3ab6b 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -2975,6 +2975,10 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
+ goto out;
+ }
+
++ err = file_modified(file);
++ if (err)
++ goto out;
++
+ if (!(mode & FALLOC_FL_KEEP_SIZE))
+ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 7c9eb679dbdbf..6a3ba306c3216 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -228,8 +228,7 @@ again:
+ *
+ */
+ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
+- fmode_t type,
+- const nfs4_stateid *stateid,
++ fmode_t type, const nfs4_stateid *stateid,
+ unsigned long pagemod_limit)
+ {
+ struct nfs_delegation *delegation;
+@@ -239,25 +238,24 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
+ delegation = rcu_dereference(NFS_I(inode)->delegation);
+ if (delegation != NULL) {
+ spin_lock(&delegation->lock);
+- if (nfs4_is_valid_delegation(delegation, 0)) {
+- nfs4_stateid_copy(&delegation->stateid, stateid);
+- delegation->type = type;
+- delegation->pagemod_limit = pagemod_limit;
+- oldcred = delegation->cred;
+- delegation->cred = get_cred(cred);
+- clear_bit(NFS_DELEGATION_NEED_RECLAIM,
+- &delegation->flags);
+- spin_unlock(&delegation->lock);
+- rcu_read_unlock();
+- put_cred(oldcred);
+- trace_nfs4_reclaim_delegation(inode, type);
+- return;
+- }
+- /* We appear to have raced with a delegation return. */
++ nfs4_stateid_copy(&delegation->stateid, stateid);
++ delegation->type = type;
++ delegation->pagemod_limit = pagemod_limit;
++ oldcred = delegation->cred;
++ delegation->cred = get_cred(cred);
++ clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
++ if (test_and_clear_bit(NFS_DELEGATION_REVOKED,
++ &delegation->flags))
++ atomic_long_inc(&nfs_active_delegations);
+ spin_unlock(&delegation->lock);
++ rcu_read_unlock();
++ put_cred(oldcred);
++ trace_nfs4_reclaim_delegation(inode, type);
++ } else {
++ rcu_read_unlock();
++ nfs_inode_set_delegation(inode, cred, type, stateid,
++ pagemod_limit);
+ }
+- rcu_read_unlock();
+- nfs_inode_set_delegation(inode, cred, type, stateid, pagemod_limit);
+ }
+
+ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index 93f4d8257525b..da94bf2afd070 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -1077,6 +1077,9 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
+ status = nfs4_call_sync(server->client, server, msg,
+ &args.seq_args, &res.seq_res, 0);
+ if (status == 0) {
++ /* a zero-length count means clone to EOF in src */
++ if (count == 0 && res.dst_fattr->valid & NFS_ATTR_FATTR_SIZE)
++ count = nfs_size_to_loff_t(res.dst_fattr->size) - dst_offset;
+ nfs42_copy_dest_done(dst_inode, dst_offset, count);
+ status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
+ }
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index ed06b68b2b4e9..1bf7a72ebda6e 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -346,6 +346,7 @@ int nfs40_init_client(struct nfs_client *clp)
+ ret = nfs4_setup_slot_table(tbl, NFS4_MAX_SLOT_TABLE,
+ "NFSv4.0 transport Slot table");
+ if (ret) {
++ nfs4_shutdown_slot_table(tbl);
+ kfree(tbl);
+ return ret;
+ }
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 32ee79a992461..ecac56be6cb72 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1778,6 +1778,7 @@ static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
+
+ static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
+ {
++ set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
+ /* Mark all delegations for reclaim */
+ nfs_delegation_mark_reclaim(clp);
+ nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
+@@ -2651,6 +2652,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ if (status < 0)
+ goto out_error;
+ nfs4_state_end_reclaim_reboot(clp);
++ continue;
+ }
+
+ /* Detect expired delegations... */
+diff --git a/fs/super.c b/fs/super.c
+index 87379bb1f7a30..7fa3ee79ec898 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -293,7 +293,7 @@ static void __put_super(struct super_block *s)
+ WARN_ON(s->s_inode_lru.node);
+ WARN_ON(!list_empty(&s->s_mounts));
+ security_sb_free(s);
+- fscrypt_sb_free(s);
++ fscrypt_destroy_keyring(s);
+ put_user_ns(s->s_user_ns);
+ kfree(s->s_subtype);
+ call_rcu(&s->rcu, destroy_super_rcu);
+@@ -454,6 +454,7 @@ void generic_shutdown_super(struct super_block *sb)
+ evict_inodes(sb);
+ /* only nonzero refcount inodes can have marks */
+ fsnotify_sb_delete(sb);
++ fscrypt_destroy_keyring(sb);
+ security_sb_delete(sb);
+
+ if (sb->s_dio_done_wq) {
+diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
+index 34fb3431a8f36..292a5c40bd0c6 100644
+--- a/include/acpi/ghes.h
++++ b/include/acpi/ghes.h
+@@ -71,7 +71,7 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb);
+ void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
+ #endif
+
+-int ghes_estatus_pool_init(int num_ghes);
++int ghes_estatus_pool_init(unsigned int num_ghes);
+
+ /* From drivers/edac/ghes_edac.c */
+
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index d34e8a7ed4d5c..4cd4ed34d3cb8 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -1165,7 +1165,7 @@ efi_status_t efi_random_get_seed(void);
+ arch_efi_call_virt_teardown(); \
+ })
+
+-#define EFI_RANDOM_SEED_SIZE 64U
++#define EFI_RANDOM_SEED_SIZE 32U // BLAKE2S_HASH_SIZE
+
+ struct linux_efi_random_seed {
+ u32 size;
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index fd4c450dc6128..806ac72c72202 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1487,7 +1487,7 @@ struct super_block {
+ const struct xattr_handler **s_xattr;
+ #ifdef CONFIG_FS_ENCRYPTION
+ const struct fscrypt_operations *s_cop;
+- struct key *s_master_keys; /* master crypto keys in use */
++ struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */
+ #endif
+ #ifdef CONFIG_FS_VERITY
+ const struct fsverity_operations *s_vop;
+diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
+index e912ed9141d9d..3c7ea2cf85a58 100644
+--- a/include/linux/fscrypt.h
++++ b/include/linux/fscrypt.h
+@@ -294,7 +294,7 @@ fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
+ }
+
+ /* keyring.c */
+-void fscrypt_sb_free(struct super_block *sb);
++void fscrypt_destroy_keyring(struct super_block *sb);
+ int fscrypt_ioctl_add_key(struct file *filp, void __user *arg);
+ int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg);
+ int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg);
+@@ -482,7 +482,7 @@ fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
+ }
+
+ /* keyring.c */
+-static inline void fscrypt_sb_free(struct super_block *sb)
++static inline void fscrypt_destroy_keyring(struct super_block *sb)
+ {
+ }
+
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 22c1d935e22dd..af8e7045017fd 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -198,8 +198,8 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+ struct page *follow_huge_pd(struct vm_area_struct *vma,
+ unsigned long address, hugepd_t hpd,
+ int flags, int pdshift);
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+- pmd_t *pmd, int flags);
++struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
++ int flags);
+ struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
+ pud_t *pud, int flags);
+ struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
+@@ -286,8 +286,8 @@ static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
+ return NULL;
+ }
+
+-static inline struct page *follow_huge_pmd(struct mm_struct *mm,
+- unsigned long address, pmd_t *pmd, int flags)
++static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
++ unsigned long address, int flags)
+ {
+ return NULL;
+ }
+diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
+index 95ec18c2f49ce..9a476f902c425 100644
+--- a/include/media/v4l2-subdev.h
++++ b/include/media/v4l2-subdev.h
+@@ -995,6 +995,8 @@ v4l2_subdev_get_try_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ unsigned int pad)
+ {
++ if (WARN_ON(!state))
++ return NULL;
+ if (WARN_ON(pad >= sd->entity.num_pads))
+ pad = 0;
+ return &state->pads[pad].try_fmt;
+@@ -1013,6 +1015,8 @@ v4l2_subdev_get_try_crop(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ unsigned int pad)
+ {
++ if (WARN_ON(!state))
++ return NULL;
+ if (WARN_ON(pad >= sd->entity.num_pads))
+ pad = 0;
+ return &state->pads[pad].try_crop;
+@@ -1031,6 +1035,8 @@ v4l2_subdev_get_try_compose(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ unsigned int pad)
+ {
++ if (WARN_ON(!state))
++ return NULL;
+ if (WARN_ON(pad >= sd->entity.num_pads))
+ pad = 0;
+ return &state->pads[pad].try_compose;
+diff --git a/include/net/protocol.h b/include/net/protocol.h
+index f51c06ae365f5..6aef8cb11cc8c 100644
+--- a/include/net/protocol.h
++++ b/include/net/protocol.h
+@@ -35,8 +35,6 @@
+
+ /* This is used to register protocols. */
+ struct net_protocol {
+- int (*early_demux)(struct sk_buff *skb);
+- int (*early_demux_handler)(struct sk_buff *skb);
+ int (*handler)(struct sk_buff *skb);
+
+ /* This returns an error if we weren't able to handle the error. */
+@@ -52,8 +50,6 @@ struct net_protocol {
+
+ #if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_protocol {
+- void (*early_demux)(struct sk_buff *skb);
+- void (*early_demux_handler)(struct sk_buff *skb);
+ int (*handler)(struct sk_buff *skb);
+
+ /* This returns an error if we weren't able to handle the error. */
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index d2de3b7788a97..81ef95dc27ba5 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -921,7 +921,7 @@ extern const struct inet_connection_sock_af_ops ipv6_specific;
+
+ INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
+ INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
+-INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb));
++void tcp_v6_early_demux(struct sk_buff *skb);
+
+ #endif
+
+diff --git a/include/net/udp.h b/include/net/udp.h
+index 438b1b01a56ce..930666c0b6e50 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -173,7 +173,7 @@ INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
+ INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
+ struct sk_buff *));
+ INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
+-INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *));
++void udp_v6_early_demux(struct sk_buff *skb);
+ INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
+
+ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 9df585b9467e4..55fffb7e6f1cc 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2208,8 +2208,11 @@ int enable_kprobe(struct kprobe *kp)
+ if (!kprobes_all_disarmed && kprobe_disabled(p)) {
+ p->flags &= ~KPROBE_FLAG_DISABLED;
+ ret = arm_kprobe(p);
+- if (ret)
++ if (ret) {
+ p->flags |= KPROBE_FLAG_DISABLED;
++ if (p != kp)
++ kp->flags |= KPROBE_FLAG_DISABLED;
++ }
+ }
+ out:
+ mutex_unlock(&kprobe_mutex);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 37db5bfa8edc1..c197f39fdb78d 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2948,18 +2948,8 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ command |= FTRACE_UPDATE_TRACE_FUNC;
+ }
+
+- if (!command || !ftrace_enabled) {
+- /*
+- * If these are dynamic or per_cpu ops, they still
+- * need their data freed. Since, function tracing is
+- * not currently active, we can just free them
+- * without synchronizing all CPUs.
+- */
+- if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
+- goto free_ops;
+-
+- return 0;
+- }
++ if (!command || !ftrace_enabled)
++ goto out;
+
+ /*
+ * If the ops uses a trampoline, then it needs to be
+@@ -2996,6 +2986,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ removed_ops = NULL;
+ ops->flags &= ~FTRACE_OPS_FL_REMOVING;
+
++out:
+ /*
+ * Dynamic ops may be freed, we must make sure that all
+ * callers are done before leaving this function.
+@@ -3023,7 +3014,6 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ if (IS_ENABLED(CONFIG_PREEMPTION))
+ synchronize_rcu_tasks();
+
+- free_ops:
+ ftrace_trampoline_free(ops);
+ }
+
+diff --git a/kernel/trace/kprobe_event_gen_test.c b/kernel/trace/kprobe_event_gen_test.c
+index 80e04a1e19772..d81f7c51025c7 100644
+--- a/kernel/trace/kprobe_event_gen_test.c
++++ b/kernel/trace/kprobe_event_gen_test.c
+@@ -100,20 +100,20 @@ static int __init test_gen_kprobe_cmd(void)
+ KPROBE_GEN_TEST_FUNC,
+ KPROBE_GEN_TEST_ARG0, KPROBE_GEN_TEST_ARG1);
+ if (ret)
+- goto free;
++ goto out;
+
+ /* Use kprobe_event_add_fields to add the rest of the fields */
+
+ ret = kprobe_event_add_fields(&cmd, KPROBE_GEN_TEST_ARG2, KPROBE_GEN_TEST_ARG3);
+ if (ret)
+- goto free;
++ goto out;
+
+ /*
+ * This actually creates the event.
+ */
+ ret = kprobe_event_gen_cmd_end(&cmd);
+ if (ret)
+- goto free;
++ goto out;
+
+ /*
+ * Now get the gen_kprobe_test event file. We need to prevent
+@@ -136,13 +136,11 @@ static int __init test_gen_kprobe_cmd(void)
+ goto delete;
+ }
+ out:
++ kfree(buf);
+ return ret;
+ delete:
+ /* We got an error after creating the event, delete it */
+ ret = kprobe_event_delete("gen_kprobe_test");
+- free:
+- kfree(buf);
+-
+ goto out;
+ }
+
+@@ -170,14 +168,14 @@ static int __init test_gen_kretprobe_cmd(void)
+ KPROBE_GEN_TEST_FUNC,
+ "$retval");
+ if (ret)
+- goto free;
++ goto out;
+
+ /*
+ * This actually creates the event.
+ */
+ ret = kretprobe_event_gen_cmd_end(&cmd);
+ if (ret)
+- goto free;
++ goto out;
+
+ /*
+ * Now get the gen_kretprobe_test event file. We need to
+@@ -201,13 +199,11 @@ static int __init test_gen_kretprobe_cmd(void)
+ goto delete;
+ }
+ out:
++ kfree(buf);
+ return ret;
+ delete:
+ /* We got an error after creating the event, delete it */
+ ret = kprobe_event_delete("gen_kretprobe_test");
+- free:
+- kfree(buf);
+-
+ goto out;
+ }
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 90fc95e852322..5e8ae0b3ef712 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -901,6 +901,9 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
+ struct ring_buffer_per_cpu *cpu_buffer;
+ struct rb_irq_work *rbwork;
+
++ if (!buffer)
++ return;
++
+ if (cpu == RING_BUFFER_ALL_CPUS) {
+
+ /* Wake up individual ones too. One level recursion */
+@@ -909,7 +912,15 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
+
+ rbwork = &buffer->irq_work;
+ } else {
++ if (WARN_ON_ONCE(!buffer->buffers))
++ return;
++ if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
++ return;
++
+ cpu_buffer = buffer->buffers[cpu];
++ /* The CPU buffer may not have been initialized yet */
++ if (!cpu_buffer)
++ return;
+ rbwork = &cpu_buffer->irq_work;
+ }
+
+diff --git a/mm/gup.c b/mm/gup.c
+index 1a23cd0b4fba1..69e45cbe58f8c 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -509,6 +509,18 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
+ if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
+ (FOLL_PIN | FOLL_GET)))
+ return ERR_PTR(-EINVAL);
++
++ /*
++	 * Handle PTE-level hugetlb mappings, such as contiguous-PTE
++	 * hugetlb on the ARM64 architecture.
++ */
++ if (is_vm_hugetlb_page(vma)) {
++ page = follow_huge_pmd_pte(vma, address, flags);
++ if (page)
++ return page;
++ return no_page_table(vma, flags);
++ }
++
+ retry:
+ if (unlikely(pmd_bad(*pmd)))
+ return no_page_table(vma, flags);
+@@ -652,7 +664,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
+ if (pmd_none(pmdval))
+ return no_page_table(vma, flags);
+ if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
+- page = follow_huge_pmd(mm, address, pmd, flags);
++ page = follow_huge_pmd_pte(vma, address, flags);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index f5f8929fad515..dbb63ec3b5fab 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -6182,12 +6182,13 @@ follow_huge_pd(struct vm_area_struct *vma,
+ }
+
+ struct page * __weak
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+- pmd_t *pmd, int flags)
++follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags)
+ {
++ struct hstate *h = hstate_vma(vma);
++ struct mm_struct *mm = vma->vm_mm;
+ struct page *page = NULL;
+ spinlock_t *ptl;
+- pte_t pte;
++ pte_t *ptep, pte;
+
+ /* FOLL_GET and FOLL_PIN are mutually exclusive. */
+ if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
+@@ -6195,17 +6196,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ return NULL;
+
+ retry:
+- ptl = pmd_lockptr(mm, pmd);
+- spin_lock(ptl);
+- /*
+- * make sure that the address range covered by this pmd is not
+- * unmapped from other threads.
+- */
+- if (!pmd_huge(*pmd))
+- goto out;
+- pte = huge_ptep_get((pte_t *)pmd);
++ ptep = huge_pte_offset(mm, address, huge_page_size(h));
++ if (!ptep)
++ return NULL;
++
++ ptl = huge_pte_lock(h, mm, ptep);
++ pte = huge_ptep_get(ptep);
+ if (pte_present(pte)) {
+- page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
++ page = pte_page(pte) +
++ ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
+ /*
+ * try_grab_page() should always succeed here, because: a) we
+ * hold the pmd (ptl) lock, and b) we've just checked that the
+@@ -6221,7 +6220,7 @@ retry:
+ } else {
+ if (is_hugetlb_entry_migration(pte)) {
+ spin_unlock(ptl);
+- __migration_entry_wait(mm, (pte_t *)pmd, ptl);
++ __migration_entry_wait(mm, ptep, ptl);
+ goto retry;
+ }
+ /*
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 8f1a95b9d3207..16f954a4802c3 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -3764,7 +3764,8 @@ done:
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+
+- if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
++ if (remote_efs &&
++ test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ chan->remote_id = efs.id;
+ chan->remote_stype = efs.stype;
+ chan->remote_msdu = le16_to_cpu(efs.msdu);
+@@ -5813,6 +5814,19 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
+ BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
+ scid, mtu, mps);
+
++ /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
++ * page 1059:
++ *
++ * Valid range: 0x0001-0x00ff
++ *
++ * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
++ */
++ if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
++ result = L2CAP_CR_LE_BAD_PSM;
++ chan = NULL;
++ goto response;
++ }
++
+ /* Check if we have socket listening on psm */
+ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
+ &conn->hcon->dst, LE_LINK);
+@@ -6001,6 +6015,18 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
+
+ psm = req->psm;
+
++ /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
++ * page 1059:
++ *
++ * Valid range: 0x0001-0x00ff
++ *
++ * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
++ */
++ if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
++ result = L2CAP_CR_LE_BAD_PSM;
++ goto response;
++ }
++
+ BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
+
+ memset(&pdu, 0, sizeof(pdu));
+@@ -6885,6 +6911,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
+ {
++ struct l2cap_ctrl local_control;
+ int err = 0;
+ bool skb_in_use = false;
+
+@@ -6909,15 +6936,32 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
+ chan->buffer_seq = chan->expected_tx_seq;
+ skb_in_use = true;
+
++ /* l2cap_reassemble_sdu may free skb, hence invalidate
++ * control, so make a copy in advance to use it after
++ * l2cap_reassemble_sdu returns and to avoid the race
++ * condition, for example:
++ *
++ * The current thread calls:
++ * l2cap_reassemble_sdu
++ * chan->ops->recv == l2cap_sock_recv_cb
++ * __sock_queue_rcv_skb
++ * Another thread calls:
++ * bt_sock_recvmsg
++ * skb_recv_datagram
++ * skb_free_datagram
++ * Then the current thread tries to access control, but
++ * it was freed by skb_free_datagram.
++ */
++ local_control = *control;
+ err = l2cap_reassemble_sdu(chan, skb, control);
+ if (err)
+ break;
+
+- if (control->final) {
++ if (local_control.final) {
+ if (!test_and_clear_bit(CONN_REJ_ACT,
+ &chan->conn_state)) {
+- control->final = 0;
+- l2cap_retransmit_all(chan, control);
++ local_control.final = 0;
++ l2cap_retransmit_all(chan, &local_control);
+ l2cap_ertm_send(chan);
+ }
+ }
+@@ -7297,11 +7341,27 @@ static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff *skb)
+ {
++ /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
++ * the txseq field in advance to use it after l2cap_reassemble_sdu
++ * returns and to avoid the race condition, for example:
++ *
++ * The current thread calls:
++ * l2cap_reassemble_sdu
++ * chan->ops->recv == l2cap_sock_recv_cb
++ * __sock_queue_rcv_skb
++ * Another thread calls:
++ * bt_sock_recvmsg
++ * skb_recv_datagram
++ * skb_free_datagram
++ * Then the current thread tries to access control, but it was freed by
++ * skb_free_datagram.
++ */
++ u16 txseq = control->txseq;
++
+ BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
+ chan->rx_state);
+
+- if (l2cap_classify_txseq(chan, control->txseq) ==
+- L2CAP_TXSEQ_EXPECTED) {
++ if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
+ l2cap_pass_to_tx(chan, control);
+
+ BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
+@@ -7324,8 +7384,8 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ }
+ }
+
+- chan->last_acked_seq = control->txseq;
+- chan->expected_tx_seq = __next_seq(chan, control->txseq);
++ chan->last_acked_seq = txseq;
++ chan->expected_tx_seq = __next_seq(chan, txseq);
+
+ return 0;
+ }
+@@ -7581,6 +7641,7 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
+ return;
+ }
+
++ l2cap_chan_hold(chan);
+ l2cap_chan_lock(chan);
+ } else {
+ BT_DBG("unknown cid 0x%4.4x", cid);
+@@ -8426,9 +8487,8 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ * expected length.
+ */
+ if (skb->len < L2CAP_LEN_SIZE) {
+- if (l2cap_recv_frag(conn, skb, conn->mtu) < 0)
+- goto drop;
+- return;
++ l2cap_recv_frag(conn, skb, conn->mtu);
++ break;
+ }
+
+ len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
+@@ -8472,7 +8532,7 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+
+ /* Header still could not be read just continue */
+ if (conn->rx_skb->len < L2CAP_LEN_SIZE)
+- return;
++ break;
+ }
+
+ if (skb->len > conn->rx_len) {
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index b3556c5c1c08e..0e22ecb469771 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -372,7 +372,7 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
+ write_lock_bh(&tbl->lock);
+ neigh_flush_dev(tbl, dev, skip_perm);
+ pneigh_ifdown_and_unlock(tbl, dev);
+- pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
++ pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL);
+ if (skb_queue_empty_lockless(&tbl->proxy_queue))
+ del_timer_sync(&tbl->proxy_timer);
+ return 0;
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index 64a56db3de586..34763f575c308 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -1253,9 +1253,9 @@ static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
+ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
+ const char *user_protocol)
+ {
++ const struct dsa_device_ops *tag_ops = NULL;
+ struct dsa_switch *ds = dp->ds;
+ struct dsa_switch_tree *dst = ds->dst;
+- const struct dsa_device_ops *tag_ops;
+ enum dsa_tag_protocol default_proto;
+
+ /* Find out which protocol the switch would prefer. */
+@@ -1278,10 +1278,17 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
+ }
+
+ tag_ops = dsa_find_tagger_by_name(user_protocol);
+- } else {
+- tag_ops = dsa_tag_driver_get(default_proto);
++ if (IS_ERR(tag_ops)) {
++ dev_warn(ds->dev,
++ "Failed to find a tagging driver for protocol %s, using default\n",
++ user_protocol);
++ tag_ops = NULL;
++ }
+ }
+
++ if (!tag_ops)
++ tag_ops = dsa_tag_driver_get(default_proto);
++
+ if (IS_ERR(tag_ops)) {
+ if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
+ return -EPROBE_DEFER;
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index e4b2ced66261b..91710e5eedff0 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1728,24 +1728,14 @@ static const struct net_protocol igmp_protocol = {
+ };
+ #endif
+
+-/* thinking of making this const? Don't.
+- * early_demux can change based on sysctl.
+- */
+-static struct net_protocol tcp_protocol = {
+- .early_demux = tcp_v4_early_demux,
+- .early_demux_handler = tcp_v4_early_demux,
++static const struct net_protocol tcp_protocol = {
+ .handler = tcp_v4_rcv,
+ .err_handler = tcp_v4_err,
+ .no_policy = 1,
+ .icmp_strict_tag_validation = 1,
+ };
+
+-/* thinking of making this const? Don't.
+- * early_demux can change based on sysctl.
+- */
+-static struct net_protocol udp_protocol = {
+- .early_demux = udp_v4_early_demux,
+- .early_demux_handler = udp_v4_early_demux,
++static const struct net_protocol udp_protocol = {
+ .handler = udp_rcv,
+ .err_handler = udp_err,
+ .no_policy = 1,
+diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
+index d5222c0fa87cb..459d7e630cb04 100644
+--- a/net/ipv4/ip_input.c
++++ b/net/ipv4/ip_input.c
+@@ -310,14 +310,13 @@ static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
+ ip_hdr(hint)->tos == iph->tos;
+ }
+
+-INDIRECT_CALLABLE_DECLARE(int udp_v4_early_demux(struct sk_buff *));
+-INDIRECT_CALLABLE_DECLARE(int tcp_v4_early_demux(struct sk_buff *));
++int tcp_v4_early_demux(struct sk_buff *skb);
++int udp_v4_early_demux(struct sk_buff *skb);
+ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
+ struct sk_buff *skb, struct net_device *dev,
+ const struct sk_buff *hint)
+ {
+ const struct iphdr *iph = ip_hdr(skb);
+- int (*edemux)(struct sk_buff *skb);
+ int err, drop_reason;
+ struct rtable *rt;
+
+@@ -330,21 +329,29 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
+ goto drop_error;
+ }
+
+- if (net->ipv4.sysctl_ip_early_demux &&
++ if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
+ !skb_dst(skb) &&
+ !skb->sk &&
+ !ip_is_fragment(iph)) {
+- const struct net_protocol *ipprot;
+- int protocol = iph->protocol;
+-
+- ipprot = rcu_dereference(inet_protos[protocol]);
+- if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
+- err = INDIRECT_CALL_2(edemux, tcp_v4_early_demux,
+- udp_v4_early_demux, skb);
+- if (unlikely(err))
+- goto drop_error;
+- /* must reload iph, skb->head might have changed */
+- iph = ip_hdr(skb);
++ switch (iph->protocol) {
++ case IPPROTO_TCP:
++ if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) {
++ tcp_v4_early_demux(skb);
++
++ /* must reload iph, skb->head might have changed */
++ iph = ip_hdr(skb);
++ }
++ break;
++ case IPPROTO_UDP:
++ if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) {
++ err = udp_v4_early_demux(skb);
++ if (unlikely(err))
++ goto drop_error;
++
++ /* must reload iph, skb->head might have changed */
++ iph = ip_hdr(skb);
++ }
++ break;
+ }
+ }
+
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index a36728277e321..495c58e442e2a 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -363,61 +363,6 @@ bad_key:
+ return ret;
+ }
+
+-static void proc_configure_early_demux(int enabled, int protocol)
+-{
+- struct net_protocol *ipprot;
+-#if IS_ENABLED(CONFIG_IPV6)
+- struct inet6_protocol *ip6prot;
+-#endif
+-
+- rcu_read_lock();
+-
+- ipprot = rcu_dereference(inet_protos[protocol]);
+- if (ipprot)
+- ipprot->early_demux = enabled ? ipprot->early_demux_handler :
+- NULL;
+-
+-#if IS_ENABLED(CONFIG_IPV6)
+- ip6prot = rcu_dereference(inet6_protos[protocol]);
+- if (ip6prot)
+- ip6prot->early_demux = enabled ? ip6prot->early_demux_handler :
+- NULL;
+-#endif
+- rcu_read_unlock();
+-}
+-
+-static int proc_tcp_early_demux(struct ctl_table *table, int write,
+- void *buffer, size_t *lenp, loff_t *ppos)
+-{
+- int ret = 0;
+-
+- ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos);
+-
+- if (write && !ret) {
+- int enabled = init_net.ipv4.sysctl_tcp_early_demux;
+-
+- proc_configure_early_demux(enabled, IPPROTO_TCP);
+- }
+-
+- return ret;
+-}
+-
+-static int proc_udp_early_demux(struct ctl_table *table, int write,
+- void *buffer, size_t *lenp, loff_t *ppos)
+-{
+- int ret = 0;
+-
+- ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos);
+-
+- if (write && !ret) {
+- int enabled = init_net.ipv4.sysctl_udp_early_demux;
+-
+- proc_configure_early_demux(enabled, IPPROTO_UDP);
+- }
+-
+- return ret;
+-}
+-
+ static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
+ int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
+@@ -720,14 +665,14 @@ static struct ctl_table ipv4_net_table[] = {
+ .data = &init_net.ipv4.sysctl_udp_early_demux,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+- .proc_handler = proc_udp_early_demux
++ .proc_handler = proc_dou8vec_minmax,
+ },
+ {
+ .procname = "tcp_early_demux",
+ .data = &init_net.ipv4.sysctl_tcp_early_demux,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+- .proc_handler = proc_tcp_early_demux
++ .proc_handler = proc_dou8vec_minmax,
+ },
+ {
+ .procname = "nexthop_compat_mode",
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index d4b1e2c5aa76d..32071529bfd98 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -45,20 +45,23 @@
+ #include <net/inet_ecn.h>
+ #include <net/dst_metadata.h>
+
+-INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *));
+ static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+ {
+- void (*edemux)(struct sk_buff *skb);
+-
+- if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
+- const struct inet6_protocol *ipprot;
+-
+- ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
+- if (ipprot && (edemux = READ_ONCE(ipprot->early_demux)))
+- INDIRECT_CALL_2(edemux, tcp_v6_early_demux,
+- udp_v6_early_demux, skb);
++ if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
++ !skb_dst(skb) && !skb->sk) {
++ switch (ipv6_hdr(skb)->nexthdr) {
++ case IPPROTO_TCP:
++ if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
++ tcp_v6_early_demux(skb);
++ break;
++ case IPPROTO_UDP:
++ if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
++ udp_v6_early_demux(skb);
++ break;
++ }
+ }
++
+ if (!skb_valid_dst(skb))
+ ip6_route_input(skb);
+ }
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 27274fc3619ab..0655fd8c67e93 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -6570,10 +6570,16 @@ static void __net_exit ip6_route_net_exit(struct net *net)
+ static int __net_init ip6_route_net_init_late(struct net *net)
+ {
+ #ifdef CONFIG_PROC_FS
+- proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
+- sizeof(struct ipv6_route_iter));
+- proc_create_net_single("rt6_stats", 0444, net->proc_net,
+- rt6_stats_seq_show, NULL);
++ if (!proc_create_net("ipv6_route", 0, net->proc_net,
++ &ipv6_route_seq_ops,
++ sizeof(struct ipv6_route_iter)))
++ return -ENOMEM;
++
++ if (!proc_create_net_single("rt6_stats", 0444, net->proc_net,
++ rt6_stats_seq_show, NULL)) {
++ remove_proc_entry("ipv6_route", net->proc_net);
++ return -ENOMEM;
++ }
+ #endif
+ return 0;
+ }
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 66d00368db828..51f4d330e820b 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1854,7 +1854,7 @@ do_time_wait:
+ goto discard_it;
+ }
+
+-INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
++void tcp_v6_early_demux(struct sk_buff *skb)
+ {
+ const struct ipv6hdr *hdr;
+ const struct tcphdr *th;
+@@ -2209,12 +2209,7 @@ struct proto tcpv6_prot = {
+ };
+ EXPORT_SYMBOL_GPL(tcpv6_prot);
+
+-/* thinking of making this const? Don't.
+- * early_demux can change based on sysctl.
+- */
+-static struct inet6_protocol tcpv6_protocol = {
+- .early_demux = tcp_v6_early_demux,
+- .early_demux_handler = tcp_v6_early_demux,
++static const struct inet6_protocol tcpv6_protocol = {
+ .handler = tcp_v6_rcv,
+ .err_handler = tcp_v6_err,
+ .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 19b6c4da0f42a..9dfb4bb54344b 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1046,7 +1046,7 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
+ return NULL;
+ }
+
+-INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
++void udp_v6_early_demux(struct sk_buff *skb)
+ {
+ struct net *net = dev_net(skb->dev);
+ const struct udphdr *uh;
+@@ -1659,12 +1659,7 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname,
+ return ipv6_getsockopt(sk, level, optname, optval, optlen);
+ }
+
+-/* thinking of making this const? Don't.
+- * early_demux can change based on sysctl.
+- */
+-static struct inet6_protocol udpv6_protocol = {
+- .early_demux = udp_v6_early_demux,
+- .early_demux_handler = udp_v6_early_demux,
++static const struct inet6_protocol udpv6_protocol = {
+ .handler = udpv6_rcv,
+ .err_handler = udpv6_err,
+ .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
+diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
+index 6e391308431da..3adc291d9ce18 100644
+--- a/net/netfilter/ipset/ip_set_hash_gen.h
++++ b/net/netfilter/ipset/ip_set_hash_gen.h
+@@ -42,31 +42,8 @@
+ #define AHASH_MAX_SIZE (6 * AHASH_INIT_SIZE)
+ /* Max number of elements in the array block when tuned */
+ #define AHASH_MAX_TUNED 64
+-
+ #define AHASH_MAX(h) ((h)->bucketsize)
+
+-/* Max number of elements can be tuned */
+-#ifdef IP_SET_HASH_WITH_MULTI
+-static u8
+-tune_bucketsize(u8 curr, u32 multi)
+-{
+- u32 n;
+-
+- if (multi < curr)
+- return curr;
+-
+- n = curr + AHASH_INIT_SIZE;
+- /* Currently, at listing one hash bucket must fit into a message.
+- * Therefore we have a hard limit here.
+- */
+- return n > curr && n <= AHASH_MAX_TUNED ? n : curr;
+-}
+-#define TUNE_BUCKETSIZE(h, multi) \
+- ((h)->bucketsize = tune_bucketsize((h)->bucketsize, multi))
+-#else
+-#define TUNE_BUCKETSIZE(h, multi)
+-#endif
+-
+ /* A hash bucket */
+ struct hbucket {
+ struct rcu_head rcu; /* for call_rcu */
+@@ -936,7 +913,12 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ goto set_full;
+ /* Create a new slot */
+ if (n->pos >= n->size) {
+- TUNE_BUCKETSIZE(h, multi);
++#ifdef IP_SET_HASH_WITH_MULTI
++ if (h->bucketsize >= AHASH_MAX_TUNED)
++ goto set_full;
++ else if (h->bucketsize < multi)
++ h->bucketsize += AHASH_INIT_SIZE;
++#endif
+ if (n->size >= AHASH_MAX(h)) {
+ /* Trigger rehashing */
+ mtype_data_next(&h->next, d);
+diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
+index f9b16f2b22191..fdacbc3c15bef 100644
+--- a/net/netfilter/ipvs/ip_vs_app.c
++++ b/net/netfilter/ipvs/ip_vs_app.c
+@@ -599,13 +599,19 @@ static const struct seq_operations ip_vs_app_seq_ops = {
+ int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs)
+ {
+ INIT_LIST_HEAD(&ipvs->app_list);
+- proc_create_net("ip_vs_app", 0, ipvs->net->proc_net, &ip_vs_app_seq_ops,
+- sizeof(struct seq_net_private));
++#ifdef CONFIG_PROC_FS
++ if (!proc_create_net("ip_vs_app", 0, ipvs->net->proc_net,
++ &ip_vs_app_seq_ops,
++ sizeof(struct seq_net_private)))
++ return -ENOMEM;
++#endif
+ return 0;
+ }
+
+ void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs)
+ {
+ unregister_ip_vs_app(ipvs, NULL /* all */);
++#ifdef CONFIG_PROC_FS
+ remove_proc_entry("ip_vs_app", ipvs->net->proc_net);
++#endif
+ }
+diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
+index fb67f1ca2495b..cb6d68220c265 100644
+--- a/net/netfilter/ipvs/ip_vs_conn.c
++++ b/net/netfilter/ipvs/ip_vs_conn.c
+@@ -1265,8 +1265,8 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
+ * The drop rate array needs tuning for real environments.
+ * Called from timer bh only => no locking
+ */
+- static const char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+- static char todrop_counter[9] = {0};
++ static const signed char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
++ static signed char todrop_counter[9] = {0};
+ int i;
+
+ /* if the conn entry hasn't lasted for 60 seconds, don't drop it.
+@@ -1447,20 +1447,36 @@ int __net_init ip_vs_conn_net_init(struct netns_ipvs *ipvs)
+ {
+ atomic_set(&ipvs->conn_count, 0);
+
+- proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net,
+- &ip_vs_conn_seq_ops, sizeof(struct ip_vs_iter_state));
+- proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net,
+- &ip_vs_conn_sync_seq_ops,
+- sizeof(struct ip_vs_iter_state));
++#ifdef CONFIG_PROC_FS
++ if (!proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net,
++ &ip_vs_conn_seq_ops,
++ sizeof(struct ip_vs_iter_state)))
++ goto err_conn;
++
++ if (!proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net,
++ &ip_vs_conn_sync_seq_ops,
++ sizeof(struct ip_vs_iter_state)))
++ goto err_conn_sync;
++#endif
++
+ return 0;
++
++#ifdef CONFIG_PROC_FS
++err_conn_sync:
++ remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
++err_conn:
++ return -ENOMEM;
++#endif
+ }
+
+ void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs)
+ {
+ /* flush all the connection entries first */
+ ip_vs_conn_flush(ipvs);
++#ifdef CONFIG_PROC_FS
+ remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
+ remove_proc_entry("ip_vs_conn_sync", ipvs->net->proc_net);
++#endif
+ }
+
+ int __init ip_vs_conn_init(void)
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index f7a5b8414423d..899f01c6c26c6 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -8308,9 +8308,6 @@ static void nft_commit_release(struct nft_trans *trans)
+ nf_tables_chain_destroy(&trans->ctx);
+ break;
+ case NFT_MSG_DELRULE:
+- if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+- nft_flow_rule_destroy(nft_trans_flow_rule(trans));
+-
+ nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
+ break;
+ case NFT_MSG_DELSET:
+@@ -8767,6 +8764,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ nft_rule_expr_deactivate(&trans->ctx,
+ nft_trans_rule(trans),
+ NFT_TRANS_COMMIT);
++
++ if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
++ nft_flow_rule_destroy(nft_trans_flow_rule(trans));
+ break;
+ case NFT_MSG_NEWSET:
+ nft_clear(net, nft_trans_set(trans));
+@@ -9824,6 +9824,8 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
+ nft_net = nft_pernet(net);
+ deleted = 0;
+ mutex_lock(&nft_net->commit_mutex);
++ if (!list_empty(&nf_tables_destroy_list))
++ rcu_barrier();
+ again:
+ list_for_each_entry(table, &nft_net->tables, list) {
+ if (nft_table_has_owner(table) &&
+diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
+index f6102e6f51617..730d2205f1976 100644
+--- a/net/rose/rose_link.c
++++ b/net/rose/rose_link.c
+@@ -236,6 +236,9 @@ void rose_transmit_clear_request(struct rose_neigh *neigh, unsigned int lci, uns
+ unsigned char *dptr;
+ int len;
+
++ if (!neigh->dev)
++ return;
++
+ len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 3;
+
+ if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
+diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
+index f1e013e3f04a9..935d90874b1b7 100644
+--- a/net/sched/sch_red.c
++++ b/net/sched/sch_red.c
+@@ -72,6 +72,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ {
+ struct red_sched_data *q = qdisc_priv(sch);
+ struct Qdisc *child = q->qdisc;
++ unsigned int len;
+ int ret;
+
+ q->vars.qavg = red_calc_qavg(&q->parms,
+@@ -126,9 +127,10 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ break;
+ }
+
++ len = qdisc_pkt_len(skb);
+ ret = qdisc_enqueue(skb, child, to_free);
+ if (likely(ret == NET_XMIT_SUCCESS)) {
+- qdisc_qstats_backlog_inc(sch, skb);
++ sch->qstats.backlog += len;
+ sch->q.qlen++;
+ } else if (net_xmit_drop_count(ret)) {
+ q->stats.pdrop++;
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 26f81e2e1dfba..d5ddf283ed8e2 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2744,14 +2744,14 @@ static int __init smc_init(void)
+
+ rc = register_pernet_subsys(&smc_net_stat_ops);
+ if (rc)
+- return rc;
++ goto out_pernet_subsys;
+
+ smc_ism_init();
+ smc_clc_init();
+
+ rc = smc_nl_init();
+ if (rc)
+- goto out_pernet_subsys;
++ goto out_pernet_subsys_stat;
+
+ rc = smc_pnet_init();
+ if (rc)
+@@ -2829,6 +2829,8 @@ out_pnet:
+ smc_pnet_exit();
+ out_nl:
+ smc_nl_exit();
++out_pernet_subsys_stat:
++ unregister_pernet_subsys(&smc_net_stat_ops);
+ out_pernet_subsys:
+ unregister_pernet_subsys(&smc_net_ops);
+
+diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
+index a7020b1f3ec72..55da1b627a7db 100644
+--- a/net/sunrpc/sysfs.c
++++ b/net/sunrpc/sysfs.c
+@@ -525,13 +525,16 @@ void rpc_sysfs_client_setup(struct rpc_clnt *clnt,
+ struct net *net)
+ {
+ struct rpc_sysfs_client *rpc_client;
++ struct rpc_sysfs_xprt_switch *xswitch =
++ (struct rpc_sysfs_xprt_switch *)xprt_switch->xps_sysfs;
++
++ if (!xswitch)
++ return;
+
+ rpc_client = rpc_sysfs_client_alloc(rpc_sunrpc_client_kobj,
+ net, clnt->cl_clid);
+ if (rpc_client) {
+ char name[] = "switch";
+- struct rpc_sysfs_xprt_switch *xswitch =
+- (struct rpc_sysfs_xprt_switch *)xprt_switch->xps_sysfs;
+ int ret;
+
+ clnt->cl_sysfs = rpc_client;
+@@ -565,6 +568,8 @@ void rpc_sysfs_xprt_switch_setup(struct rpc_xprt_switch *xprt_switch,
+ rpc_xprt_switch->xprt_switch = xprt_switch;
+ rpc_xprt_switch->xprt = xprt;
+ kobject_uevent(&rpc_xprt_switch->kobject, KOBJ_ADD);
++ } else {
++ xprt_switch->xps_sysfs = NULL;
+ }
+ }
+
+@@ -576,6 +581,9 @@ void rpc_sysfs_xprt_setup(struct rpc_xprt_switch *xprt_switch,
+ struct rpc_sysfs_xprt_switch *switch_obj =
+ (struct rpc_sysfs_xprt_switch *)xprt_switch->xps_sysfs;
+
++ if (!switch_obj)
++ return;
++
+ rpc_xprt = rpc_sysfs_xprt_alloc(&switch_obj->kobject, xprt, gfp_flags);
+ if (rpc_xprt) {
+ xprt->xprt_sysfs = rpc_xprt;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index b7be8d066753c..a579e28bd213f 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -504,12 +504,6 @@ static void unix_sock_destructor(struct sock *sk)
+
+ skb_queue_purge(&sk->sk_receive_queue);
+
+-#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+- if (u->oob_skb) {
+- kfree_skb(u->oob_skb);
+- u->oob_skb = NULL;
+- }
+-#endif
+ WARN_ON(refcount_read(&sk->sk_wmem_alloc));
+ WARN_ON(!sk_unhashed(sk));
+ WARN_ON(sk->sk_socket);
+@@ -556,6 +550,13 @@ static void unix_release_sock(struct sock *sk, int embrion)
+
+ unix_state_unlock(sk);
+
++#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
++ if (u->oob_skb) {
++ kfree_skb(u->oob_skb);
++ u->oob_skb = NULL;
++ }
++#endif
++
+ wake_up_interruptible_all(&u->peer_wait);
+
+ if (skpair != NULL) {
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 5d46036f3ad74..dc36a46ce0e75 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1897,8 +1897,11 @@ static int vsock_connectible_wait_data(struct sock *sk,
+ err = 0;
+ transport = vsk->transport;
+
+- while ((data = vsock_connectible_has_data(vsk)) == 0) {
++ while (1) {
+ prepare_to_wait(sk_sleep(sk), wait, TASK_INTERRUPTIBLE);
++ data = vsock_connectible_has_data(vsk);
++ if (data != 0)
++ break;
+
+ if (sk->sk_err != 0 ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
+diff --git a/security/commoncap.c b/security/commoncap.c
+index 5fc8986c3c77c..bc751fa5adad7 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -401,8 +401,10 @@ int cap_inode_getsecurity(struct user_namespace *mnt_userns,
+ &tmpbuf, size, GFP_NOFS);
+ dput(dentry);
+
+- if (ret < 0 || !tmpbuf)
+- return ret;
++ if (ret < 0 || !tmpbuf) {
++ size = ret;
++ goto out_free;
++ }
+
+ fs_ns = inode->i_sb->s_user_ns;
+ cap = (struct vfs_cap_data *) tmpbuf;
+diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h
+index 3e2c6f2ed587f..d64020c1922c2 100644
+--- a/tools/include/nolibc/nolibc.h
++++ b/tools/include/nolibc/nolibc.h
+@@ -2408,9 +2408,9 @@ static __attribute__((unused))
+ int memcmp(const void *s1, const void *s2, size_t n)
+ {
+ size_t ofs = 0;
+- char c1 = 0;
++ int c1 = 0;
+
+- while (ofs < n && !(c1 = ((char *)s1)[ofs] - ((char *)s2)[ofs])) {
++ while (ofs < n && !(c1 = ((unsigned char *)s1)[ofs] - ((unsigned char *)s2)[ofs])) {
+ ofs++;
+ }
+ return c1;