author     Mike Pagano <mpagano@gentoo.org>  2019-01-13 14:27:29 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2019-01-13 14:27:29 -0500
commit     ef1269908b6b202fc1ae1eea5f49b0e3994d79c8 (patch)
tree       2cd0e35a0ab686e7694239f578bce0aafa0fe601
parent     proj/linux-patches: Linux patch 4.14.92 (diff)
download   linux-patches-ef1269908b6b202fc1ae1eea5f49b0e3994d79c8.tar.gz
           linux-patches-ef1269908b6b202fc1ae1eea5f49b0e3994d79c8.tar.bz2
           linux-patches-ef1269908b6b202fc1ae1eea5f49b0e3994d79c8.zip
proj/linux-patches: Linux patch 4.14.93 (4.14-100)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README               |    4
-rw-r--r--  1092_linux-4.14.93.patch  | 3993
2 files changed, 3997 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 45efed8b..6f666099 100644
--- a/0000_README
+++ b/0000_README
@@ -411,6 +411,10 @@ Patch: 1091_4.14.92.patch
From: http://www.kernel.org
Desc: Linux 4.14.92
+Patch: 1092_4.14.93.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.93
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1092_linux-4.14.93.patch b/1092_linux-4.14.93.patch
new file mode 100644
index 00000000..23543fc9
--- /dev/null
+++ b/1092_linux-4.14.93.patch
@@ -0,0 +1,3993 @@
+diff --git a/Makefile b/Makefile
+index be4d1f25cb29..a521e4cbd66f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 92
++SUBLEVEL = 93
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+@@ -479,18 +479,18 @@ endif
+
+ ifeq ($(cc-name),clang)
+ ifneq ($(CROSS_COMPILE),)
+-CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
++CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
+ GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
+-CLANG_PREFIX := --prefix=$(GCC_TOOLCHAIN_DIR)
++CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
+ GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
+ endif
+ ifneq ($(GCC_TOOLCHAIN),)
+-CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
++CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN)
+ endif
+-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
+-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
+-KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+-KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
++CLANG_FLAGS += -no-integrated-as
++KBUILD_CFLAGS += $(CLANG_FLAGS)
++KBUILD_AFLAGS += $(CLANG_FLAGS)
++export CLANG_FLAGS
+ endif
+
+ RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
+@@ -954,11 +954,6 @@ ifdef CONFIG_STACK_VALIDATION
+ ifeq ($(has_libelf),1)
+ objtool_target := tools/objtool FORCE
+ else
+- ifdef CONFIG_UNWINDER_ORC
+- $(error "Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+- else
+- $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+- endif
+ SKIP_STACK_VALIDATION := 1
+ export SKIP_STACK_VALIDATION
+ endif
+@@ -1102,6 +1097,14 @@ uapi-asm-generic:
+
+ PHONY += prepare-objtool
+ prepare-objtool: $(objtool_target)
++ifeq ($(SKIP_STACK_VALIDATION),1)
++ifdef CONFIG_UNWINDER_ORC
++ @echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
++ @false
++else
++ @echo "warning: Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
++endif
++endif
+
+ # Check for CONFIG flags that require compiler support. Abort the build
+ # after .config has been processed, but before the kernel build starts.
+diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts
+index e7998308861f..f8caea16bc2d 100644
+--- a/arch/arm/boot/dts/imx7d-nitrogen7.dts
++++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts
+@@ -117,13 +117,17 @@
+ compatible = "regulator-fixed";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+- clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+- clock-names = "slow";
+ regulator-name = "reg_wlan";
+ startup-delay-us = <70000>;
+ gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
++
++ usdhc2_pwrseq: usdhc2_pwrseq {
++ compatible = "mmc-pwrseq-simple";
++ clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
++ clock-names = "ext_clock";
++ };
+ };
+
+ &adc1 {
+@@ -430,6 +434,7 @@
+ bus-width = <4>;
+ non-removable;
+ vmmc-supply = <&reg_wlan>;
++ mmc-pwrseq = <&usdhc2_pwrseq>;
+ cap-power-off-card;
+ keep-power-in-suspend;
+ status = "okay";
+diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
+index c5a5c3a70ab1..edb888ac5ad3 100644
+--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
++++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
+@@ -108,7 +108,7 @@ int __init imx6sx_cpuidle_init(void)
+ * except for power up sw2iso which need to be
+ * larger than LDO ramp up time.
+ */
+- imx_gpc_set_arm_power_up_timing(2, 1);
++ imx_gpc_set_arm_power_up_timing(0xf, 1);
+ imx_gpc_set_arm_power_down_timing(1, 1);
+
+ return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 48f2b3657507..0c5f70e6d5cf 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -18,7 +18,7 @@ ifeq ($(CONFIG_RELOCATABLE), y)
+ # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
+ # for relative relocs, since this leads to better Image compression
+ # with the relocation offsets always being zero.
+-LDFLAGS_vmlinux += -pie -shared -Bsymbolic \
++LDFLAGS_vmlinux += -shared -Bsymbolic -z notext -z norelro \
+ $(call ld-option, --no-apply-dynamic-relocs)
+ endif
+
+diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
+index c7fcb232fe47..40f9f0b078a4 100644
+--- a/arch/arm64/kernel/image.h
++++ b/arch/arm64/kernel/image.h
+@@ -75,16 +75,6 @@
+
+ __efistub_stext_offset = stext - _text;
+
+-/*
+- * Prevent the symbol aliases below from being emitted into the kallsyms
+- * table, by forcing them to be absolute symbols (which are conveniently
+- * ignored by scripts/kallsyms) rather than section relative symbols.
+- * The distinction is only relevant for partial linking, and only for symbols
+- * that are defined within a section declaration (which is not the case for
+- * the definitions below) so the resulting values will be identical.
+- */
+-#define KALLSYMS_HIDE(sym) ABSOLUTE(sym)
+-
+ /*
+ * The EFI stub has its own symbol namespace prefixed by __efistub_, to
+ * isolate it from the kernel proper. The following symbols are legally
+@@ -94,27 +84,27 @@ __efistub_stext_offset = stext - _text;
+ * linked at. The routines below are all implemented in assembler in a
+ * position independent manner
+ */
+-__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp);
+-__efistub_memchr = KALLSYMS_HIDE(__pi_memchr);
+-__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy);
+-__efistub_memmove = KALLSYMS_HIDE(__pi_memmove);
+-__efistub_memset = KALLSYMS_HIDE(__pi_memset);
+-__efistub_strlen = KALLSYMS_HIDE(__pi_strlen);
+-__efistub_strnlen = KALLSYMS_HIDE(__pi_strnlen);
+-__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp);
+-__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp);
+-__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area);
++__efistub_memcmp = __pi_memcmp;
++__efistub_memchr = __pi_memchr;
++__efistub_memcpy = __pi_memcpy;
++__efistub_memmove = __pi_memmove;
++__efistub_memset = __pi_memset;
++__efistub_strlen = __pi_strlen;
++__efistub_strnlen = __pi_strnlen;
++__efistub_strcmp = __pi_strcmp;
++__efistub_strncmp = __pi_strncmp;
++__efistub___flush_dcache_area = __pi___flush_dcache_area;
+
+ #ifdef CONFIG_KASAN
+-__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy);
+-__efistub___memmove = KALLSYMS_HIDE(__pi_memmove);
+-__efistub___memset = KALLSYMS_HIDE(__pi_memset);
++__efistub___memcpy = __pi_memcpy;
++__efistub___memmove = __pi_memmove;
++__efistub___memset = __pi_memset;
+ #endif
+
+-__efistub__text = KALLSYMS_HIDE(_text);
+-__efistub__end = KALLSYMS_HIDE(_end);
+-__efistub__edata = KALLSYMS_HIDE(_edata);
+-__efistub_screen_info = KALLSYMS_HIDE(screen_info);
++__efistub__text = _text;
++__efistub__end = _end;
++__efistub__edata = _edata;
++__efistub_screen_info = screen_info;
+
+ #endif
+
+diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
+index ddfd3c0942f7..6edfdf5b061d 100644
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -99,7 +99,8 @@ SECTIONS
+ *(.discard)
+ *(.discard.*)
+ *(.interp .dynamic)
+- *(.dynsym .dynstr .hash)
++ *(.dynsym .dynstr .hash .gnu.hash)
++ *(.eh_frame)
+ }
+
+ . = KIMAGE_VADDR + TEXT_OFFSET;
+@@ -176,12 +177,12 @@ SECTIONS
+
+ PERCPU_SECTION(L1_CACHE_BYTES)
+
+- .rela : ALIGN(8) {
++ .rela.dyn : ALIGN(8) {
+ *(.rela .rela*)
+ }
+
+- __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
+- __rela_size = SIZEOF(.rela);
++ __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
++ __rela_size = SIZEOF(.rela.dyn);
+
+ . = ALIGN(SEGMENT_ALIGN);
+ __initdata_end = .;
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index 1381693a4a51..7452e50f4d1f 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -236,7 +236,12 @@ endif
+
+ # Work around a gcc code-gen bug with -fno-omit-frame-pointer.
+ ifeq ($(CONFIG_FUNCTION_TRACER),y)
+-KBUILD_CFLAGS += -mno-sched-epilog
++# Work around gcc code-gen bugs with -pg / -fno-omit-frame-pointer in gcc <= 4.8
++# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44199
++# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52828
++ifneq ($(cc-name),clang)
++KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, -mno-sched-epilog)
++endif
+ endif
+
+ cpu-as-$(CONFIG_4xx) += -Wa,-m405
+diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
+index b479926f0167..e2a5a932c24a 100644
+--- a/arch/powerpc/boot/Makefile
++++ b/arch/powerpc/boot/Makefile
+@@ -49,6 +49,11 @@ endif
+
+ BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
+
++ifeq ($(cc-name),clang)
++BOOTCFLAGS += $(CLANG_FLAGS)
++BOOTAFLAGS += $(CLANG_FLAGS)
++endif
++
+ ifdef CONFIG_DEBUG_INFO
+ BOOTCFLAGS += -g
+ endif
+diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
+index 32dfe6d083f3..9b9d17437373 100644
+--- a/arch/powerpc/boot/crt0.S
++++ b/arch/powerpc/boot/crt0.S
+@@ -15,7 +15,7 @@
+ RELA = 7
+ RELACOUNT = 0x6ffffff9
+
+- .text
++ .data
+ /* A procedure descriptor used when booting this as a COFF file.
+ * When making COFF, this comes first in the link and we're
+ * linked at 0x500000.
+@@ -23,6 +23,8 @@ RELACOUNT = 0x6ffffff9
+ .globl _zimage_start_opd
+ _zimage_start_opd:
+ .long 0x500000, 0, 0, 0
++ .text
++ b _zimage_start
+
+ #ifdef __powerpc64__
+ .balign 8
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index 1479c61e29c5..a1089c9a9aa5 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -5,6 +5,9 @@
+
+ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+
++# Disable clang warning for using setjmp without setjmp.h header
++CFLAGS_crash.o += $(call cc-disable-warning, builtin-requires-header)
++
+ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+
+ ifeq ($(CONFIG_PPC64),y)
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 92fb1c8dbbd8..636ea854808e 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -866,7 +866,23 @@ static long restore_tm_user_regs(struct pt_regs *regs,
+ /* If TM bits are set to the reserved value, it's an invalid context */
+ if (MSR_TM_RESV(msr_hi))
+ return 1;
+- /* Pull in the MSR TM bits from the user context */
++
++ /*
++ * Disabling preemption, since it is unsafe to be preempted
++ * with MSR[TS] set without recheckpointing.
++ */
++ preempt_disable();
++
++ /*
++ * CAUTION:
++ * After regs->MSR[TS] being updated, make sure that get_user(),
++ * put_user() or similar functions are *not* called. These
++ * functions can generate page faults which will cause the process
++ * to be de-scheduled with MSR[TS] set but without calling
++ * tm_recheckpoint(). This can cause a bug.
++ *
++ * Pull in the MSR TM bits from the user context
++ */
+ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
+ /* Now, recheckpoint. This loads up all of the checkpointed (older)
+ * registers, including FP and V[S]Rs. After recheckpointing, the
+@@ -891,6 +907,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
+ }
+ #endif
+
++ preempt_enable();
++
+ return 0;
+ }
+ #endif
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index b2c002993d78..979b9463e17b 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -452,20 +452,6 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
+ if (MSR_TM_RESV(msr))
+ return -EINVAL;
+
+- /* pull in MSR TS bits from user context */
+- regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+-
+- /*
+- * Ensure that TM is enabled in regs->msr before we leave the signal
+- * handler. It could be the case that (a) user disabled the TM bit
+- * through the manipulation of the MSR bits in uc_mcontext or (b) the
+- * TM bit was disabled because a sufficient number of context switches
+- * happened whilst in the signal handler and load_tm overflowed,
+- * disabling the TM bit. In either case we can end up with an illegal
+- * TM state leading to a TM Bad Thing when we return to userspace.
+- */
+- regs->msr |= MSR_TM;
+-
+ /* pull in MSR LE from user context */
+ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+
+@@ -557,6 +543,34 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
+ tm_enable();
+ /* Make sure the transaction is marked as failed */
+ tsk->thread.tm_texasr |= TEXASR_FS;
++
++ /*
++ * Disabling preemption, since it is unsafe to be preempted
++ * with MSR[TS] set without recheckpointing.
++ */
++ preempt_disable();
++
++ /* pull in MSR TS bits from user context */
++ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
++
++ /*
++ * Ensure that TM is enabled in regs->msr before we leave the signal
++ * handler. It could be the case that (a) user disabled the TM bit
++ * through the manipulation of the MSR bits in uc_mcontext or (b) the
++ * TM bit was disabled because a sufficient number of context switches
++ * happened whilst in the signal handler and load_tm overflowed,
++ * disabling the TM bit. In either case we can end up with an illegal
++ * TM state leading to a TM Bad Thing when we return to userspace.
++ *
++ * CAUTION:
++ * After regs->MSR[TS] being updated, make sure that get_user(),
++ * put_user() or similar functions are *not* called. These
++ * functions can generate page faults which will cause the process
++ * to be de-scheduled with MSR[TS] set but without calling
++ * tm_recheckpoint(). This can cause a bug.
++ */
++ regs->msr |= MSR_TM;
++
+ /* This loads the checkpointed FP/VEC state, if used */
+ tm_recheckpoint(&tsk->thread, msr);
+
+@@ -570,6 +584,8 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
+ regs->msr |= MSR_VEC;
+ }
+
++ preempt_enable();
++
+ return err;
+ }
+ #endif
+diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
+index 31c1c61afaa4..0bbaf7344872 100644
+--- a/arch/powerpc/mm/dump_linuxpagetables.c
++++ b/arch/powerpc/mm/dump_linuxpagetables.c
+@@ -19,6 +19,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/io.h>
+ #include <linux/mm.h>
++#include <linux/highmem.h>
+ #include <linux/sched.h>
+ #include <linux/seq_file.h>
+ #include <asm/fixmap.h>
+diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
+index 1bc3abb237cd..549e99e71112 100644
+--- a/arch/powerpc/xmon/Makefile
++++ b/arch/powerpc/xmon/Makefile
+@@ -1,7 +1,10 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Makefile for xmon
+
+-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
++# Disable clang warning for using setjmp without setjmp.h header
++subdir-ccflags-y := $(call cc-disable-warning, builtin-requires-header)
++
++subdir-ccflags-$(CONFIG_PPC_WERROR) += -Werror
+
+ GCOV_PROFILE := n
+ UBSAN_SANITIZE := n
+diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
+index 7764617b8f9c..bf6d2692fc60 100644
+--- a/arch/x86/include/asm/pgtable_64_types.h
++++ b/arch/x86/include/asm/pgtable_64_types.h
+@@ -94,6 +94,11 @@ typedef struct { pteval_t pte; } pte_t;
+ # define __VMEMMAP_BASE _AC(0xffffea0000000000, UL)
+ #endif
+
++#define GUARD_HOLE_PGD_ENTRY -256UL
++#define GUARD_HOLE_SIZE (16UL << PGDIR_SHIFT)
++#define GUARD_HOLE_BASE_ADDR (GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
++#define GUARD_HOLE_END_ADDR (GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)
++
+ #define LDT_PGD_ENTRY -240UL
+ #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
+
+diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
+index 2a4849e92831..6bca45d06676 100644
+--- a/arch/x86/mm/dump_pagetables.c
++++ b/arch/x86/mm/dump_pagetables.c
+@@ -51,10 +51,10 @@ struct addr_marker {
+ enum address_markers_idx {
+ USER_SPACE_NR = 0,
+ KERNEL_SPACE_NR,
+- LOW_KERNEL_NR,
+-#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
++#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ LDT_NR,
+ #endif
++ LOW_KERNEL_NR,
+ VMALLOC_START_NR,
+ VMEMMAP_START_NR,
+ #ifdef CONFIG_KASAN
+@@ -62,9 +62,6 @@ enum address_markers_idx {
+ KASAN_SHADOW_END_NR,
+ #endif
+ CPU_ENTRY_AREA_NR,
+-#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
+- LDT_NR,
+-#endif
+ #ifdef CONFIG_X86_ESPFIX64
+ ESPFIX_START_NR,
+ #endif
+@@ -465,11 +462,11 @@ static inline bool is_hypervisor_range(int idx)
+ {
+ #ifdef CONFIG_X86_64
+ /*
+- * ffff800000000000 - ffff87ffffffffff is reserved for
+- * the hypervisor.
++ * A hole in the beginning of kernel address space reserved
++ * for a hypervisor.
+ */
+- return (idx >= pgd_index(__PAGE_OFFSET) - 16) &&
+- (idx < pgd_index(__PAGE_OFFSET));
++ return (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
++ (idx < pgd_index(GUARD_HOLE_END_ADDR));
+ #else
+ return false;
+ #endif
+diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
+index b33fa127a613..7631e6130d44 100644
+--- a/arch/x86/xen/mmu_pv.c
++++ b/arch/x86/xen/mmu_pv.c
+@@ -614,19 +614,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long limit)
+ {
+ int i, nr, flush = 0;
+- unsigned hole_low, hole_high;
++ unsigned hole_low = 0, hole_high = 0;
+
+ /* The limit is the last byte to be touched */
+ limit--;
+ BUG_ON(limit >= FIXADDR_TOP);
+
++#ifdef CONFIG_X86_64
+ /*
+ * 64-bit has a great big hole in the middle of the address
+- * space, which contains the Xen mappings. On 32-bit these
+- * will end up making a zero-sized hole and so is a no-op.
++ * space, which contains the Xen mappings.
+ */
+- hole_low = pgd_index(USER_LIMIT);
+- hole_high = pgd_index(PAGE_OFFSET);
++ hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
++ hole_high = pgd_index(GUARD_HOLE_END_ADDR);
++#endif
+
+ nr = pgd_index(limit) + 1;
+ for (i = 0; i < nr; i++) {
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index 27b202c64c84..a46776a84480 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -366,8 +366,10 @@ static ssize_t backing_dev_store(struct device *dev,
+
+ bdev = bdgrab(I_BDEV(inode));
+ err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
+- if (err < 0)
++ if (err < 0) {
++ bdev = NULL;
+ goto out;
++ }
+
+ nr_pages = i_size_read(inode) >> PAGE_SHIFT;
+ bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index 45c65f805fd6..be85d4b39e99 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -777,9 +777,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
+ "marvell,armada-370-gpio"))
+ return 0;
+
+- if (IS_ERR(mvchip->clk))
+- return PTR_ERR(mvchip->clk);
+-
+ /*
+ * There are only two sets of PWM configuration registers for
+ * all the GPIO lines on those SoCs which this driver reserves
+@@ -790,6 +787,9 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
+ if (!res)
+ return 0;
+
++ if (IS_ERR(mvchip->clk))
++ return PTR_ERR(mvchip->clk);
++
+ /*
+ * Use set A for lines of GPIO chip with id 0, B for GPIO chip
+ * with id 1. Don't allow further GPIO chips to be used for PWM.
+diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
+index 502c7eb708c2..5bd3c2ef0067 100644
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -354,6 +354,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
+ if (vc4_state->is_unity)
+ vc4_state->x_scaling[0] = VC4_SCALING_PPF;
+ } else {
++ vc4_state->is_yuv = false;
+ vc4_state->x_scaling[1] = VC4_SCALING_NONE;
+ vc4_state->y_scaling[1] = VC4_SCALING_NONE;
+ }
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index fcc688df694c..28ae3dc57103 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -17,6 +17,9 @@
+ #ifndef HID_IDS_H_FILE
+ #define HID_IDS_H_FILE
+
++#define USB_VENDOR_ID_258A 0x258a
++#define USB_DEVICE_ID_258A_6A88 0x6a88
++
+ #define USB_VENDOR_ID_3M 0x0596
+ #define USB_DEVICE_ID_3M1968 0x0500
+ #define USB_DEVICE_ID_3M2256 0x0502
+diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
+index 1882a4ab0f29..98b059d79bc8 100644
+--- a/drivers/hid/hid-ite.c
++++ b/drivers/hid/hid-ite.c
+@@ -42,6 +42,7 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
+
+ static const struct hid_device_id ite_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, ite_devices);
+diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
+index 50b89ea0e60f..247a62604d1f 100644
+--- a/drivers/hv/Kconfig
++++ b/drivers/hv/Kconfig
+@@ -2,7 +2,7 @@ menu "Microsoft Hyper-V guest support"
+
+ config HYPERV
+ tristate "Microsoft Hyper-V client drivers"
+- depends on X86 && ACPI && PCI && X86_LOCAL_APIC && HYPERVISOR_GUEST
++ depends on X86 && ACPI && X86_LOCAL_APIC && HYPERVISOR_GUEST
+ select PARAVIRT
+ help
+ Select this option to run Linux as a Hyper-V client operating
+diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
+index 58ac786634dc..82f2b70ca5bf 100644
+--- a/drivers/hwtracing/intel_th/msu.c
++++ b/drivers/hwtracing/intel_th/msu.c
+@@ -1431,7 +1431,8 @@ nr_pages_store(struct device *dev, struct device_attribute *attr,
+ if (!end)
+ break;
+
+- len -= end - p;
++ /* consume the number and the following comma, hence +1 */
++ len -= end - p + 1;
+ p = end + 1;
+ } while (len);
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index 4d84b010b3ee..74328561bee2 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -845,11 +845,16 @@ static enum resp_states do_complete(struct rxe_qp *qp,
+
+ memset(&cqe, 0, sizeof(cqe));
+
+- wc->wr_id = wqe->wr_id;
+- wc->status = qp->resp.status;
+- wc->qp = &qp->ibqp;
++ if (qp->rcq->is_user) {
++ uwc->status = qp->resp.status;
++ uwc->qp_num = qp->ibqp.qp_num;
++ uwc->wr_id = wqe->wr_id;
++ } else {
++ wc->status = qp->resp.status;
++ wc->qp = &qp->ibqp;
++ wc->wr_id = wqe->wr_id;
++ }
+
+- /* fields after status are not required for errors */
+ if (wc->status == IB_WC_SUCCESS) {
+ wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
+ pkt->mask & RXE_WRITE_MASK) ?
+diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
+index ce8e2baf31bb..616fdd94b069 100644
+--- a/drivers/input/keyboard/omap4-keypad.c
++++ b/drivers/input/keyboard/omap4-keypad.c
+@@ -126,12 +126,8 @@ static irqreturn_t omap4_keypad_irq_handler(int irq, void *dev_id)
+ {
+ struct omap4_keypad *keypad_data = dev_id;
+
+- if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)) {
+- /* Disable interrupts */
+- kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
+- OMAP4_VAL_IRQDISABLE);
++ if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS))
+ return IRQ_WAKE_THREAD;
+- }
+
+ return IRQ_NONE;
+ }
+@@ -173,11 +169,6 @@ static irqreturn_t omap4_keypad_irq_thread_fn(int irq, void *dev_id)
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
+ kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
+
+- /* enable interrupts */
+- kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
+- OMAP4_DEF_IRQENABLE_EVENTEN |
+- OMAP4_DEF_IRQENABLE_LONGKEY);
+-
+ return IRQ_HANDLED;
+ }
+
+@@ -214,9 +205,10 @@ static void omap4_keypad_close(struct input_dev *input)
+
+ disable_irq(keypad_data->irq);
+
+- /* Disable interrupts */
++ /* Disable interrupts and wake-up events */
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
+ OMAP4_VAL_IRQDISABLE);
++ kbd_writel(keypad_data, OMAP4_KBD_WAKEUPENABLE, 0);
+
+ /* clear pending interrupts */
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
+@@ -364,7 +356,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
+ }
+
+ error = request_threaded_irq(keypad_data->irq, omap4_keypad_irq_handler,
+- omap4_keypad_irq_thread_fn, 0,
++ omap4_keypad_irq_thread_fn, IRQF_ONESHOT,
+ "omap4-keypad", keypad_data);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register interrupt\n");
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 54f0d037b5b6..e9ec5d10e0a9 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -171,6 +171,7 @@ static const char * const smbus_pnp_ids[] = {
+ "LEN0046", /* X250 */
+ "LEN004a", /* W541 */
+ "LEN005b", /* P50 */
++ "LEN005e", /* T560 */
+ "LEN0071", /* T480 */
+ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
+ "LEN0073", /* X1 Carbon G5 (Elantech) */
+@@ -178,6 +179,7 @@ static const char * const smbus_pnp_ids[] = {
+ "LEN0096", /* X280 */
+ "LEN0097", /* X280 -> ALPS trackpoint */
+ "LEN200f", /* T450s */
++ "SYN3052", /* HP EliteBook 840 G4 */
+ "SYN3221", /* HP 15-ay000 */
+ NULL
+ };
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index e86c1c8ec7f6..34006354d2eb 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -2093,7 +2093,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
+ * than default. Unnecessary for PT mode.
+ */
+ if (translation != CONTEXT_TT_PASS_THROUGH) {
+- for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
++ for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
+ ret = -ENOMEM;
+ pgd = phys_to_virt(dma_pte_addr(pgd));
+ if (!dma_pte_present(pgd))
+@@ -2107,7 +2107,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
+ translation = CONTEXT_TT_MULTI_LEVEL;
+
+ context_set_address_root(context, virt_to_phys(pgd));
+- context_set_address_width(context, iommu->agaw);
++ context_set_address_width(context, agaw);
+ } else {
+ /*
+ * In pass through mode, AW must be programmed to
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index bda3caca23ca..8573c70a1880 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -139,10 +139,26 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
+ {
+ struct scatterlist sg;
+
+- sg_init_one(&sg, data, len);
+- ahash_request_set_crypt(req, &sg, NULL, len);
+-
+- return verity_complete_op(res, crypto_ahash_update(req));
++ if (likely(!is_vmalloc_addr(data))) {
++ sg_init_one(&sg, data, len);
++ ahash_request_set_crypt(req, &sg, NULL, len);
++ return verity_complete_op(res, crypto_ahash_update(req));
++ } else {
++ do {
++ int r;
++ size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
++ flush_kernel_vmap_range((void *)data, this_step);
++ sg_init_table(&sg, 1);
++ sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
++ ahash_request_set_crypt(req, &sg, NULL, this_step);
++ r = verity_complete_op(res, crypto_ahash_update(req));
++ if (unlikely(r))
++ return r;
++ data += this_step;
++ len -= this_step;
++ } while (len);
++ return 0;
++ }
+ }
+
+ /*
+diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
+index ba6b0a90ecfb..532bfce7f072 100644
+--- a/drivers/md/dm-zoned-target.c
++++ b/drivers/md/dm-zoned-target.c
+@@ -20,7 +20,6 @@ struct dmz_bioctx {
+ struct dm_zone *zone;
+ struct bio *bio;
+ atomic_t ref;
+- blk_status_t status;
+ };
+
+ /*
+@@ -78,65 +77,66 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
+ {
+ struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+
+- if (bioctx->status == BLK_STS_OK && status != BLK_STS_OK)
+- bioctx->status = status;
+- bio_endio(bio);
++ if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
++ bio->bi_status = status;
++
++ if (atomic_dec_and_test(&bioctx->ref)) {
++ struct dm_zone *zone = bioctx->zone;
++
++ if (zone) {
++ if (bio->bi_status != BLK_STS_OK &&
++ bio_op(bio) == REQ_OP_WRITE &&
++ dmz_is_seq(zone))
++ set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
++ dmz_deactivate_zone(zone);
++ }
++ bio_endio(bio);
++ }
+ }
+
+ /*
+- * Partial clone read BIO completion callback. This terminates the
++ * Completion callback for an internally cloned target BIO. This terminates the
+ * target BIO when there are no more references to its context.
+ */
+-static void dmz_read_bio_end_io(struct bio *bio)
++static void dmz_clone_endio(struct bio *clone)
+ {
+- struct dmz_bioctx *bioctx = bio->bi_private;
+- blk_status_t status = bio->bi_status;
++ struct dmz_bioctx *bioctx = clone->bi_private;
++ blk_status_t status = clone->bi_status;
+
+- bio_put(bio);
++ bio_put(clone);
+ dmz_bio_endio(bioctx->bio, status);
+ }
+
+ /*
+- * Issue a BIO to a zone. The BIO may only partially process the
++ * Issue a clone of a target BIO. The clone may only partially process the
+ * original target BIO.
+ */
+-static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
+- struct bio *bio, sector_t chunk_block,
+- unsigned int nr_blocks)
++static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
++ struct bio *bio, sector_t chunk_block,
++ unsigned int nr_blocks)
+ {
+ struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+- sector_t sector;
+ struct bio *clone;
+
+- /* BIO remap sector */
+- sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
+-
+- /* If the read is not partial, there is no need to clone the BIO */
+- if (nr_blocks == dmz_bio_blocks(bio)) {
+- /* Setup and submit the BIO */
+- bio->bi_iter.bi_sector = sector;
+- atomic_inc(&bioctx->ref);
+- generic_make_request(bio);
+- return 0;
+- }
+-
+- /* Partial BIO: we need to clone the BIO */
+ clone = bio_clone_fast(bio, GFP_NOIO, dmz->bio_set);
+ if (!clone)
+ return -ENOMEM;
+
+- /* Setup the clone */
+- clone->bi_iter.bi_sector = sector;
++ bio_set_dev(clone, dmz->dev->bdev);
++ clone->bi_iter.bi_sector =
++ dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
+ clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
+- clone->bi_end_io = dmz_read_bio_end_io;
++ clone->bi_end_io = dmz_clone_endio;
+ clone->bi_private = bioctx;
+
+ bio_advance(bio, clone->bi_iter.bi_size);
+
+- /* Submit the clone */
+ atomic_inc(&bioctx->ref);
+ generic_make_request(clone);
+
++ if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
++ zone->wp_block += nr_blocks;
++
+ return 0;
+ }
+
+@@ -214,7 +214,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
+ if (nr_blocks) {
+ /* Valid blocks found: read them */
+ nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
+- ret = dmz_submit_read_bio(dmz, rzone, bio, chunk_block, nr_blocks);
++ ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
+ if (ret)
+ return ret;
+ chunk_block += nr_blocks;
+@@ -228,25 +228,6 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
+ return 0;
+ }
+
+-/*
+- * Issue a write BIO to a zone.
+- */
+-static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
+- struct bio *bio, sector_t chunk_block,
+- unsigned int nr_blocks)
+-{
+- struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+-
+- /* Setup and submit the BIO */
+- bio_set_dev(bio, dmz->dev->bdev);
+- bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
+- atomic_inc(&bioctx->ref);
+- generic_make_request(bio);
+-
+- if (dmz_is_seq(zone))
+- zone->wp_block += nr_blocks;
+-}
+-
+ /*
+ * Write blocks directly in a data zone, at the write pointer.
+ * If a buffer zone is assigned, invalidate the blocks written
+@@ -265,7 +246,9 @@ static int dmz_handle_direct_write(struct dmz_target *dmz,
+ return -EROFS;
+
+ /* Submit write */
+- dmz_submit_write_bio(dmz, zone, bio, chunk_block, nr_blocks);
++ ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
++ if (ret)
++ return ret;
+
+ /*
+ * Validate the blocks in the data zone and invalidate
+@@ -301,7 +284,9 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
+ return -EROFS;
+
+ /* Submit write */
+- dmz_submit_write_bio(dmz, bzone, bio, chunk_block, nr_blocks);
++ ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
++ if (ret)
++ return ret;
+
+ /*
+ * Validate the blocks in the buffer zone
+@@ -600,7 +585,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
+ bioctx->zone = NULL;
+ bioctx->bio = bio;
+ atomic_set(&bioctx->ref, 1);
+- bioctx->status = BLK_STS_OK;
+
+ /* Set the BIO pending in the flush list */
+ if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
+@@ -623,35 +607,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
+ return DM_MAPIO_SUBMITTED;
+ }
+
+-/*
+- * Completed target BIO processing.
+- */
+-static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
+-{
+- struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+-
+- if (bioctx->status == BLK_STS_OK && *error)
+- bioctx->status = *error;
+-
+- if (!atomic_dec_and_test(&bioctx->ref))
+- return DM_ENDIO_INCOMPLETE;
+-
+- /* Done */
+- bio->bi_status = bioctx->status;
+-
+- if (bioctx->zone) {
+- struct dm_zone *zone = bioctx->zone;
+-
+- if (*error && bio_op(bio) == REQ_OP_WRITE) {
+- if (dmz_is_seq(zone))
+- set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
+- }
+- dmz_deactivate_zone(zone);
+- }
+-
+- return DM_ENDIO_DONE;
+-}
+-
+ /*
+ * Get zoned device information.
+ */
+@@ -946,7 +901,6 @@ static struct target_type dmz_type = {
+ .ctr = dmz_ctr,
+ .dtr = dmz_dtr,
+ .map = dmz_map,
+- .end_io = dmz_end_io,
+ .io_hints = dmz_io_hints,
+ .prepare_ioctl = dmz_prepare_ioctl,
+ .postsuspend = dmz_suspend,
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index e786546bf3b8..52ddfa0fca94 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -4591,15 +4591,18 @@ static int handle_reshape_read_error(struct mddev *mddev,
+ /* Use sync reads to get the blocks from somewhere else */
+ int sectors = r10_bio->sectors;
+ struct r10conf *conf = mddev->private;
+- struct {
+- struct r10bio r10_bio;
+- struct r10dev devs[conf->copies];
+- } on_stack;
+- struct r10bio *r10b = &on_stack.r10_bio;
++ struct r10bio *r10b;
+ int slot = 0;
+ int idx = 0;
+ struct page **pages;
+
++ r10b = kmalloc(sizeof(*r10b) +
++ sizeof(struct r10dev) * conf->copies, GFP_NOIO);
++ if (!r10b) {
++ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
++ return -ENOMEM;
++ }
++
+ /* reshape IOs share pages from .devs[0].bio */
+ pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
+
+@@ -4648,11 +4651,13 @@ static int handle_reshape_read_error(struct mddev *mddev,
+ /* couldn't read this block, must give up */
+ set_bit(MD_RECOVERY_INTR,
+ &mddev->recovery);
++ kfree(r10b);
+ return -EIO;
+ }
+ sectors -= s;
+ idx++;
+ }
++ kfree(r10b);
+ return 0;
+ }
+
+diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
+index 147b83011b58..2769eb0dfcf5 100644
+--- a/drivers/misc/genwqe/card_utils.c
++++ b/drivers/misc/genwqe/card_utils.c
+@@ -217,7 +217,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
+ void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
+ dma_addr_t *dma_handle)
+ {
+- if (get_order(size) > MAX_ORDER)
++ if (get_order(size) >= MAX_ORDER)
+ return NULL;
+
+ return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle,
+diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+index 3b889efddf78..50dd6bf176d0 100644
+--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+@@ -29,9 +29,6 @@
+ #define RES_RING_CSR 1
+ #define RES_RING_CMD 2
+
+-static const struct of_device_id xgene_enet_of_match[];
+-static const struct acpi_device_id xgene_enet_acpi_match[];
+-
+ static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
+ {
+ struct xgene_enet_raw_desc16 *raw_desc;
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index 1b7f4342dab9..d17a5c911524 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -1278,6 +1278,7 @@ enum sp_rtnl_flag {
+ BNX2X_SP_RTNL_TX_STOP,
+ BNX2X_SP_RTNL_GET_DRV_VERSION,
+ BNX2X_SP_RTNL_CHANGE_UDP_PORT,
++ BNX2X_SP_RTNL_UPDATE_SVID,
+ };
+
+ enum bnx2x_iov_flag {
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index bd3e3f080ebf..022b06e770d1 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -2925,6 +2925,10 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
++ /* Prepare parameters for function state transitions */
++ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
++ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
++
+ if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
+ int func = BP_ABS_FUNC(bp);
+ u32 val;
+@@ -4301,7 +4305,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
+ bnx2x_handle_eee_event(bp);
+
+ if (val & DRV_STATUS_OEM_UPDATE_SVID)
+- bnx2x_handle_update_svid_cmd(bp);
++ bnx2x_schedule_sp_rtnl(bp,
++ BNX2X_SP_RTNL_UPDATE_SVID, 0);
+
+ if (bp->link_vars.periodic_flags &
+ PERIODIC_FLAGS_LINK_EVENT) {
+@@ -8462,6 +8467,7 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+ /* Fill a user request section if needed */
+ if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+ ramrod_param.user_req.u.vlan.vlan = vlan;
++ __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
+ /* Set the command: ADD or DEL */
+ if (set)
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+@@ -8482,6 +8488,27 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+ return rc;
+ }
+
++static int bnx2x_del_all_vlans(struct bnx2x *bp)
++{
++ struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
++ unsigned long ramrod_flags = 0, vlan_flags = 0;
++ struct bnx2x_vlan_entry *vlan;
++ int rc;
++
++ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
++ __set_bit(BNX2X_VLAN, &vlan_flags);
++ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
++ if (rc)
++ return rc;
++
++ /* Mark that hw forgot all entries */
++ list_for_each_entry(vlan, &bp->vlan_reg, link)
++ vlan->hw = false;
++ bp->vlan_cnt = 0;
++
++ return 0;
++}
++
+ int bnx2x_del_all_macs(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ int mac_type, bool wait_for_comp)
+@@ -9320,6 +9347,17 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
+ BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
+ rc);
+
++ /* The whole *vlan_obj structure may be not initialized if VLAN
++ * filtering offload is not supported by hardware. Currently this is
++ * true for all hardware covered by CHIP_IS_E1x().
++ */
++ if (!CHIP_IS_E1x(bp)) {
++ /* Remove all currently configured VLANs */
++ rc = bnx2x_del_all_vlans(bp);
++ if (rc < 0)
++ BNX2X_ERR("Failed to delete all VLANs\n");
++ }
++
+ /* Disable LLH */
+ if (!CHIP_IS_E1(bp))
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+@@ -10342,6 +10380,9 @@ sp_rtnl_not_reset:
+ &bp->sp_rtnl_state))
+ bnx2x_update_mng_version(bp);
+
++ if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
++ bnx2x_handle_update_svid_cmd(bp);
++
+ if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
+ &bp->sp_rtnl_state)) {
+ if (bnx2x_udp_port_update(bp)) {
+@@ -11733,8 +11774,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
+ * If maximum allowed number of connections is zero -
+ * disable the feature.
+ */
+- if (!bp->cnic_eth_dev.max_fcoe_conn)
++ if (!bp->cnic_eth_dev.max_fcoe_conn) {
+ bp->flags |= NO_FCOE_FLAG;
++ eth_zero_addr(bp->fip_mac);
++ }
+ }
+
+ static void bnx2x_get_cnic_info(struct bnx2x *bp)
+@@ -13004,13 +13047,6 @@ static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
+
+ int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+ {
+- struct bnx2x_vlan_entry *vlan;
+-
+- /* The hw forgot all entries after reload */
+- list_for_each_entry(vlan, &bp->vlan_reg, link)
+- vlan->hw = false;
+- bp->vlan_cnt = 0;
+-
+ /* Don't set rx mode here. Our caller will do it. */
+ bnx2x_vlan_configure(bp, false);
+
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+index 0bf2fd470819..7a6e82db4231 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+@@ -265,6 +265,7 @@ enum {
+ BNX2X_ETH_MAC,
+ BNX2X_ISCSI_ETH_MAC,
+ BNX2X_NETQ_ETH_MAC,
++ BNX2X_VLAN,
+ BNX2X_DONT_CONSUME_CAM_CREDIT,
+ BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+ };
+@@ -272,7 +273,8 @@ enum {
+ #define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
+ 1 << BNX2X_ETH_MAC | \
+ 1 << BNX2X_ISCSI_ETH_MAC | \
+- 1 << BNX2X_NETQ_ETH_MAC)
++ 1 << BNX2X_NETQ_ETH_MAC | \
++ 1 << BNX2X_VLAN)
+ #define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
+ ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
+
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 0b2f9ddfb1c4..9046993947cc 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -675,6 +675,11 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
+ desc_64 = macb_64b_desc(bp, desc);
+ desc_64->addrh = upper_32_bits(addr);
++ /* The low bits of RX address contain the RX_USED bit, clearing
++ * of which allows packet RX. Make sure the high bits are also
++ * visible to HW at that point.
++ */
++ dma_wmb();
+ }
+ #endif
+ desc->addr = lower_32_bits(addr);
+@@ -918,14 +923,19 @@ static void gem_rx_refill(struct macb *bp)
+
+ if (entry == bp->rx_ring_size - 1)
+ paddr |= MACB_BIT(RX_WRAP);
+- macb_set_addr(bp, desc, paddr);
+ desc->ctrl = 0;
++ /* Setting addr clears RX_USED and allows reception,
++ * make sure ctrl is cleared first to avoid a race.
++ */
++ dma_wmb();
++ macb_set_addr(bp, desc, paddr);
+
+ /* properly align Ethernet header */
+ skb_reserve(skb, NET_IP_ALIGN);
+ } else {
+- desc->addr &= ~MACB_BIT(RX_USED);
+ desc->ctrl = 0;
++ dma_wmb();
++ desc->addr &= ~MACB_BIT(RX_USED);
+ }
+ }
+
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+index bf930ab3c2bd..a185a8be7999 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+@@ -379,6 +379,9 @@ void hns_ae_stop(struct hnae_handle *handle)
+
+ hns_ae_ring_enable_all(handle, 0);
+
++ /* clean rx fbd. */
++ hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX);
++
+ (void)hns_mac_vm_config_bc_en(mac_cb, 0, false);
+ }
+
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+index 8c7bc5cf193c..5e8930d02f50 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+@@ -67,11 +67,14 @@ static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode)
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ /*enable GE rX/tX */
+- if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
++ if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);
+
+- if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
++ if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
++ /* enable rx pcs */
++ dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 0);
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
++ }
+ }
+
+ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
+@@ -79,11 +82,14 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ /*disable GE rX/tX */
+- if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
++ if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);
+
+- if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
++ if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
++ /* disable rx pcs */
++ dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 1);
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
++ }
+ }
+
+ /* hns_gmac_get_en - get port enable
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+index 5a8dbd72fe45..07e117deeb0f 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+@@ -783,6 +783,17 @@ static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
+ return rc;
+ }
+
++static void hns_mac_remove_phydev(struct hns_mac_cb *mac_cb)
++{
++ if (!to_acpi_device_node(mac_cb->fw_port) || !mac_cb->phy_dev)
++ return;
++
++ phy_device_remove(mac_cb->phy_dev);
++ phy_device_free(mac_cb->phy_dev);
++
++ mac_cb->phy_dev = NULL;
++}
++
+ #define MAC_MEDIA_TYPE_MAX_LEN 16
+
+ static const struct {
+@@ -1120,7 +1131,11 @@ void hns_mac_uninit(struct dsaf_device *dsaf_dev)
+ int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
+
+ for (i = 0; i < max_port_num; i++) {
++ if (!dsaf_dev->mac_cb[i])
++ continue;
++
+ dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
++ hns_mac_remove_phydev(dsaf_dev->mac_cb[i]);
+ dsaf_dev->mac_cb[i] = NULL;
+ }
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+index 1f056a6b167e..51d42d7f6074 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+@@ -934,6 +934,62 @@ static void hns_dsaf_tcam_mc_cfg(
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+ }
+
++/**
++ * hns_dsaf_tcam_uc_cfg_vague - INT
++ * @dsaf_dev: dsa fabric device struct pointer
++ * @address,
++ * @ptbl_tcam_data,
++ */
++static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
++ u32 address,
++ struct dsaf_tbl_tcam_data *tcam_data,
++ struct dsaf_tbl_tcam_data *tcam_mask,
++ struct dsaf_tbl_tcam_ucast_cfg *tcam_uc)
++{
++ spin_lock_bh(&dsaf_dev->tcam_lock);
++ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
++ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
++ hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, tcam_uc);
++ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
++ hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
++
++ /*Restore Match Data*/
++ tcam_mask->tbl_tcam_data_high = 0xffffffff;
++ tcam_mask->tbl_tcam_data_low = 0xffffffff;
++ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
++
++ spin_unlock_bh(&dsaf_dev->tcam_lock);
++}
++
++/**
++ * hns_dsaf_tcam_mc_cfg_vague - INT
++ * @dsaf_dev: dsa fabric device struct pointer
++ * @address,
++ * @ptbl_tcam_data,
++ * @ptbl_tcam_mask
++ * @ptbl_tcam_mcast
++ */
++static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
++ u32 address,
++ struct dsaf_tbl_tcam_data *tcam_data,
++ struct dsaf_tbl_tcam_data *tcam_mask,
++ struct dsaf_tbl_tcam_mcast_cfg *tcam_mc)
++{
++ spin_lock_bh(&dsaf_dev->tcam_lock);
++ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
++ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
++ hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, tcam_mc);
++ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
++ hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
++
++ /*Restore Match Data*/
++ tcam_mask->tbl_tcam_data_high = 0xffffffff;
++ tcam_mask->tbl_tcam_data_low = 0xffffffff;
++ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
++
++ spin_unlock_bh(&dsaf_dev->tcam_lock);
++}
++
+ /**
+ * hns_dsaf_tcam_mc_invld - INT
+ * @dsaf_id: dsa fabric id
+@@ -1491,6 +1547,27 @@ static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
+ return DSAF_INVALID_ENTRY_IDX;
+ }
+
++/**
++ * hns_dsaf_find_empty_mac_entry_reverse
++ * search dsa fabric soft empty-entry from the end
++ * @dsaf_dev: dsa fabric device struct pointer
++ */
++static u16 hns_dsaf_find_empty_mac_entry_reverse(struct dsaf_device *dsaf_dev)
++{
++ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
++ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
++ int i;
++
++ soft_mac_entry = priv->soft_mac_tbl + (DSAF_TCAM_SUM - 1);
++ for (i = (DSAF_TCAM_SUM - 1); i > 0; i--) {
++ /* search all entry from end to start.*/
++ if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
++ return i;
++ soft_mac_entry--;
++ }
++ return DSAF_INVALID_ENTRY_IDX;
++}
++
+ /**
+ * hns_dsaf_set_mac_key - set mac key
+ * @dsaf_dev: dsa fabric device struct pointer
+@@ -2159,9 +2236,9 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
+ DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
+
+ hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
+- DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num);
++ DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 4 * (u64)node_num);
+ hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
+- DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
++ DSAF_INODE_IN_DATA_STP_DISC_0_REG + 4 * (u64)node_num);
+
+ /* pfc pause frame statistics stored in dsaf inode*/
+ if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
+@@ -2278,237 +2355,237 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
+ DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
+ p[223 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
+- p[224 + i] = dsaf_read_dev(ddev,
++ p[226 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
+ }
+
+- p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
++ p[229] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
+
+ for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+- p[228 + i] = dsaf_read_dev(ddev,
++ p[230 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
+ }
+
+- p[231] = dsaf_read_dev(ddev,
+- DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4);
++ p[233] = dsaf_read_dev(ddev,
++ DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 0x80);
+
+ /* dsaf inode registers */
+ for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+- p[232 + i] = dsaf_read_dev(ddev,
++ p[234 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_CFG_REG_0_REG + j * 0x80);
+- p[235 + i] = dsaf_read_dev(ddev,
++ p[237 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
+- p[238 + i] = dsaf_read_dev(ddev,
++ p[240 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
+- p[241 + i] = dsaf_read_dev(ddev,
++ p[243 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
+- p[244 + i] = dsaf_read_dev(ddev,
++ p[246 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
+- p[245 + i] = dsaf_read_dev(ddev,
++ p[249 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
+- p[248 + i] = dsaf_read_dev(ddev,
++ p[252 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
+- p[251 + i] = dsaf_read_dev(ddev,
++ p[255 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
+- p[254 + i] = dsaf_read_dev(ddev,
++ p[258 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
+- p[257 + i] = dsaf_read_dev(ddev,
++ p[261 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
+- p[260 + i] = dsaf_read_dev(ddev,
++ p[264 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_INER_ST_0_REG + j * 0x80);
+- p[263 + i] = dsaf_read_dev(ddev,
++ p[267 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
+- p[266 + i] = dsaf_read_dev(ddev,
++ p[270 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
+- p[269 + i] = dsaf_read_dev(ddev,
++ p[273 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
+- p[272 + i] = dsaf_read_dev(ddev,
++ p[276 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
+- p[275 + i] = dsaf_read_dev(ddev,
++ p[279 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
+- p[278 + i] = dsaf_read_dev(ddev,
++ p[282 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
+- p[281 + i] = dsaf_read_dev(ddev,
++ p[285 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
+- p[284 + i] = dsaf_read_dev(ddev,
++ p[288 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
+- p[287 + i] = dsaf_read_dev(ddev,
++ p[291 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
+- p[290 + i] = dsaf_read_dev(ddev,
++ p[294 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
+- p[293 + i] = dsaf_read_dev(ddev,
++ p[297 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
+- p[296 + i] = dsaf_read_dev(ddev,
++ p[300 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
+- p[299 + i] = dsaf_read_dev(ddev,
++ p[303 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
+- p[302 + i] = dsaf_read_dev(ddev,
++ p[306 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
+- p[305 + i] = dsaf_read_dev(ddev,
++ p[309 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
+- p[308 + i] = dsaf_read_dev(ddev,
++ p[312 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
+ }
+
+ /* dsaf onode registers */
+ for (i = 0; i < DSAF_XOD_NUM; i++) {
+- p[311 + i] = dsaf_read_dev(ddev,
++ p[315 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
+- p[319 + i] = dsaf_read_dev(ddev,
++ p[323 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
+- p[327 + i] = dsaf_read_dev(ddev,
++ p[331 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
+- p[335 + i] = dsaf_read_dev(ddev,
++ p[339 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
+- p[343 + i] = dsaf_read_dev(ddev,
++ p[347 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
+- p[351 + i] = dsaf_read_dev(ddev,
++ p[355 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
+ }
+
+- p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
+- p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
+- p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
++ p[363] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
++ p[364] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
++ p[365] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
+
+ for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+- p[362 + i] = dsaf_read_dev(ddev,
++ p[366 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_GNT_L_0_REG + j * 0x90);
+- p[365 + i] = dsaf_read_dev(ddev,
++ p[369 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_GNT_H_0_REG + j * 0x90);
+- p[368 + i] = dsaf_read_dev(ddev,
++ p[372 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
+- p[371 + i] = dsaf_read_dev(ddev,
++ p[375 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
+- p[374 + i] = dsaf_read_dev(ddev,
++ p[378 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
+- p[377 + i] = dsaf_read_dev(ddev,
++ p[381 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
+- p[380 + i] = dsaf_read_dev(ddev,
++ p[384 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
+- p[383 + i] = dsaf_read_dev(ddev,
++ p[387 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
+- p[386 + i] = dsaf_read_dev(ddev,
++ p[390 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
+- p[389 + i] = dsaf_read_dev(ddev,
++ p[393 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
+ }
+
+- p[392] = dsaf_read_dev(ddev,
++ p[396] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
+- p[393] = dsaf_read_dev(ddev,
++ p[397] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
+- p[394] = dsaf_read_dev(ddev,
++ p[398] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
+- p[395] = dsaf_read_dev(ddev,
++ p[399] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
+- p[396] = dsaf_read_dev(ddev,
++ p[400] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
+- p[397] = dsaf_read_dev(ddev,
++ p[401] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
+- p[398] = dsaf_read_dev(ddev,
++ p[402] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
+- p[399] = dsaf_read_dev(ddev,
++ p[403] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
+- p[400] = dsaf_read_dev(ddev,
++ p[404] = dsaf_read_dev(ddev,
+ DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
+- p[401] = dsaf_read_dev(ddev,
++ p[405] = dsaf_read_dev(ddev,
+ DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
+- p[402] = dsaf_read_dev(ddev,
++ p[406] = dsaf_read_dev(ddev,
+ DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
+- p[403] = dsaf_read_dev(ddev,
++ p[407] = dsaf_read_dev(ddev,
+ DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
+- p[404] = dsaf_read_dev(ddev,
++ p[408] = dsaf_read_dev(ddev,
+ DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
+
+ /* dsaf voq registers */
+ for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
+ j = (i * DSAF_COMM_CHN + port) * 0x90;
+- p[405 + i] = dsaf_read_dev(ddev,
++ p[409 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
+- p[408 + i] = dsaf_read_dev(ddev,
++ p[412 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
+- p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
+- p[414 + i] = dsaf_read_dev(ddev,
++ p[415 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
++ p[418 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
+- p[417 + i] = dsaf_read_dev(ddev,
++ p[421 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
+- p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
+- p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
+- p[426 + i] = dsaf_read_dev(ddev,
++ p[424 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
++ p[427 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
++ p[430 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
+- p[429 + i] = dsaf_read_dev(ddev,
++ p[433 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
+- p[432 + i] = dsaf_read_dev(ddev,
++ p[436 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
+- p[435 + i] = dsaf_read_dev(ddev,
++ p[439 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
+- p[438 + i] = dsaf_read_dev(ddev,
++ p[442 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_BP_ALL_THRD_0_REG + j);
+ }
+
+ /* dsaf tbl registers */
+- p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
+- p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
+- p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
+- p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
+- p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
+- p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
+- p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
+- p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
+- p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+- p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
+- p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
+- p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
+- p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
+- p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+- p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
+- p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+- p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+- p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+- p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+- p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+- p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+- p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+- p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
++ p[445] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
++ p[446] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
++ p[447] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
++ p[448] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
++ p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
++ p[450] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
++ p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
++ p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
++ p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
++ p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
++ p[455] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
++ p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
++ p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
++ p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
++ p[459] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
++ p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
++ p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
++ p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
++ p[463] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
++ p[464] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
++ p[465] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
++ p[466] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
++ p[467] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
+ j = i * 0x8;
+- p[464 + 2 * i] = dsaf_read_dev(ddev,
++ p[468 + 2 * i] = dsaf_read_dev(ddev,
+ DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
+- p[465 + 2 * i] = dsaf_read_dev(ddev,
++ p[469 + 2 * i] = dsaf_read_dev(ddev,
+ DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
+ }
+
+- p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
+- p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
+- p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
+- p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
+- p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
+- p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
+- p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
+- p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
+- p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
+- p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
+- p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
+- p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
++ p[484] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
++ p[485] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
++ p[486] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
++ p[487] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
++ p[488] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
++ p[489] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
++ p[490] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
++ p[491] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
++ p[492] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
++ p[493] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
++ p[494] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
++ p[495] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
+
+ /* dsaf other registers */
+- p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
+- p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
+- p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
+- p[495] = dsaf_read_dev(ddev,
++ p[496] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
++ p[497] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
++ p[498] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
++ p[499] = dsaf_read_dev(ddev,
+ DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
+- p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
+- p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
++ p[500] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
++ p[501] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+
+ if (!is_ver1)
+- p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
++ p[502] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
+
+ /* mark end of dsaf regs */
+- for (i = 499; i < 504; i++)
++ for (i = 503; i < 504; i++)
+ p[i] = 0xdddddddd;
+ }
+
+@@ -2666,58 +2743,156 @@ int hns_dsaf_get_regs_count(void)
+ return DSAF_DUMP_REGS_NUM;
+ }
+
+-/* Reserve the last TCAM entry for promisc support */
+-#define dsaf_promisc_tcam_entry(port) \
+- (DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM + (port))
+-void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
+- u32 port, bool enable)
++static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
+ {
++ struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
++ struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
++ struct dsaf_tbl_tcam_data tbl_tcam_mask_uc = {0x01000000, 0xf};
++ struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+- struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+- u16 entry_index;
+- struct dsaf_drv_tbl_tcam_key tbl_tcam_data, tbl_tcam_mask;
+- struct dsaf_tbl_tcam_mcast_cfg mac_data = {0};
++ struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, port};
++ struct dsaf_drv_mac_single_dest_entry mask_entry;
++ struct dsaf_drv_tbl_tcam_key temp_key, mask_key;
++ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
++ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
++ struct dsaf_drv_tbl_tcam_key mac_key;
++ struct hns_mac_cb *mac_cb;
++ u8 addr[ETH_ALEN] = {0};
++ u8 port_num;
++ u16 mskid;
++
++ /* promisc use vague table match with vlanid = 0 & macaddr = 0 */
++ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
++ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
++ if (entry_index != DSAF_INVALID_ENTRY_IDX)
++ return;
++
++ /* put promisc tcam entry in the end. */
++ /* 1. set promisc unicast vague tcam entry. */
++ entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
++ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
++ dev_err(dsaf_dev->dev,
++ "enable uc promisc failed (port:%#x)\n",
++ port);
++ return;
++ }
++
++ mac_cb = dsaf_dev->mac_cb[port];
++ (void)hns_mac_get_inner_port_num(mac_cb, 0, &port_num);
++ tbl_tcam_ucast.tbl_ucast_out_port = port_num;
+
+- if ((AE_IS_VER1(dsaf_dev->dsaf_ver)) || HNS_DSAF_IS_DEBUG(dsaf_dev))
++ /* config uc vague table */
++ hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
++ &tbl_tcam_mask_uc, &tbl_tcam_ucast);
++
++ /* update software entry */
++ soft_mac_entry = priv->soft_mac_tbl;
++ soft_mac_entry += entry_index;
++ soft_mac_entry->index = entry_index;
++ soft_mac_entry->tcam_key.high.val = mac_key.high.val;
++ soft_mac_entry->tcam_key.low.val = mac_key.low.val;
++ /* step back to the START for mc. */
++ soft_mac_entry = priv->soft_mac_tbl;
++
++ /* 2. set promisc multicast vague tcam entry. */
++ entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
++ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
++ dev_err(dsaf_dev->dev,
++ "enable mc promisc failed (port:%#x)\n",
++ port);
+ return;
++ }
++
++ memset(&mask_entry, 0x0, sizeof(mask_entry));
++ memset(&mask_key, 0x0, sizeof(mask_key));
++ memset(&temp_key, 0x0, sizeof(temp_key));
++ mask_entry.addr[0] = 0x01;
++ hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
++ port, mask_entry.addr);
++ tbl_tcam_mcast.tbl_mcast_item_vld = 1;
++ tbl_tcam_mcast.tbl_mcast_old_en = 0;
+
+- /* find the tcam entry index for promisc */
+- entry_index = dsaf_promisc_tcam_entry(port);
+-
+- memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
+- memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
+-
+- /* config key mask */
+- if (enable) {
+- dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
+- DSAF_TBL_TCAM_KEY_PORT_M,
+- DSAF_TBL_TCAM_KEY_PORT_S, port);
+- dsaf_set_field(tbl_tcam_mask.low.bits.port_vlan,
+- DSAF_TBL_TCAM_KEY_PORT_M,
+- DSAF_TBL_TCAM_KEY_PORT_S, 0xf);
+-
+- /* SUB_QID */
+- dsaf_set_bit(mac_data.tbl_mcast_port_msk[0],
+- DSAF_SERVICE_NW_NUM, true);
+- mac_data.tbl_mcast_item_vld = true; /* item_vld bit */
++ if (port < DSAF_SERVICE_NW_NUM) {
++ mskid = port;
++ } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
++ mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+ } else {
+- mac_data.tbl_mcast_item_vld = false; /* item_vld bit */
++ dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
++ dsaf_dev->ae_dev.name, port,
++ mask_key.high.val, mask_key.low.val);
++ return;
+ }
+
+- dev_dbg(dsaf_dev->dev,
+- "set_promisc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+- dsaf_dev->ae_dev.name, tbl_tcam_data.high.val,
+- tbl_tcam_data.low.val, entry_index);
++ dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
++ mskid % 32, 1);
++ memcpy(&temp_key, &mask_key, sizeof(mask_key));
++ hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
++ (struct dsaf_tbl_tcam_data *)(&mask_key),
++ &tbl_tcam_mcast);
++
++ /* update software entry */
++ soft_mac_entry += entry_index;
++ soft_mac_entry->index = entry_index;
++ soft_mac_entry->tcam_key.high.val = temp_key.high.val;
++ soft_mac_entry->tcam_key.low.val = temp_key.low.val;
++}
+
+- /* config promisc entry with mask */
+- hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
+- (struct dsaf_tbl_tcam_data *)&tbl_tcam_data,
+- (struct dsaf_tbl_tcam_data *)&tbl_tcam_mask,
+- &mac_data);
++static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port)
++{
++ struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
++ struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 0, 0, 0, 0};
++ struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
++ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
++ struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0};
++ struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0};
++ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
++ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
++ struct dsaf_drv_tbl_tcam_key mac_key;
++ u8 addr[ETH_ALEN] = {0};
+
+- /* config software entry */
++ /* 1. delete uc vague tcam entry. */
++ /* promisc use vague table match with vlanid = 0 & macaddr = 0 */
++ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
++ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
++
++ if (entry_index == DSAF_INVALID_ENTRY_IDX)
++ return;
++
++ /* config uc vague table */
++ hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
++ &tbl_tcam_mask, &tbl_tcam_ucast);
++ /* update soft management table. */
++ soft_mac_entry = priv->soft_mac_tbl;
++ soft_mac_entry += entry_index;
++ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
++ /* step back to the START for mc. */
++ soft_mac_entry = priv->soft_mac_tbl;
++
++ /* 2. delete mc vague tcam entry. */
++ addr[0] = 0x01;
++ memset(&mac_key, 0x0, sizeof(mac_key));
++ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
++ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
++
++ if (entry_index == DSAF_INVALID_ENTRY_IDX)
++ return;
++
++ /* config mc vague table */
++ hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
++ &tbl_tcam_mask, &tbl_tcam_mcast);
++ /* update soft management table. */
+ soft_mac_entry += entry_index;
+- soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
++ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
++}
++
++/* Reserve the last TCAM entry for promisc support */
++void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
++ u32 port, bool enable)
++{
++ if (enable)
++ set_promisc_tcam_enable(dsaf_dev, port);
++ else
++ set_promisc_tcam_disable(dsaf_dev, port);
+ }
+
+ int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+index 6d20e4eb7402..ae97b203f73b 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+@@ -176,7 +176,7 @@
+ #define DSAF_INODE_IN_DATA_STP_DISC_0_REG 0x1A50
+ #define DSAF_INODE_GE_FC_EN_0_REG 0x1B00
+ #define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50
+-#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x1C00
++#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x103C
+ #define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00
+ #define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100
+ #define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50
+@@ -404,11 +404,11 @@
+ #define RCB_ECC_ERR_ADDR4_REG 0x460
+ #define RCB_ECC_ERR_ADDR5_REG 0x464
+
+-#define RCB_COM_SF_CFG_INTMASK_RING 0x480
+-#define RCB_COM_SF_CFG_RING_STS 0x484
+-#define RCB_COM_SF_CFG_RING 0x488
+-#define RCB_COM_SF_CFG_INTMASK_BD 0x48C
+-#define RCB_COM_SF_CFG_BD_RINT_STS 0x470
++#define RCB_COM_SF_CFG_INTMASK_RING 0x470
++#define RCB_COM_SF_CFG_RING_STS 0x474
++#define RCB_COM_SF_CFG_RING 0x478
++#define RCB_COM_SF_CFG_INTMASK_BD 0x47C
++#define RCB_COM_SF_CFG_BD_RINT_STS 0x480
+ #define RCB_COM_RCB_RD_BD_BUSY 0x490
+ #define RCB_COM_RCB_FBD_CRT_EN 0x494
+ #define RCB_COM_AXI_WR_ERR_INTMASK 0x498
+@@ -534,6 +534,7 @@
+ #define GMAC_LD_LINK_COUNTER_REG 0x01D0UL
+ #define GMAC_LOOP_REG 0x01DCUL
+ #define GMAC_RECV_CONTROL_REG 0x01E0UL
++#define GMAC_PCS_RX_EN_REG 0x01E4UL
+ #define GMAC_VLAN_CODE_REG 0x01E8UL
+ #define GMAC_RX_OVERRUN_CNT_REG 0x01ECUL
+ #define GMAC_RX_LENGTHFIELD_ERR_CNT_REG 0x01F4UL
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+index 4faadc3ffe8c..86662a14208e 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+@@ -1286,6 +1286,9 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
+ if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
+ phy_dev->autoneg = false;
+
++ if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
++ phy_stop(phy_dev);
++
+ return 0;
+ }
+
+@@ -1381,6 +1384,22 @@ static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
+ return cpu;
+ }
+
++static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
++{
++ int i;
++
++ for (i = 0; i < q_num * 2; i++) {
++ if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
++ irq_set_affinity_hint(priv->ring_data[i].ring->irq,
++ NULL);
++ free_irq(priv->ring_data[i].ring->irq,
++ &priv->ring_data[i]);
++ priv->ring_data[i].ring->irq_init_flag =
++ RCB_IRQ_NOT_INITED;
++ }
++ }
++}
++
+ static int hns_nic_init_irq(struct hns_nic_priv *priv)
+ {
+ struct hnae_handle *h = priv->ae_handle;
+@@ -1406,7 +1425,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
+ if (ret) {
+ netdev_err(priv->netdev, "request irq(%d) fail\n",
+ rd->ring->irq);
+- return ret;
++ goto out_free_irq;
+ }
+ disable_irq(rd->ring->irq);
+
+@@ -1421,6 +1440,10 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
+ }
+
+ return 0;
++
++out_free_irq:
++ hns_nic_free_irq(h->q_num, priv);
++ return ret;
+ }
+
+ static int hns_nic_net_up(struct net_device *ndev)
+@@ -1430,6 +1453,9 @@ static int hns_nic_net_up(struct net_device *ndev)
+ int i, j;
+ int ret;
+
++ if (!test_bit(NIC_STATE_DOWN, &priv->state))
++ return 0;
++
+ ret = hns_nic_init_irq(priv);
+ if (ret != 0) {
+ netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
+@@ -1465,6 +1491,7 @@ out_has_some_queues:
+ for (j = i - 1; j >= 0; j--)
+ hns_nic_ring_close(ndev, j);
+
++ hns_nic_free_irq(h->q_num, priv);
+ set_bit(NIC_STATE_DOWN, &priv->state);
+
+ return ret;
+@@ -1582,11 +1609,19 @@ static int hns_nic_net_stop(struct net_device *ndev)
+ }
+
+ static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
++#define HNS_TX_TIMEO_LIMIT (40 * HZ)
+ static void hns_nic_net_timeout(struct net_device *ndev)
+ {
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+- hns_tx_timeout_reset(priv);
++ if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
++ ndev->watchdog_timeo *= 2;
++ netdev_info(ndev, "watchdog_timo changed to %d.\n",
++ ndev->watchdog_timeo);
++ } else {
++ ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
++ hns_tx_timeout_reset(priv);
++ }
+ }
+
+ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
+@@ -2166,11 +2201,11 @@ static void hns_nic_service_task(struct work_struct *work)
+ = container_of(work, struct hns_nic_priv, service_task);
+ struct hnae_handle *h = priv->ae_handle;
+
++ hns_nic_reset_subtask(priv);
+ hns_nic_update_link_status(priv->netdev);
+ h->dev->ops->update_led_status(h);
+ hns_nic_update_stats(priv->netdev);
+
+- hns_nic_reset_subtask(priv);
+ hns_nic_service_event_complete(priv);
+ }
+
+@@ -2451,7 +2486,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
+ ndev->min_mtu = MAC_MIN_MTU;
+ switch (priv->enet_ver) {
+ case AE_VERSION_2:
+- ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
++ ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
+ ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 14c53ed5cca6..c914b338691b 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1596,7 +1596,7 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
+ }
+ }
+
+- rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
++ rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
+ if (!rwi) {
+ mutex_unlock(&adapter->rwi_lock);
+ ibmvnic_close(netdev);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 176c99b8251d..904b42becd45 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -1554,17 +1554,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
+ netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
+
+ /* Copy the address first, so that we avoid a possible race with
+- * .set_rx_mode(). If we copy after changing the address in the filter
+- * list, we might open ourselves to a narrow race window where
+- * .set_rx_mode could delete our dev_addr filter and prevent traffic
+- * from passing.
++ * .set_rx_mode().
++ * - Remove old address from MAC filter
++ * - Copy new address
++ * - Add new address to MAC filter
+ */
+- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+-
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_del_mac_filter(vsi, netdev->dev_addr);
+- i40e_add_mac_filter(vsi, addr->sa_data);
++ ether_addr_copy(netdev->dev_addr, addr->sa_data);
++ i40e_add_mac_filter(vsi, netdev->dev_addr);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
++
+ if (vsi->type == I40E_VSI_MAIN) {
+ i40e_status ret;
+
+diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
+index 6223930a8155..6f57b0b7d57a 100644
+--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
++++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
+@@ -808,7 +808,7 @@ __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
+ struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
+ struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
+- u64 data0, data1 = 0, steer_ctrl = 0;
++ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
+index 4a67c55aa9f1..11a9add81849 100644
+--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
++++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
+@@ -912,7 +912,7 @@ static const struct net_device_ops w90p910_ether_netdev_ops = {
+ .ndo_validate_addr = eth_validate_addr,
+ };
+
+-static void __init get_mac_address(struct net_device *dev)
++static void get_mac_address(struct net_device *dev)
+ {
+ struct w90p910_ether *ether = netdev_priv(dev);
+ struct platform_device *pdev;
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+index 3dd973475125..4b444351ab7d 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+@@ -1125,7 +1125,8 @@ netxen_validate_firmware(struct netxen_adapter *adapter)
+ return -EINVAL;
+ }
+ val = nx_get_bios_version(adapter);
+- netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
++ if (netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios))
++ return -EIO;
+ if ((__force u32)val != bios) {
+ dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
+ fw_name[fw_type]);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+index 5f52f14761a3..b73bcbeb5f27 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+@@ -2351,6 +2351,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
+ if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
+ DP_NOTICE(cdev,
+ "Unable to map frag - dropping packet\n");
++ rc = -ENOMEM;
+ goto err;
+ }
+
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index 22e466ea919a..dcd10dba08c7 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -722,7 +722,7 @@ static void ca8210_mlme_reset_worker(struct work_struct *work)
+ static void ca8210_rx_done(struct cas_control *cas_ctl)
+ {
+ u8 *buf;
+- u8 len;
++ unsigned int len;
+ struct work_priv_container *mlme_reset_wpc;
+ struct ca8210_priv *priv = cas_ctl->priv;
+
+@@ -731,7 +731,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl)
+ if (len > CA8210_SPI_BUF_SIZE) {
+ dev_crit(
+ &priv->spi->dev,
+- "Received packet len (%d) erroneously long\n",
++ "Received packet len (%u) erroneously long\n",
+ len
+ );
+ goto finish;
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index e069b310d6a6..b62c41114e34 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -2212,6 +2212,10 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
+ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+
++ /* Added to support MAC address changes */
++ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
++ ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
++
+ return 0;
+ }
+
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 969474c9d297..891f8f975b43 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -151,17 +151,18 @@ static bool qmimux_has_slaves(struct usbnet *dev)
+
+ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ {
+- unsigned int len, offset = sizeof(struct qmimux_hdr);
++ unsigned int len, offset = 0;
+ struct qmimux_hdr *hdr;
+ struct net_device *net;
+ struct sk_buff *skbn;
++ u8 qmimux_hdr_sz = sizeof(*hdr);
+
+- while (offset < skb->len) {
+- hdr = (struct qmimux_hdr *)skb->data;
++ while (offset + qmimux_hdr_sz < skb->len) {
++ hdr = (struct qmimux_hdr *)(skb->data + offset);
+ len = be16_to_cpu(hdr->pkt_len);
+
+ /* drop the packet, bogus length */
+- if (offset + len > skb->len)
++ if (offset + len + qmimux_hdr_sz > skb->len)
+ return 0;
+
+ /* control packet, we do not know what to do */
+@@ -176,7 +177,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ return 0;
+ skbn->dev = net;
+
+- switch (skb->data[offset] & 0xf0) {
++ switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) {
+ case 0x40:
+ skbn->protocol = htons(ETH_P_IP);
+ break;
+@@ -188,12 +189,12 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ goto skip;
+ }
+
+- skb_put_data(skbn, skb->data + offset, len);
++ skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, len);
+ if (netif_rx(skbn) != NET_RX_SUCCESS)
+ return 0;
+
+ skip:
+- offset += len + sizeof(struct qmimux_hdr);
++ offset += len + qmimux_hdr_sz;
+ }
+ return 1;
+ }
+diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c
+index 85f2ca989565..ef3ffa5ad466 100644
+--- a/drivers/net/wireless/broadcom/b43/phy_common.c
++++ b/drivers/net/wireless/broadcom/b43/phy_common.c
+@@ -616,7 +616,7 @@ struct b43_c32 b43_cordic(int theta)
+ u8 i;
+ s32 tmp;
+ s8 signx = 1;
+- u32 angle = 0;
++ s32 angle = 0;
+ struct b43_c32 ret = { .i = 39797, .q = 0, };
+
+ while (theta > (180 << 16))
+diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
+index 6c43322dbb97..2998941fdeca 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson.c
++++ b/drivers/pinctrl/meson/pinctrl-meson.c
+@@ -272,7 +272,8 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
+ case PIN_CONFIG_BIAS_DISABLE:
+ dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
+
+- meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
++ meson_calc_reg_and_bit(bank, pin, REG_PULLEN, &reg,
++ &bit);
+ ret = regmap_update_bits(pc->reg_pullen, reg,
+ BIT(bit), 0);
+ if (ret)
+diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
+index 3bc2eea7b3b7..62926804949d 100644
+--- a/drivers/power/supply/olpc_battery.c
++++ b/drivers/power/supply/olpc_battery.c
+@@ -427,14 +427,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
+ if (ret)
+ return ret;
+
+- val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256;
++ val->intval = (s16)be16_to_cpu(ec_word) * 10 / 256;
+ break;
+ case POWER_SUPPLY_PROP_TEMP_AMBIENT:
+ ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
+ if (ret)
+ return ret;
+
+- val->intval = (int)be16_to_cpu(ec_word) * 100 / 256;
++ val->intval = (int)be16_to_cpu(ec_word) * 10 / 256;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2);
+diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
+index 84752152d41f..fab02bd73d85 100644
+--- a/drivers/s390/scsi/zfcp_aux.c
++++ b/drivers/s390/scsi/zfcp_aux.c
+@@ -274,16 +274,16 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
+ */
+ int zfcp_status_read_refill(struct zfcp_adapter *adapter)
+ {
+- while (atomic_read(&adapter->stat_miss) > 0)
++ while (atomic_add_unless(&adapter->stat_miss, -1, 0))
+ if (zfcp_fsf_status_read(adapter->qdio)) {
++ atomic_inc(&adapter->stat_miss); /* undo add -1 */
+ if (atomic_read(&adapter->stat_miss) >=
+ adapter->stat_read_buf_num) {
+ zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
+ return 1;
+ }
+ break;
+- } else
+- atomic_dec(&adapter->stat_miss);
++ }
+ return 0;
+ }
+
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+index 6844ba361616..89f09b122135 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+@@ -2372,7 +2372,7 @@ static int _bnx2fc_create(struct net_device *netdev,
+ if (!interface) {
+ printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
+ rc = -ENOMEM;
+- goto ifput_err;
++ goto netdev_err;
+ }
+
+ if (is_vlan_dev(netdev)) {
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 24b6e56f6e97..6c2b098b7609 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -13941,7 +13941,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
+ hw_page_size))/hw_page_size;
+
+ /* If needed, Adjust page count to match the max the adapter supports */
+- if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
++ if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
++ (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
+ queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
+
+ INIT_LIST_HEAD(&queue->list);
+diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+index d4fa41be80f9..0c00bb27c9c5 100644
+--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
++++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+@@ -631,8 +631,11 @@ static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
+
+ static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
+ {
++ struct cxgbit_sock *csk = handle;
++
+ pr_debug("%s cxgbit_device %p\n", __func__, handle);
+ kfree_skb(skb);
++ cxgbit_put_csk(csk);
+ }
+
+ static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
+@@ -1147,7 +1150,7 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
+ rpl5->opt0 = cpu_to_be64(opt0);
+ rpl5->opt2 = cpu_to_be32(opt2);
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
+- t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
++ t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
+ cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
+ }
+
+diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
+index 4fd775ace541..6340e2e7ffbe 100644
+--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
++++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
+@@ -58,6 +58,7 @@ static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&cdev->kref);
++ spin_lock_init(&cdev->np_lock);
+
+ cdev->lldi = *lldi;
+
+diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
+index 95d34d7565c9..1cf78cec7461 100644
+--- a/drivers/tty/serial/sunsu.c
++++ b/drivers/tty/serial/sunsu.c
+@@ -1393,22 +1393,43 @@ static inline struct console *SUNSU_CONSOLE(void)
+ static enum su_type su_get_type(struct device_node *dp)
+ {
+ struct device_node *ap = of_find_node_by_path("/aliases");
++ enum su_type rc = SU_PORT_PORT;
+
+ if (ap) {
+ const char *keyb = of_get_property(ap, "keyboard", NULL);
+ const char *ms = of_get_property(ap, "mouse", NULL);
++ struct device_node *match;
+
+ if (keyb) {
+- if (dp == of_find_node_by_path(keyb))
+- return SU_PORT_KBD;
++ match = of_find_node_by_path(keyb);
++
++ /*
++ * The pointer is used as an identifier not
++ * as a pointer, we can drop the refcount on
++ * the of__node immediately after getting it.
++ */
++ of_node_put(match);
++
++ if (dp == match) {
++ rc = SU_PORT_KBD;
++ goto out;
++ }
+ }
+ if (ms) {
+- if (dp == of_find_node_by_path(ms))
+- return SU_PORT_MS;
++ match = of_find_node_by_path(ms);
++
++ of_node_put(match);
++
++ if (dp == match) {
++ rc = SU_PORT_MS;
++ goto out;
++ }
+ }
+ }
+
+- return SU_PORT_PORT;
++out:
++ of_node_put(ap);
++ return rc;
+ }
+
+ static int su_probe(struct platform_device *op)
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index 248533c0f9ac..831758335e2c 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -522,6 +522,8 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
+ goto out;
+ }
+
++ vsock->guest_cid = 0; /* no CID assigned yet */
++
+ atomic_set(&vsock->queued_replies, 0);
+
+ vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index ff5d32cf9578..92eb9c3052ee 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -3438,7 +3438,6 @@ retry:
+ tcap->cap_id = t_cap_id;
+ tcap->seq = t_seq - 1;
+ tcap->issue_seq = t_seq - 1;
+- tcap->mseq = t_mseq;
+ tcap->issued |= issued;
+ tcap->implemented |= issued;
+ if (cap == ci->i_auth_cap)
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index d4aaddec1b16..21643d2b3fee 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -1210,6 +1210,7 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
+
+ if (rv < 0) {
+ log_error(ls, "create_lkb idr error %d", rv);
++ dlm_free_lkb(lkb);
+ return rv;
+ }
+
+@@ -4176,6 +4177,7 @@ static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
+ (unsigned long long)lkb->lkb_recover_seq,
+ ms->m_header.h_nodeid, ms->m_lkid);
+ error = -ENOENT;
++ dlm_put_lkb(lkb);
+ goto fail;
+ }
+
+@@ -4229,6 +4231,7 @@ static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
+ lkb->lkb_id, lkb->lkb_remid,
+ ms->m_header.h_nodeid, ms->m_lkid);
+ error = -ENOENT;
++ dlm_put_lkb(lkb);
+ goto fail;
+ }
+
+@@ -5789,20 +5792,20 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
+ goto out;
+ }
+ }
+-
+- /* After ua is attached to lkb it will be freed by dlm_free_lkb().
+- When DLM_IFL_USER is set, the dlm knows that this is a userspace
+- lock and that lkb_astparam is the dlm_user_args structure. */
+-
+ error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
+ fake_astfn, ua, fake_bastfn, &args);
+- lkb->lkb_flags |= DLM_IFL_USER;
+-
+ if (error) {
++ kfree(ua->lksb.sb_lvbptr);
++ ua->lksb.sb_lvbptr = NULL;
++ kfree(ua);
+ __put_lkb(ls, lkb);
+ goto out;
+ }
+
++ /* After ua is attached to lkb it will be freed by dlm_free_lkb().
++ When DLM_IFL_USER is set, the dlm knows that this is a userspace
++ lock and that lkb_astparam is the dlm_user_args structure. */
++ lkb->lkb_flags |= DLM_IFL_USER;
+ error = request_lock(ls, lkb, name, namelen, &args);
+
+ switch (error) {
+diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
+index 78a7c855b06b..610f72ae7ad6 100644
+--- a/fs/dlm/lockspace.c
++++ b/fs/dlm/lockspace.c
+@@ -680,11 +680,11 @@ static int new_lockspace(const char *name, const char *cluster,
+ kfree(ls->ls_recover_buf);
+ out_lkbidr:
+ idr_destroy(&ls->ls_lkbidr);
++ out_rsbtbl:
+ for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
+ if (ls->ls_remove_names[i])
+ kfree(ls->ls_remove_names[i]);
+ }
+- out_rsbtbl:
+ vfree(ls->ls_rsbtbl);
+ out_lsfree:
+ if (do_unreg)
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 863749e29bf9..c850579ae5a4 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -743,17 +743,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ the gfs2 structures. */
+ if (default_acl) {
+ error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
++ if (error)
++ goto fail_gunlock3;
+ posix_acl_release(default_acl);
++ default_acl = NULL;
+ }
+ if (acl) {
+- if (!error)
+- error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
++ error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
++ if (error)
++ goto fail_gunlock3;
+ posix_acl_release(acl);
++ acl = NULL;
+ }
+
+- if (error)
+- goto fail_gunlock3;
+-
+ error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
+ &gfs2_initxattrs, NULL);
+ if (error)
+@@ -788,10 +790,8 @@ fail_free_inode:
+ }
+ gfs2_rsqa_delete(ip, NULL);
+ fail_free_acls:
+- if (default_acl)
+- posix_acl_release(default_acl);
+- if (acl)
+- posix_acl_release(acl);
++ posix_acl_release(default_acl);
++ posix_acl_release(acl);
+ fail_gunlock:
+ gfs2_dir_no_add(&da);
+ gfs2_glock_dq_uninit(ghs);
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index b0eee90738ff..914cb3d72ddf 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -1695,9 +1695,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
+ goto next_iter;
+ }
+ if (ret == -E2BIG) {
++ n += rbm->bii - initial_bii;
+ rbm->bii = 0;
+ rbm->offset = 0;
+- n += (rbm->bii - initial_bii);
+ goto res_covered_end_of_rgrp;
+ }
+ return ret;
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index 066ac313ae5c..84857ffd2bb8 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -442,7 +442,7 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
+ fl->fl_start = req->a_res.lock.fl.fl_start;
+ fl->fl_end = req->a_res.lock.fl.fl_end;
+ fl->fl_type = req->a_res.lock.fl.fl_type;
+- fl->fl_pid = 0;
++ fl->fl_pid = -req->a_res.lock.fl.fl_pid;
+ break;
+ default:
+ status = nlm_stat_to_errno(req->a_res.status);
+diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
+index 7147e4aebecc..9846f7e95282 100644
+--- a/fs/lockd/xdr.c
++++ b/fs/lockd/xdr.c
+@@ -127,7 +127,7 @@ nlm_decode_lock(__be32 *p, struct nlm_lock *lock)
+
+ locks_init_lock(fl);
+ fl->fl_owner = current->files;
+- fl->fl_pid = (pid_t)lock->svid;
++ fl->fl_pid = current->tgid;
+ fl->fl_flags = FL_POSIX;
+ fl->fl_type = F_RDLCK; /* as good as anything else */
+ start = ntohl(*p++);
+@@ -269,7 +269,7 @@ nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
+ memset(lock, 0, sizeof(*lock));
+ locks_init_lock(&lock->fl);
+ lock->svid = ~(u32) 0;
+- lock->fl.fl_pid = (pid_t)lock->svid;
++ lock->fl.fl_pid = current->tgid;
+
+ if (!(p = nlm_decode_cookie(p, &argp->cookie))
+ || !(p = xdr_decode_string_inplace(p, &lock->caller,
+diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
+index 7ed9edf9aed4..70154f376695 100644
+--- a/fs/lockd/xdr4.c
++++ b/fs/lockd/xdr4.c
+@@ -119,7 +119,7 @@ nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
+
+ locks_init_lock(fl);
+ fl->fl_owner = current->files;
+- fl->fl_pid = (pid_t)lock->svid;
++ fl->fl_pid = current->tgid;
+ fl->fl_flags = FL_POSIX;
+ fl->fl_type = F_RDLCK; /* as good as anything else */
+ p = xdr_decode_hyper(p, &start);
+@@ -266,7 +266,7 @@ nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
+ memset(lock, 0, sizeof(*lock));
+ locks_init_lock(&lock->fl);
+ lock->svid = ~(u32) 0;
+- lock->fl.fl_pid = (pid_t)lock->svid;
++ lock->fl.fl_pid = current->tgid;
+
+ if (!(p = nlm4_decode_cookie(p, &argp->cookie))
+ || !(p = xdr_decode_string_inplace(p, &lock->caller,
+diff --git a/include/linux/hmm.h b/include/linux/hmm.h
+index 96e69979f84d..7d799c4d2669 100644
+--- a/include/linux/hmm.h
++++ b/include/linux/hmm.h
+@@ -437,8 +437,7 @@ struct hmm_devmem {
+ * enough and allocate struct page for it.
+ *
+ * The device driver can wrap the hmm_devmem struct inside a private device
+- * driver struct. The device driver must call hmm_devmem_remove() before the
+- * device goes away and before freeing the hmm_devmem struct memory.
++ * driver struct.
+ */
+ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
+ struct device *device,
+@@ -446,7 +445,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
+ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
+ struct device *device,
+ struct resource *res);
+-void hmm_devmem_remove(struct hmm_devmem *devmem);
+
+ /*
+ * hmm_devmem_page_set_drvdata - set per-page driver data field
+diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
+index f4058bd4c373..61769d4b7dba 100644
+--- a/include/uapi/linux/input-event-codes.h
++++ b/include/uapi/linux/input-event-codes.h
+@@ -742,6 +742,15 @@
+
+ #define ABS_MISC 0x28
+
++/*
++ * 0x2e is reserved and should not be used in input drivers.
++ * It was used by HID as ABS_MISC+6 and userspace needs to detect if
++ * the next ABS_* event is correct or is just ABS_MISC + n.
++ * We define here ABS_RESERVED so userspace can rely on it and detect
++ * the situation described above.
++ */
++#define ABS_RESERVED 0x2e
++
+ #define ABS_MT_SLOT 0x2f /* MT slot being modified */
+ #define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
+ #define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 6a219fea4926..6d6ce2c3a364 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1672,8 +1672,6 @@ static __latent_entropy struct task_struct *copy_process(
+
+ posix_cpu_timers_init(p);
+
+- p->start_time = ktime_get_ns();
+- p->real_start_time = ktime_get_boot_ns();
+ p->io_context = NULL;
+ p->audit_context = NULL;
+ cgroup_fork(p);
+@@ -1837,6 +1835,17 @@ static __latent_entropy struct task_struct *copy_process(
+ if (retval)
+ goto bad_fork_free_pid;
+
++ /*
++ * From this point on we must avoid any synchronous user-space
++ * communication until we take the tasklist-lock. In particular, we do
++ * not want user-space to be able to predict the process start-time by
++ * stalling fork(2) after we recorded the start_time but before it is
++ * visible to the system.
++ */
++
++ p->start_time = ktime_get_ns();
++ p->real_start_time = ktime_get_boot_ns();
++
+ /*
+ * Make it visible to the rest of the system, but dont wake it up yet.
+ * Need tasklist lock for parent etc handling!
+diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
+index a37a3b4b6342..e0665549af59 100644
+--- a/kernel/irq/affinity.c
++++ b/kernel/irq/affinity.c
+@@ -108,7 +108,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
+ int affv = nvecs - affd->pre_vectors - affd->post_vectors;
+ int last_affv = affv + affd->pre_vectors;
+ nodemask_t nodemsk = NODE_MASK_NONE;
+- struct cpumask *masks;
++ struct cpumask *masks = NULL;
+ cpumask_var_t nmsk, *node_to_possible_cpumask;
+
+ /*
+@@ -121,13 +121,13 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
+ if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+ return NULL;
+
+- masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
+- if (!masks)
+- goto out;
+-
+ node_to_possible_cpumask = alloc_node_to_possible_cpumask();
+ if (!node_to_possible_cpumask)
+- goto out;
++ goto outcpumsk;
++
++ masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
++ if (!masks)
++ goto outnodemsk;
+
+ /* Fill out vectors at the beginning that don't need affinity */
+ for (curvec = 0; curvec < affd->pre_vectors; curvec++)
+@@ -192,8 +192,9 @@ done:
+ /* Fill out vectors at the end that don't need affinity */
+ for (; curvec < nvecs; curvec++)
+ cpumask_copy(masks + curvec, irq_default_affinity);
++outnodemsk:
+ free_node_to_possible_cpumask(node_to_possible_cpumask);
+-out:
++outcpumsk:
+ free_cpumask_var(nmsk);
+ return masks;
+ }
+diff --git a/kernel/memremap.c b/kernel/memremap.c
+index 790ddf3bce19..0d676d6d2f62 100644
+--- a/kernel/memremap.c
++++ b/kernel/memremap.c
+@@ -379,15 +379,12 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
+ is_ram = region_intersects(align_start, align_size,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
+
+- if (is_ram == REGION_MIXED) {
+- WARN_ONCE(1, "%s attempted on mixed region %pr\n",
+- __func__, res);
++ if (is_ram != REGION_DISJOINT) {
++ WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
++ is_ram == REGION_MIXED ? "mixed" : "ram", res);
+ return ERR_PTR(-ENXIO);
+ }
+
+- if (is_ram == REGION_INTERSECTS)
+- return __va(res->start);
+-
+ if (!ref)
+ return ERR_PTR(-EINVAL);
+
+@@ -482,7 +479,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
+ devres_free(page_map);
+ return ERR_PTR(error);
+ }
+-EXPORT_SYMBOL(devm_memremap_pages);
++EXPORT_SYMBOL_GPL(devm_memremap_pages);
+
+ unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+ {
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 7240bb4a4090..6e108af21481 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -369,10 +369,9 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+ }
+ }
+
+-/* Iterate thr' all leaf cfs_rq's on a runqueue */
+-#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
+- list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
+- leaf_cfs_rq_list)
++/* Iterate through all leaf cfs_rq's on a runqueue: */
++#define for_each_leaf_cfs_rq(rq, cfs_rq) \
++ list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+
+ /* Do the two (enqueued) entities belong to the same group ? */
+ static inline struct cfs_rq *
+@@ -465,8 +464,8 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+ {
+ }
+
+-#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
+- for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
++#define for_each_leaf_cfs_rq(rq, cfs_rq) \
++ for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
+
+ static inline struct sched_entity *parent_entity(struct sched_entity *se)
+ {
+@@ -6970,27 +6969,10 @@ static void attach_tasks(struct lb_env *env)
+
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+
+-static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+-{
+- if (cfs_rq->load.weight)
+- return false;
+-
+- if (cfs_rq->avg.load_sum)
+- return false;
+-
+- if (cfs_rq->avg.util_sum)
+- return false;
+-
+- if (cfs_rq->runnable_load_sum)
+- return false;
+-
+- return true;
+-}
+-
+ static void update_blocked_averages(int cpu)
+ {
+ struct rq *rq = cpu_rq(cpu);
+- struct cfs_rq *cfs_rq, *pos;
++ struct cfs_rq *cfs_rq;
+ struct rq_flags rf;
+
+ rq_lock_irqsave(rq, &rf);
+@@ -7000,7 +6982,7 @@ static void update_blocked_averages(int cpu)
+ * Iterates the task_group tree in a bottom up fashion, see
+ * list_add_leaf_cfs_rq() for details.
+ */
+- for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
++ for_each_leaf_cfs_rq(rq, cfs_rq) {
+ struct sched_entity *se;
+
+ /* throttled entities do not contribute to load */
+@@ -7014,13 +6996,6 @@ static void update_blocked_averages(int cpu)
+ se = cfs_rq->tg->se[cpu];
+ if (se && !skip_blocked_update(se))
+ update_load_avg(se, 0);
+-
+- /*
+- * There can be a lot of idle CPU cgroups. Don't let fully
+- * decayed cfs_rqs linger on the list.
+- */
+- if (cfs_rq_is_decayed(cfs_rq))
+- list_del_leaf_cfs_rq(cfs_rq);
+ }
+ rq_unlock_irqrestore(rq, &rf);
+ }
+@@ -9580,10 +9555,10 @@ const struct sched_class fair_sched_class = {
+ #ifdef CONFIG_SCHED_DEBUG
+ void print_cfs_stats(struct seq_file *m, int cpu)
+ {
+- struct cfs_rq *cfs_rq, *pos;
++ struct cfs_rq *cfs_rq;
+
+ rcu_read_lock();
+- for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
++ for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
+ print_cfs_rq(m, cpu, cfs_rq);
+ rcu_read_unlock();
+ }
+diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
+index 4add700ddfe3..ad523be0313b 100644
+--- a/lib/raid6/Makefile
++++ b/lib/raid6/Makefile
+@@ -18,6 +18,21 @@ quiet_cmd_unroll = UNROLL $@
+
+ ifeq ($(CONFIG_ALTIVEC),y)
+ altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
++
++ifeq ($(cc-name),clang)
++# clang ppc port does not yet support -maltivec when -msoft-float is
++# enabled. A future release of clang will resolve this
++# https://bugs.llvm.org/show_bug.cgi?id=31177
++CFLAGS_REMOVE_altivec1.o += -msoft-float
++CFLAGS_REMOVE_altivec2.o += -msoft-float
++CFLAGS_REMOVE_altivec4.o += -msoft-float
++CFLAGS_REMOVE_altivec8.o += -msoft-float
++CFLAGS_REMOVE_altivec8.o += -msoft-float
++CFLAGS_REMOVE_vpermxor1.o += -msoft-float
++CFLAGS_REMOVE_vpermxor2.o += -msoft-float
++CFLAGS_REMOVE_vpermxor4.o += -msoft-float
++CFLAGS_REMOVE_vpermxor8.o += -msoft-float
++endif
+ endif
+
+ # The GCC option -ffreestanding is required in order to compile code containing
+diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
+index b9cdeecc19dc..777b491df25d 100644
+--- a/lib/test_debug_virtual.c
++++ b/lib/test_debug_virtual.c
+@@ -5,6 +5,7 @@
+ #include <linux/vmalloc.h>
+ #include <linux/slab.h>
+ #include <linux/sizes.h>
++#include <linux/io.h>
+
+ #include <asm/page.h>
+ #ifdef CONFIG_MIPS
+diff --git a/mm/hmm.c b/mm/hmm.c
+index 81ff1dbbf8a8..a5def9f34385 100644
+--- a/mm/hmm.c
++++ b/mm/hmm.c
+@@ -767,7 +767,6 @@ static void hmm_devmem_ref_exit(void *data)
+
+ devmem = container_of(ref, struct hmm_devmem, ref);
+ percpu_ref_exit(ref);
+- devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
+ }
+
+ static void hmm_devmem_ref_kill(void *data)
+@@ -778,7 +777,6 @@ static void hmm_devmem_ref_kill(void *data)
+ devmem = container_of(ref, struct hmm_devmem, ref);
+ percpu_ref_kill(ref);
+ wait_for_completion(&devmem->completion);
+- devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
+ }
+
+ static int hmm_devmem_fault(struct vm_area_struct *vma,
+@@ -818,7 +816,7 @@ static void hmm_devmem_radix_release(struct resource *resource)
+ mutex_unlock(&hmm_devmem_lock);
+ }
+
+-static void hmm_devmem_release(struct device *dev, void *data)
++static void hmm_devmem_release(void *data)
+ {
+ struct hmm_devmem *devmem = data;
+ struct resource *resource = devmem->resource;
+@@ -826,11 +824,6 @@ static void hmm_devmem_release(struct device *dev, void *data)
+ struct zone *zone;
+ struct page *page;
+
+- if (percpu_ref_tryget_live(&devmem->ref)) {
+- dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
+- percpu_ref_put(&devmem->ref);
+- }
+-
+ /* pages are dead and unused, undo the arch mapping */
+ start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
+ npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
+@@ -961,19 +954,6 @@ error:
+ return ret;
+ }
+
+-static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
+-{
+- struct hmm_devmem *devmem = data;
+-
+- return devmem->resource == match_data;
+-}
+-
+-static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
+-{
+- devres_release(devmem->device, &hmm_devmem_release,
+- &hmm_devmem_match, devmem->resource);
+-}
+-
+ /*
+ * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
+ *
+@@ -1001,8 +981,7 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
+
+ static_branch_enable(&device_private_key);
+
+- devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
+- GFP_KERNEL, dev_to_node(device));
++ devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
+ if (!devmem)
+ return ERR_PTR(-ENOMEM);
+
+@@ -1016,11 +995,11 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
+ ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
+ 0, GFP_KERNEL);
+ if (ret)
+- goto error_percpu_ref;
++ return ERR_PTR(ret);
+
+- ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
++ ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
+ if (ret)
+- goto error_devm_add_action;
++ return ERR_PTR(ret);
+
+ size = ALIGN(size, PA_SECTION_SIZE);
+ addr = min((unsigned long)iomem_resource.end,
+@@ -1040,16 +1019,12 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
+
+ devmem->resource = devm_request_mem_region(device, addr, size,
+ dev_name(device));
+- if (!devmem->resource) {
+- ret = -ENOMEM;
+- goto error_no_resource;
+- }
++ if (!devmem->resource)
++ return ERR_PTR(-ENOMEM);
+ break;
+ }
+- if (!devmem->resource) {
+- ret = -ERANGE;
+- goto error_no_resource;
+- }
++ if (!devmem->resource)
++ return ERR_PTR(-ERANGE);
+
+ devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
+ devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
+@@ -1058,30 +1033,15 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
+
+ ret = hmm_devmem_pages_create(devmem);
+ if (ret)
+- goto error_pages;
+-
+- devres_add(device, devmem);
++ return ERR_PTR(ret);
+
+- ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
+- if (ret) {
+- hmm_devmem_remove(devmem);
++ ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
++ if (ret)
+ return ERR_PTR(ret);
+- }
+
+ return devmem;
+-
+-error_pages:
+- devm_release_mem_region(device, devmem->resource->start,
+- resource_size(devmem->resource));
+-error_no_resource:
+-error_devm_add_action:
+- hmm_devmem_ref_kill(&devmem->ref);
+- hmm_devmem_ref_exit(&devmem->ref);
+-error_percpu_ref:
+- devres_free(devmem);
+- return ERR_PTR(ret);
+ }
+-EXPORT_SYMBOL(hmm_devmem_add);
++EXPORT_SYMBOL_GPL(hmm_devmem_add);
+
+ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
+ struct device *device,
+@@ -1095,8 +1055,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
+
+ static_branch_enable(&device_private_key);
+
+- devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
+- GFP_KERNEL, dev_to_node(device));
++ devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
+ if (!devmem)
+ return ERR_PTR(-ENOMEM);
+
+@@ -1110,12 +1069,12 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
+ ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
+ 0, GFP_KERNEL);
+ if (ret)
+- goto error_percpu_ref;
++ return ERR_PTR(ret);
+
+- ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
++ ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
++ &devmem->ref);
+ if (ret)
+- goto error_devm_add_action;
+-
++ return ERR_PTR(ret);
+
+ devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
+ devmem->pfn_last = devmem->pfn_first +
+@@ -1123,58 +1082,20 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
+
+ ret = hmm_devmem_pages_create(devmem);
+ if (ret)
+- goto error_devm_add_action;
++ return ERR_PTR(ret);
+
+- devres_add(device, devmem);
++ ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
++ if (ret)
++ return ERR_PTR(ret);
+
+- ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
+- if (ret) {
+- hmm_devmem_remove(devmem);
++ ret = devm_add_action_or_reset(device, hmm_devmem_ref_kill,
++ &devmem->ref);
++ if (ret)
+ return ERR_PTR(ret);
+- }
+
+ return devmem;
+-
+-error_devm_add_action:
+- hmm_devmem_ref_kill(&devmem->ref);
+- hmm_devmem_ref_exit(&devmem->ref);
+-error_percpu_ref:
+- devres_free(devmem);
+- return ERR_PTR(ret);
+-}
+-EXPORT_SYMBOL(hmm_devmem_add_resource);
+-
+-/*
+- * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
+- *
+- * @devmem: hmm_devmem struct use to track and manage the ZONE_DEVICE memory
+- *
+- * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf
+- * of the device driver. It will free struct page and remove the resource that
+- * reserved the physical address range for this device memory.
+- */
+-void hmm_devmem_remove(struct hmm_devmem *devmem)
+-{
+- resource_size_t start, size;
+- struct device *device;
+- bool cdm = false;
+-
+- if (!devmem)
+- return;
+-
+- device = devmem->device;
+- start = devmem->resource->start;
+- size = resource_size(devmem->resource);
+-
+- cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
+- hmm_devmem_ref_kill(&devmem->ref);
+- hmm_devmem_ref_exit(&devmem->ref);
+- hmm_devmem_pages_remove(devmem);
+-
+- if (!cdm)
+- devm_release_mem_region(device, start, size);
+ }
+-EXPORT_SYMBOL(hmm_devmem_remove);
++EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
+
+ /*
+ * A device driver that wants to handle multiple devices memory through a
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index d4b5f29906b9..c7c74a927d6f 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -35,6 +35,7 @@
+ #include <linux/memblock.h>
+ #include <linux/bootmem.h>
+ #include <linux/compaction.h>
++#include <linux/rmap.h>
+
+ #include <asm/tlbflush.h>
+
+@@ -1391,6 +1392,21 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
+ pfn = page_to_pfn(compound_head(page))
+ + hpage_nr_pages(page) - 1;
+
++ /*
++ * HWPoison pages have elevated reference counts so the migration would
++ * fail on them. It also doesn't make any sense to migrate them in the
++ * first place. Still try to unmap such a page in case it is still mapped
++ * (e.g. current hwpoison implementation doesn't unmap KSM pages but keep
++ * the unmap as the catch all safety net).
++ */
++ if (PageHWPoison(page)) {
++ if (WARN_ON(PageLRU(page)))
++ isolate_lru_page(page);
++ if (page_mapped(page))
++ try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
++ continue;
++ }
++
+ if (!get_page_unless_zero(page))
+ continue;
+ /*
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 08e8cd21770c..af3c4c5a0b4e 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2218,7 +2218,8 @@ int try_to_unuse(unsigned int type, bool frontswap,
+ */
+ if (PageSwapCache(page) &&
+ likely(page_private(page) == entry.val) &&
+- !page_swapped(page))
++ (!PageTransCompound(page) ||
++ !swap_page_trans_huge_swapped(si, entry)))
+ delete_from_swap_cache(compound_head(page));
+
+ /*
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 3ec5a82929b2..ef0f8fe3ac08 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -181,6 +181,12 @@ static int parse_opts(char *opts, struct p9_client *clnt)
+ ret = r;
+ continue;
+ }
++ if (option < 4096) {
++ p9_debug(P9_DEBUG_ERROR,
++ "msize should be at least 4k\n");
++ ret = -EINVAL;
++ continue;
++ }
+ clnt->msize = option;
+ break;
+ case Opt_trans:
+@@ -996,10 +1002,18 @@ static int p9_client_version(struct p9_client *c)
+ else if (!strncmp(version, "9P2000", 6))
+ c->proto_version = p9_proto_legacy;
+ else {
++ p9_debug(P9_DEBUG_ERROR,
++ "server returned an unknown version: %s\n", version);
+ err = -EREMOTEIO;
+ goto error;
+ }
+
++ if (msize < 4096) {
++ p9_debug(P9_DEBUG_ERROR,
++ "server returned a msize < 4096: %d\n", msize);
++ err = -EREMOTEIO;
++ goto error;
++ }
+ if (msize < c->msize)
+ c->msize = msize;
+
+@@ -1064,6 +1078,13 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
+ if (clnt->msize > clnt->trans_mod->maxsize)
+ clnt->msize = clnt->trans_mod->maxsize;
+
++ if (clnt->msize < 4096) {
++ p9_debug(P9_DEBUG_ERROR,
++ "Please specify a msize of at least 4k\n");
++ err = -EINVAL;
++ goto free_client;
++ }
++
+ err = p9_client_version(clnt);
+ if (err)
+ goto close_trans;
+diff --git a/net/mac80211/status.c b/net/mac80211/status.c
+index 35912270087c..b18466cf466c 100644
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -545,6 +545,11 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
+ }
+
+ ieee80211_led_tx(local);
++
++ if (skb_has_frag_list(skb)) {
++ kfree_skb_list(skb_shinfo(skb)->frag_list);
++ skb_shinfo(skb)->frag_list = NULL;
++ }
+ }
+
+ /*
+diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
+index 75d52aed6fdb..e563921e6af5 100644
+--- a/net/netfilter/ipset/ip_set_list_set.c
++++ b/net/netfilter/ipset/ip_set_list_set.c
+@@ -542,8 +542,8 @@ nla_put_failure:
+ ret = -EMSGSIZE;
+ } else {
+ cb->args[IPSET_CB_ARG0] = i;
++ ipset_nest_end(skb, atd);
+ }
+- ipset_nest_end(skb, atd);
+ out:
+ rcu_read_unlock();
+ return ret;
+diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
+index a975efd6b8c3..9da303461069 100644
+--- a/net/netfilter/nf_conntrack_seqadj.c
++++ b/net/netfilter/nf_conntrack_seqadj.c
+@@ -115,12 +115,12 @@ static void nf_ct_sack_block_adjust(struct sk_buff *skb,
+ /* TCP SACK sequence number adjustment */
+ static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
+ unsigned int protoff,
+- struct tcphdr *tcph,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+ {
+- unsigned int dir, optoff, optend;
++ struct tcphdr *tcph = (void *)skb->data + protoff;
+ struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
++ unsigned int dir, optoff, optend;
+
+ optoff = protoff + sizeof(struct tcphdr);
+ optend = protoff + tcph->doff * 4;
+@@ -128,6 +128,7 @@ static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
+ if (!skb_make_writable(skb, optend))
+ return 0;
+
++ tcph = (void *)skb->data + protoff;
+ dir = CTINFO2DIR(ctinfo);
+
+ while (optoff < optend) {
+@@ -207,7 +208,7 @@ int nf_ct_seq_adjust(struct sk_buff *skb,
+ ntohl(newack));
+ tcph->ack_seq = newack;
+
+- res = nf_ct_sack_adjust(skb, protoff, tcph, ct, ctinfo);
++ res = nf_ct_sack_adjust(skb, protoff, ct, ctinfo);
+ out:
+ spin_unlock_bh(&ct->lock);
+
+diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
+index af8345fc4fbd..ed0ea64b8d04 100644
+--- a/net/netfilter/nf_nat_core.c
++++ b/net/netfilter/nf_nat_core.c
+@@ -97,7 +97,8 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
+ dst = skb_dst(skb);
+ if (dst->xfrm)
+ dst = ((struct xfrm_dst *)dst)->route;
+- dst_hold(dst);
++ if (!dst_hold_safe(dst))
++ return -EHOSTUNREACH;
+
+ dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
+ if (IS_ERR(dst))
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index f41ffb22652c..cc08cb1292a9 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1120,7 +1120,7 @@ static int svcauth_gss_legacy_init(struct svc_rqst *rqstp,
+ struct kvec *resv = &rqstp->rq_res.head[0];
+ struct rsi *rsip, rsikey;
+ int ret;
+- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
++ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
+
+ memset(&rsikey, 0, sizeof(rsikey));
+ ret = gss_read_verf(gc, argv, authp,
+@@ -1231,7 +1231,7 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
+ uint64_t handle;
+ int status;
+ int ret;
+- struct net *net = rqstp->rq_xprt->xpt_net;
++ struct net *net = SVC_NET(rqstp);
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+ memset(&ud, 0, sizeof(ud));
+@@ -1422,7 +1422,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
+ __be32 *rpcstart;
+ __be32 *reject_stat = resv->iov_base + resv->iov_len;
+ int ret;
+- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
++ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
+
+ dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",
+ argv->iov_len);
+@@ -1710,7 +1710,7 @@ svcauth_gss_release(struct svc_rqst *rqstp)
+ struct rpc_gss_wire_cred *gc = &gsd->clcred;
+ struct xdr_buf *resbuf = &rqstp->rq_res;
+ int stat = -EINVAL;
+- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
++ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
+
+ if (gc->gc_proc != RPC_GSS_PROC_DATA)
+ goto out;
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 79d55d949d9a..f2cf4edf219b 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -54,6 +54,11 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
+ h->last_refresh = now;
+ }
+
++static void cache_fresh_locked(struct cache_head *head, time_t expiry,
++ struct cache_detail *detail);
++static void cache_fresh_unlocked(struct cache_head *head,
++ struct cache_detail *detail);
++
+ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
+ struct cache_head *key, int hash)
+ {
+@@ -95,6 +100,7 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
+ if (cache_is_expired(detail, tmp)) {
+ hlist_del_init(&tmp->cache_list);
+ detail->entries --;
++ cache_fresh_locked(tmp, 0, detail);
+ freeme = tmp;
+ break;
+ }
+@@ -110,8 +116,10 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
+ cache_get(new);
+ write_unlock(&detail->hash_lock);
+
+- if (freeme)
++ if (freeme) {
++ cache_fresh_unlocked(freeme, detail);
+ cache_put(freeme, detail);
++ }
+ return new;
+ }
+ EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 30192abfdc3b..05a58cc1b0cd 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2223,8 +2223,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
+ trace_rpc_socket_connect(xprt, sock, 0);
+ status = 0;
+ out:
+- xprt_unlock_connect(xprt, transport);
+ xprt_clear_connecting(xprt);
++ xprt_unlock_connect(xprt, transport);
+ xprt_wake_pending_tasks(xprt, status);
+ }
+
+@@ -2451,8 +2451,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ }
+ status = -EAGAIN;
+ out:
+- xprt_unlock_connect(xprt, transport);
+ xprt_clear_connecting(xprt);
++ xprt_unlock_connect(xprt, transport);
+ xprt_wake_pending_tasks(xprt, status);
+ }
+
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index 8e75319dd9c0..06dec32503bd 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -341,6 +341,12 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+
+ skb->sp->xvec[skb->sp->len++] = x;
+
++ skb_dst_force(skb);
++ if (!skb_dst(skb)) {
++ XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
++ goto drop;
++ }
++
+ lock:
+ spin_lock(&x->lock);
+
+@@ -380,7 +386,6 @@ lock:
+ XFRM_SKB_CB(skb)->seq.input.low = seq;
+ XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
+
+- skb_dst_force(skb);
+ dev_hold(skb->dev);
+
+ if (crypto_done)
+diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
+index c47660fba498..b226b230e8bf 100644
+--- a/net/xfrm/xfrm_output.c
++++ b/net/xfrm/xfrm_output.c
+@@ -103,6 +103,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
+ skb_dst_force(skb);
+ if (!skb_dst(skb)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
++ err = -EHOSTUNREACH;
+ goto error_nolock;
+ }
+
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 6c4ec69e11a0..0cd2bdf3b217 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -789,7 +789,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
+ {
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ si->sadcnt = net->xfrm.state_num;
+- si->sadhcnt = net->xfrm.state_hmask;
++ si->sadhcnt = net->xfrm.state_hmask + 1;
+ si->sadhmcnt = xfrm_state_hashmax;
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ }
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index be9e5deb58ba..3edc9c04cb46 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -242,7 +242,7 @@ else
+ sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
+ "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
+ "$(if $(CONFIG_64BIT),64,32)" \
+- "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CFLAGS)" \
++ "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS)" \
+ "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
+ "$(if $(part-of-module),1,0)" "$(@)";
+ recordmcount_source := $(srctree)/scripts/recordmcount.pl
+diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
+index cb993801e4b2..16dc157f9662 100755
+--- a/scripts/checkstack.pl
++++ b/scripts/checkstack.pl
+@@ -46,8 +46,8 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
+ $xs = "[0-9a-f ]"; # hex character or space
+ $funcre = qr/^$x* <(.*)>:$/;
+ if ($arch eq 'aarch64') {
+- #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp,#-80]!
+- $re = qr/^.*stp.*sp,\#-([0-9]{1,8})\]\!/o;
++ #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]!
++ $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
+ } elsif ($arch eq 'arm') {
+ #c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64
+ $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
+diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
+index 9ee9bf7fd1a2..1dd24c5b9b47 100644
+--- a/scripts/kallsyms.c
++++ b/scripts/kallsyms.c
+@@ -221,6 +221,7 @@ static int symbol_valid(struct sym_entry *s)
+
+ static char *special_prefixes[] = {
+ "__crc_", /* modversions */
++ "__efistub_", /* arm64 EFI stub namespace */
+ NULL };
+
+ static char *special_suffixes[] = {
+diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
+index 6688ac5b991e..ffeb644bfecd 100644
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -2107,6 +2107,7 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
+ {
+ int i, j, rc;
+ u32 nel, len;
++ __be64 prefixbuf[1];
+ __le32 buf[3];
+ struct ocontext *l, *c;
+ u32 nodebuf[8];
+@@ -2216,21 +2217,30 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
+ goto out;
+ break;
+ }
+- case OCON_IBPKEY:
+- rc = next_entry(nodebuf, fp, sizeof(u32) * 4);
++ case OCON_IBPKEY: {
++ u32 pkey_lo, pkey_hi;
++
++ rc = next_entry(prefixbuf, fp, sizeof(u64));
++ if (rc)
++ goto out;
++
++ /* we need to have subnet_prefix in CPU order */
++ c->u.ibpkey.subnet_prefix = be64_to_cpu(prefixbuf[0]);
++
++ rc = next_entry(buf, fp, sizeof(u32) * 2);
+ if (rc)
+ goto out;
+
+- c->u.ibpkey.subnet_prefix = be64_to_cpu(*((__be64 *)nodebuf));
++ pkey_lo = le32_to_cpu(buf[0]);
++ pkey_hi = le32_to_cpu(buf[1]);
+
+- if (nodebuf[2] > 0xffff ||
+- nodebuf[3] > 0xffff) {
++ if (pkey_lo > U16_MAX || pkey_hi > U16_MAX) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+- c->u.ibpkey.low_pkey = le32_to_cpu(nodebuf[2]);
+- c->u.ibpkey.high_pkey = le32_to_cpu(nodebuf[3]);
++ c->u.ibpkey.low_pkey = pkey_lo;
++ c->u.ibpkey.high_pkey = pkey_hi;
+
+ rc = context_read_and_validate(&c->context[0],
+ p,
+@@ -2238,7 +2248,10 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
+ if (rc)
+ goto out;
+ break;
+- case OCON_IBENDPORT:
++ }
++ case OCON_IBENDPORT: {
++ u32 port;
++
+ rc = next_entry(buf, fp, sizeof(u32) * 2);
+ if (rc)
+ goto out;
+@@ -2248,12 +2261,13 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
+ if (rc)
+ goto out;
+
+- if (buf[1] > 0xff || buf[1] == 0) {
++ port = le32_to_cpu(buf[1]);
++ if (port > U8_MAX || port == 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+- c->u.ibendport.port = le32_to_cpu(buf[1]);
++ c->u.ibendport.port = port;
+
+ rc = context_read_and_validate(&c->context[0],
+ p,
+@@ -2261,7 +2275,8 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
+ if (rc)
+ goto out;
+ break;
+- }
++ } /* end case */
++ } /* end switch */
+ }
+ }
+ rc = 0;
+@@ -3104,6 +3119,7 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
+ {
+ unsigned int i, j, rc;
+ size_t nel, len;
++ __be64 prefixbuf[1];
+ __le32 buf[3];
+ u32 nodebuf[8];
+ struct ocontext *c;
+@@ -3191,12 +3207,17 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
+ return rc;
+ break;
+ case OCON_IBPKEY:
+- *((__be64 *)nodebuf) = cpu_to_be64(c->u.ibpkey.subnet_prefix);
++ /* subnet_prefix is in CPU order */
++ prefixbuf[0] = cpu_to_be64(c->u.ibpkey.subnet_prefix);
+
+- nodebuf[2] = cpu_to_le32(c->u.ibpkey.low_pkey);
+- nodebuf[3] = cpu_to_le32(c->u.ibpkey.high_pkey);
++ rc = put_entry(prefixbuf, sizeof(u64), 1, fp);
++ if (rc)
++ return rc;
++
++ buf[0] = cpu_to_le32(c->u.ibpkey.low_pkey);
++ buf[1] = cpu_to_le32(c->u.ibpkey.high_pkey);
+
+- rc = put_entry(nodebuf, sizeof(u32), 4, fp);
++ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
+index aa61615288ff..f03bbd0eb027 100644
+--- a/sound/pci/cs46xx/dsp_spos.c
++++ b/sound/pci/cs46xx/dsp_spos.c
+@@ -900,6 +900,9 @@ int cs46xx_dsp_proc_done (struct snd_cs46xx *chip)
+ struct dsp_spos_instance * ins = chip->dsp_spos_instance;
+ int i;
+
++ if (!ins)
++ return 0;
++
+ snd_info_free_entry(ins->proc_sym_info_entry);
+ ins->proc_sym_info_entry = NULL;
+
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 4d950b7c2f97..b3be0d432a75 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1888,7 +1888,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
+ char *name)
+ {
+ struct uac_processing_unit_descriptor *desc = raw_desc;
+- int num_ins = desc->bNrInPins;
++ int num_ins;
+ struct usb_mixer_elem_info *cval;
+ struct snd_kcontrol *kctl;
+ int i, err, nameid, type, len;
+@@ -1903,7 +1903,13 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
+ 0, NULL, default_value_info
+ };
+
+- if (desc->bLength < 13 || desc->bLength < 13 + num_ins ||
++ if (desc->bLength < 13) {
++ usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
++ return -EINVAL;
++ }
++
++ num_ins = desc->bNrInPins;
++ if (desc->bLength < 13 + num_ins ||
+ desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
+ usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
+ return -EINVAL;
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 15cbe2565703..d32727c74a16 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3321,6 +3321,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ }
+ }
+ },
++ {
++ .ifnum = -1
++ },
+ }
+ }
+ },
+diff --git a/tools/cgroup/Makefile b/tools/cgroup/Makefile
+index 860fa151640a..ffca068e4a76 100644
+--- a/tools/cgroup/Makefile
++++ b/tools/cgroup/Makefile
+@@ -1,7 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Makefile for cgroup tools
+
+-CC = $(CROSS_COMPILE)gcc
+ CFLAGS = -Wall -Wextra
+
+ all: cgroup_event_listener
+diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
+index 805a2c0cf4cd..240eda014b37 100644
+--- a/tools/gpio/Makefile
++++ b/tools/gpio/Makefile
+@@ -12,8 +12,6 @@ endif
+ # (this improves performance and avoids hard-to-debug behaviour);
+ MAKEFLAGS += -r
+
+-CC = $(CROSS_COMPILE)gcc
+-LD = $(CROSS_COMPILE)ld
+ CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
+
+ ALL_TARGETS := lsgpio gpio-hammer gpio-event-mon
+diff --git a/tools/hv/Makefile b/tools/hv/Makefile
+index 31503819454d..68c2d7b059b3 100644
+--- a/tools/hv/Makefile
++++ b/tools/hv/Makefile
+@@ -1,7 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Makefile for Hyper-V tools
+
+-CC = $(CROSS_COMPILE)gcc
+ WARNINGS = -Wall -Wextra
+ CFLAGS = $(WARNINGS) -g $(shell getconf LFS_CFLAGS)
+
+diff --git a/tools/iio/Makefile b/tools/iio/Makefile
+index a08e7a47d6a3..332ed2f6c2c2 100644
+--- a/tools/iio/Makefile
++++ b/tools/iio/Makefile
+@@ -12,8 +12,6 @@ endif
+ # (this improves performance and avoids hard-to-debug behaviour);
+ MAKEFLAGS += -r
+
+-CC = $(CROSS_COMPILE)gcc
+-LD = $(CROSS_COMPILE)ld
+ CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
+
+ ALL_TARGETS := iio_event_monitor lsiio iio_generic_buffer
+diff --git a/tools/laptop/freefall/Makefile b/tools/laptop/freefall/Makefile
+index 5f758c489a20..b572d94255f6 100644
+--- a/tools/laptop/freefall/Makefile
++++ b/tools/laptop/freefall/Makefile
+@@ -2,7 +2,6 @@
+ PREFIX ?= /usr
+ SBINDIR ?= sbin
+ INSTALL ?= install
+-CC = $(CROSS_COMPILE)gcc
+
+ TARGET = freefall
+
+diff --git a/tools/leds/Makefile b/tools/leds/Makefile
+index c379af003807..7b6bed13daaa 100644
+--- a/tools/leds/Makefile
++++ b/tools/leds/Makefile
+@@ -1,7 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Makefile for LEDs tools
+
+-CC = $(CROSS_COMPILE)gcc
+ CFLAGS = -Wall -Wextra -g -I../../include/uapi
+
+ all: uledmon led_hw_brightness_mon
+diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
+index 2a858ea56a81..349ea5133d83 100644
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -144,12 +144,6 @@ define allow-override
+ $(eval $(1) = $(2)))
+ endef
+
+-# Allow setting CC and AR and LD, or setting CROSS_COMPILE as a prefix.
+-$(call allow-override,CC,$(CROSS_COMPILE)gcc)
+-$(call allow-override,AR,$(CROSS_COMPILE)ar)
+-$(call allow-override,LD,$(CROSS_COMPILE)ld)
+-$(call allow-override,CXX,$(CROSS_COMPILE)g++)
+-
+ LD += $(EXTRA_LDFLAGS)
+
+ HOSTCC ?= gcc
+diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
+index a1883bbb0144..f304be71c278 100644
+--- a/tools/power/acpi/Makefile.config
++++ b/tools/power/acpi/Makefile.config
+@@ -56,9 +56,7 @@ INSTALL_SCRIPT = ${INSTALL_PROGRAM}
+ # to compile vs uClibc, that can be done here as well.
+ CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
+ CROSS_COMPILE ?= $(CROSS)
+-CC = $(CROSS_COMPILE)gcc
+-LD = $(CROSS_COMPILE)gcc
+-STRIP = $(CROSS_COMPILE)strip
++LD = $(CC)
+ HOSTCC = gcc
+
+ # check if compiler option is supported
+diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
+index 5f3f1f44ed0a..71dc7efc7efa 100644
+--- a/tools/scripts/Makefile.include
++++ b/tools/scripts/Makefile.include
+@@ -42,6 +42,24 @@ EXTRA_WARNINGS += -Wformat
+
+ CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
+
++# Makefiles suck: This macro sets a default value of $(2) for the
++# variable named by $(1), unless the variable has been set by
++# environment or command line. This is necessary for CC and AR
++# because make sets default values, so the simpler ?= approach
++# won't work as expected.
++define allow-override
++ $(if $(or $(findstring environment,$(origin $(1))),\
++ $(findstring command line,$(origin $(1)))),,\
++ $(eval $(1) = $(2)))
++endef
++
++# Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix.
++$(call allow-override,CC,$(CROSS_COMPILE)gcc)
++$(call allow-override,AR,$(CROSS_COMPILE)ar)
++$(call allow-override,LD,$(CROSS_COMPILE)ld)
++$(call allow-override,CXX,$(CROSS_COMPILE)g++)
++$(call allow-override,STRIP,$(CROSS_COMPILE)strip)
++
+ ifeq ($(CC_NO_CLANG), 1)
+ EXTRA_WARNINGS += -Wstrict-aliasing=3
+ endif
+diff --git a/tools/spi/Makefile b/tools/spi/Makefile
+index 90615e10c79a..815d15589177 100644
+--- a/tools/spi/Makefile
++++ b/tools/spi/Makefile
+@@ -11,8 +11,6 @@ endif
+ # (this improves performance and avoids hard-to-debug behaviour);
+ MAKEFLAGS += -r
+
+-CC = $(CROSS_COMPILE)gcc
+-LD = $(CROSS_COMPILE)ld
+ CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
+
+ ALL_TARGETS := spidev_test spidev_fdx
+diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
+index e1f75a1914a1..f2a00b0698a3 100644
+--- a/tools/testing/nvdimm/test/iomap.c
++++ b/tools/testing/nvdimm/test/iomap.c
+@@ -114,7 +114,7 @@ void *__wrap_devm_memremap_pages(struct device *dev, struct resource *res,
+ return nfit_res->buf + offset - nfit_res->res.start;
+ return devm_memremap_pages(dev, res, ref, altmap);
+ }
+-EXPORT_SYMBOL(__wrap_devm_memremap_pages);
++EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
+
+ pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
+ {
+diff --git a/tools/usb/Makefile b/tools/usb/Makefile
+index 4e6506078494..01d758d73b6d 100644
+--- a/tools/usb/Makefile
++++ b/tools/usb/Makefile
+@@ -1,7 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Makefile for USB tools
+
+-CC = $(CROSS_COMPILE)gcc
+ PTHREAD_LIBS = -lpthread
+ WARNINGS = -Wall -Wextra
+ CFLAGS = $(WARNINGS) -g -I../include
+diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
+index 395521a7a8d8..268ce239de65 100644
+--- a/tools/virtio/linux/kernel.h
++++ b/tools/virtio/linux/kernel.h
+@@ -23,6 +23,10 @@
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+ #define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK)
+
++/* generic data direction definitions */
++#define READ 0
++#define WRITE 1
++
+ typedef unsigned long long phys_addr_t;
+ typedef unsigned long long dma_addr_t;
+ typedef size_t __kernel_size_t;
+diff --git a/tools/vm/Makefile b/tools/vm/Makefile
+index be320b905ea7..20f6cf04377f 100644
+--- a/tools/vm/Makefile
++++ b/tools/vm/Makefile
+@@ -6,7 +6,6 @@ TARGETS=page-types slabinfo page_owner_sort
+ LIB_DIR = ../lib/api
+ LIBS = $(LIB_DIR)/libapi.a
+
+-CC = $(CROSS_COMPILE)gcc
+ CFLAGS = -Wall -Wextra -I../lib/
+ LDFLAGS = $(LIBS)
+