author     Mike Pagano <mpagano@gentoo.org>  2018-03-15 06:26:07 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2018-03-15 06:26:07 -0400
commit     769b138afbb1b415122f8af9d94069d9c2d8b28d (patch)
tree       f2d30eae5e9bf7452b05b853dc81f927ab7d498b
parent     Linux patch 4.15.9 (diff)
download   linux-patches-4.15-13.tar.gz
           linux-patches-4.15-13.tar.bz2
           linux-patches-4.15-13.zip

Linux patch 4.15.10 (tag: 4.15-13)

-rw-r--r--  0000_README                 4
-rw-r--r--  1009_linux-4.15.10.patch    6492
2 files changed, 6496 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index bce11f77..172ed039 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch: 1008_linux-4.15.9.patch
From: http://www.kernel.org
Desc: Linux 4.15.9
+Patch: 1009_linux-4.15.10.patch
+From: http://www.kernel.org
+Desc: Linux 4.15.10
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1009_linux-4.15.10.patch b/1009_linux-4.15.10.patch
new file mode 100644
index 00000000..8b789774
--- /dev/null
+++ b/1009_linux-4.15.10.patch
@@ -0,0 +1,6492 @@
+diff --git a/Documentation/devicetree/bindings/power/mti,mips-cpc.txt b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt
+new file mode 100644
+index 000000000000..c6b82511ae8a
+--- /dev/null
++++ b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt
+@@ -0,0 +1,8 @@
++Binding for MIPS Cluster Power Controller (CPC).
++
++This binding allows a system to specify where the CPC registers are
++located.
++
++Required properties:
++compatible : Should be "mti,mips-cpc".
++regs: Should describe the address & size of the CPC register region.
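
A devicetree node using this binding might look like the following sketch; the base address and size are placeholder values rather than figures for any real board, and the "regs" described above names the standard devicetree "reg" property:

	cpc@1bde0000 {
		compatible = "mti,mips-cpc";
		reg = <0x1bde0000 0x8000>;
	};
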
+diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
+index 39aa9e8697cc..fbedcc39460b 100644
+--- a/Documentation/sphinx/kerneldoc.py
++++ b/Documentation/sphinx/kerneldoc.py
+@@ -36,8 +36,7 @@ import glob
+
+ from docutils import nodes, statemachine
+ from docutils.statemachine import ViewList
+-from docutils.parsers.rst import directives
+-from sphinx.util.compat import Directive
++from docutils.parsers.rst import directives, Directive
+ from sphinx.ext.autodoc import AutodocReporter
+
+ __version__ = '1.0'
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 845fc25812f1..8e5d2e5d85bf 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -9107,6 +9107,7 @@ MIPS GENERIC PLATFORM
+ M: Paul Burton <paul.burton@mips.com>
+ L: linux-mips@linux-mips.org
+ S: Supported
++F: Documentation/devicetree/bindings/power/mti,mips-cpc.txt
+ F: arch/mips/generic/
+ F: arch/mips/tools/generic-board-config.sh
+
+diff --git a/Makefile b/Makefile
+index 0420f9a0c70f..7eed0f168b13 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 15
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+@@ -487,6 +487,11 @@ KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+ KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+ endif
+
++RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
++RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
++RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
++export RETPOLINE_CFLAGS
++
+ ifeq ($(config-targets),1)
+ # ===========================================================================
+ # *config targets only - make sure prerequisites are updated, and descend
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 451f96f3377c..5bdc2c4db9ad 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -107,7 +107,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
+ * The following mapping attributes may be updated in live
+ * kernel mappings without the need for break-before-make.
+ */
+- static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
++ static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
+
+ /* creating or taking down mappings is always safe */
+ if (old == 0 || new == 0)
+@@ -117,9 +117,9 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
+ if ((old | new) & PTE_CONT)
+ return false;
+
+- /* Transitioning from Global to Non-Global is safe */
+- if (((old ^ new) == PTE_NG) && (new & PTE_NG))
+- return true;
++ /* Transitioning from Non-Global to Global is unsafe */
++ if (old & ~new & PTE_NG)
++ return false;
+
+ return ((old ^ new) & ~mask) == 0;
+ }
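
The rewrite above inverts the check: instead of whitelisting the one safe transition (Global to Non-Global), it rejects any update that clears the nG bit. A small user-space sketch of the bit logic, with PTE_NG assumed to be bit 11 (its arm64 value) and illustrative names:

	#include <stdio.h>
	#include <stdint.h>

	#define PTE_NG (1ULL << 11)	/* assumed arm64 nG bit, for illustration */

	/* "old & ~new & PTE_NG" is non-zero exactly when nG is being
	 * cleared, i.e. a non-global mapping would become global, which
	 * is the unsafe direction. */
	static int ng_cleared(uint64_t old, uint64_t new)
	{
		return (old & ~new & PTE_NG) != 0;
	}

	int main(void)
	{
		printf("%d\n", ng_cleared(PTE_NG, 0));		/* 1: unsafe */
		printf("%d\n", ng_cleared(0, PTE_NG));		/* 0: Global to nG is safe */
		printf("%d\n", ng_cleared(PTE_NG, PTE_NG));	/* 0: unchanged */
		return 0;
	}
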
+diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c
+index 9ab48ff80c1c..6d11ae581ea7 100644
+--- a/arch/mips/ath25/board.c
++++ b/arch/mips/ath25/board.c
+@@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
+ }
+
+ board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
++ if (!board_data)
++ goto error;
+ ath25_board.config = (struct ath25_boarddata *)board_data;
+ memcpy_fromio(board_data, bcfg, 0x100);
+ if (broken_boarddata) {
+diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
+index 5b3a3f6a9ad3..d99f5242169e 100644
+--- a/arch/mips/cavium-octeon/octeon-irq.c
++++ b/arch/mips/cavium-octeon/octeon-irq.c
+@@ -2277,6 +2277,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
+ }
+
+ host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
++ if (!host_data)
++ return -ENOMEM;
+ raw_spin_lock_init(&host_data->lock);
+
+ addr = of_get_address(ciu_node, 0, NULL, NULL);
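
Both hunks above (ath25 and Octeon) plug the same hole: a kzalloc() result was used without a NULL check. A minimal user-space analogue of the corrected pattern, with calloc() standing in for kzalloc() and illustrative names:

	#include <stdlib.h>
	#include <string.h>

	struct board_cfg { unsigned char data[256]; };

	/* Check the zeroed allocation before touching it and propagate
	 * the failure, instead of dereferencing a possibly-NULL pointer. */
	static struct board_cfg *load_cfg(const void *src, size_t len)
	{
		struct board_cfg *cfg = calloc(1, sizeof(*cfg));

		if (!cfg)
			return NULL;	/* the "goto error" / -ENOMEM path above */
		if (len > sizeof(cfg->data))
			len = sizeof(cfg->data);
		memcpy(cfg->data, src, len);
		return cfg;
	}
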
+diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
+index 19c88d770054..fcf9af492d60 100644
+--- a/arch/mips/kernel/mips-cpc.c
++++ b/arch/mips/kernel/mips-cpc.c
+@@ -10,6 +10,8 @@
+
+ #include <linux/errno.h>
+ #include <linux/percpu.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
+ #include <linux/spinlock.h>
+
+ #include <asm/mips-cps.h>
+@@ -22,6 +24,17 @@ static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
+
+ phys_addr_t __weak mips_cpc_default_phys_base(void)
+ {
++ struct device_node *cpc_node;
++ struct resource res;
++ int err;
++
++ cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
++ if (cpc_node) {
++ err = of_address_to_resource(cpc_node, 0, &res);
++ if (!err)
++ return res.start;
++ }
++
+ return 0;
+ }
+
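The new lookup in mips_cpc_default_phys_base() consumes the binding documented at the top of this patch. For reference, here is the same pattern in isolation, as a kernel-side sketch (the function name is illustrative, and unlike the early-returning hunk above it also drops the node reference with of_node_put()):

	#include <linux/of.h>
	#include <linux/of_address.h>

	/* Find the first "mti,mips-cpc" node, translate its reg property
	 * into a CPU physical address, and release the reference taken
	 * by of_find_compatible_node(). */
	static phys_addr_t cpc_base_from_dt(void)
	{
		struct device_node *np;
		struct resource res;
		phys_addr_t base = 0;

		np = of_find_compatible_node(NULL, NULL, "mti,mips-cpc");
		if (np) {
			if (!of_address_to_resource(np, 0, &res))
				base = res.start;
			of_node_put(np);
		}
		return base;
	}
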
+diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
+index 87dcac2447c8..382d12eb88f0 100644
+--- a/arch/mips/kernel/smp-bmips.c
++++ b/arch/mips/kernel/smp-bmips.c
+@@ -168,11 +168,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
+ return;
+ }
+
+- if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
+- "smp_ipi0", NULL))
++ if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
++ IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
+ panic("Can't request IPI0 interrupt");
+- if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
+- "smp_ipi1", NULL))
++ if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
++ IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
+ panic("Can't request IPI1 interrupt");
+ }
+
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 5c03e371b7b8..004684eaa827 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -2118,6 +2118,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
+ /* we still need the basic sca for the ipte control */
+ vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
+ vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
++ return;
+ }
+ read_lock(&vcpu->kvm->arch.sca_lock);
+ if (vcpu->kvm->arch.use_esca) {
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 20da391b5f32..7bb4eb14a2e0 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -432,6 +432,7 @@ config GOLDFISH
+ config RETPOLINE
+ bool "Avoid speculative indirect branches in kernel"
+ default y
++ select STACK_VALIDATION if HAVE_STACK_VALIDATION
+ help
+ Compile kernel with the retpoline compiler options to guard against
+ kernel-to-user data leaks by avoiding speculative indirect
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index fad55160dcb9..498c1b812300 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -232,10 +232,9 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+
+ # Avoid indirect branches in kernel to deal with Spectre
+ ifdef CONFIG_RETPOLINE
+- RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
+- ifneq ($(RETPOLINE_CFLAGS),)
+- KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+- endif
++ifneq ($(RETPOLINE_CFLAGS),)
++ KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
++endif
+ endif
+
+ archscripts: scripts_basic
+diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
+index dce7092ab24a..5d10b7a85cad 100644
+--- a/arch/x86/entry/calling.h
++++ b/arch/x86/entry/calling.h
+@@ -97,7 +97,7 @@ For 32-bit we have the following conventions - kernel is built with
+
+ #define SIZEOF_PTREGS 21*8
+
+-.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
++.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
+ /*
+ * Push registers and sanitize registers of values that a
+ * speculation attack might otherwise want to exploit. The
+@@ -105,32 +105,41 @@ For 32-bit we have the following conventions - kernel is built with
+ * could be put to use in a speculative execution gadget.
+ * Interleave XOR with PUSH for better uop scheduling:
+ */
++ .if \save_ret
++ pushq %rsi /* pt_regs->si */
++ movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
++ movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */
++ .else
+ pushq %rdi /* pt_regs->di */
+ pushq %rsi /* pt_regs->si */
++ .endif
+ pushq \rdx /* pt_regs->dx */
+ pushq %rcx /* pt_regs->cx */
+ pushq \rax /* pt_regs->ax */
+ pushq %r8 /* pt_regs->r8 */
+- xorq %r8, %r8 /* nospec r8 */
++ xorl %r8d, %r8d /* nospec r8 */
+ pushq %r9 /* pt_regs->r9 */
+- xorq %r9, %r9 /* nospec r9 */
++ xorl %r9d, %r9d /* nospec r9 */
+ pushq %r10 /* pt_regs->r10 */
+- xorq %r10, %r10 /* nospec r10 */
++ xorl %r10d, %r10d /* nospec r10 */
+ pushq %r11 /* pt_regs->r11 */
+- xorq %r11, %r11 /* nospec r11*/
++ xorl %r11d, %r11d /* nospec r11*/
+ pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx*/
+ pushq %rbp /* pt_regs->rbp */
+ xorl %ebp, %ebp /* nospec rbp*/
+ pushq %r12 /* pt_regs->r12 */
+- xorq %r12, %r12 /* nospec r12*/
++ xorl %r12d, %r12d /* nospec r12*/
+ pushq %r13 /* pt_regs->r13 */
+- xorq %r13, %r13 /* nospec r13*/
++ xorl %r13d, %r13d /* nospec r13*/
+ pushq %r14 /* pt_regs->r14 */
+- xorq %r14, %r14 /* nospec r14*/
++ xorl %r14d, %r14d /* nospec r14*/
+ pushq %r15 /* pt_regs->r15 */
+- xorq %r15, %r15 /* nospec r15*/
++ xorl %r15d, %r15d /* nospec r15*/
+ UNWIND_HINT_REGS
++ .if \save_ret
++ pushq %rsi /* return address on top of stack */
++ .endif
+ .endm
+
+ .macro POP_REGS pop_rdi=1 skip_r11rcx=0
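
The xorq-to-xorl conversions in this macro (and the matching ones in entry_64_compat.S below) rely on the x86-64 rule that a 32-bit operation zero-extends into the upper half of its register: the 32-bit form clears all 64 bits, is never longer to encode, and is the form some CPUs recognize as a dependency-breaking zeroing idiom. A quick user-space check of the zero-extension behaviour, assuming GCC or Clang inline asm on x86-64:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t v = 0xdeadbeefcafef00dULL;

		/* xorl %eax,%eax writes 0 to %eax and, by zero-extension,
		 * also clears the upper 32 bits of %rax. */
		asm("movq %1, %%rax\n\t"
		    "xorl %%eax, %%eax\n\t"
		    "movq %%rax, %0"
		    : "=r" (v)
		    : "r" (v)
		    : "rax");
		printf("0x%016llx\n", (unsigned long long)v);	/* prints 0x0000000000000000 */
		return 0;
	}
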
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 2a35b1e0fb90..60c4c342316c 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -252,8 +252,7 @@ ENTRY(__switch_to_asm)
+ * exist, overwrite the RSB with entries which capture
+ * speculative execution to prevent attack.
+ */
+- /* Clobbers %ebx */
+- FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
++ FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+ #endif
+
+ /* restore callee-saved registers */
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 4fd9044e72e7..50dcbf640850 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -364,8 +364,7 @@ ENTRY(__switch_to_asm)
+ * exist, overwrite the RSB with entries which capture
+ * speculative execution to prevent attack.
+ */
+- /* Clobbers %rbx */
+- FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
++ FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+ #endif
+
+ /* restore callee-saved registers */
+@@ -871,12 +870,8 @@ ENTRY(\sym)
+ pushq $-1 /* ORIG_RAX: no syscall to restart */
+ .endif
+
+- /* Save all registers in pt_regs */
+- PUSH_AND_CLEAR_REGS
+- ENCODE_FRAME_POINTER
+-
+ .if \paranoid < 2
+- testb $3, CS(%rsp) /* If coming from userspace, switch stacks */
++ testb $3, CS-ORIG_RAX(%rsp) /* If coming from userspace, switch stacks */
+ jnz .Lfrom_usermode_switch_stack_\@
+ .endif
+
+@@ -1123,13 +1118,15 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
+ #endif
+
+ /*
+- * Switch gs if needed.
++ * Save all registers in pt_regs, and switch gs if needed.
+ * Use slow, but surefire "are we in kernel?" check.
+ * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
+ */
+ ENTRY(paranoid_entry)
+ UNWIND_HINT_FUNC
+ cld
++ PUSH_AND_CLEAR_REGS save_ret=1
++ ENCODE_FRAME_POINTER 8
+ movl $1, %ebx
+ movl $MSR_GS_BASE, %ecx
+ rdmsr
+@@ -1174,12 +1171,14 @@ ENTRY(paranoid_exit)
+ END(paranoid_exit)
+
+ /*
+- * Switch gs if needed.
++ * Save all registers in pt_regs, and switch GS if needed.
+ * Return: EBX=0: came from user mode; EBX=1: otherwise
+ */
+ ENTRY(error_entry)
+- UNWIND_HINT_REGS offset=8
++ UNWIND_HINT_FUNC
+ cld
++ PUSH_AND_CLEAR_REGS save_ret=1
++ ENCODE_FRAME_POINTER 8
+ testb $3, CS+8(%rsp)
+ jz .Lerror_kernelspace
+
+@@ -1570,8 +1569,6 @@ end_repeat_nmi:
+ * frame to point back to repeat_nmi.
+ */
+ pushq $-1 /* ORIG_RAX: no syscall to restart */
+- PUSH_AND_CLEAR_REGS
+- ENCODE_FRAME_POINTER
+
+ /*
+ * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index fd65e016e413..364ea4a207be 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -85,25 +85,25 @@ ENTRY(entry_SYSENTER_compat)
+ pushq %rcx /* pt_regs->cx */
+ pushq $-ENOSYS /* pt_regs->ax */
+ pushq $0 /* pt_regs->r8 = 0 */
+- xorq %r8, %r8 /* nospec r8 */
++ xorl %r8d, %r8d /* nospec r8 */
+ pushq $0 /* pt_regs->r9 = 0 */
+- xorq %r9, %r9 /* nospec r9 */
++ xorl %r9d, %r9d /* nospec r9 */
+ pushq $0 /* pt_regs->r10 = 0 */
+- xorq %r10, %r10 /* nospec r10 */
++ xorl %r10d, %r10d /* nospec r10 */
+ pushq $0 /* pt_regs->r11 = 0 */
+- xorq %r11, %r11 /* nospec r11 */
++ xorl %r11d, %r11d /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx */
+ pushq %rbp /* pt_regs->rbp (will be overwritten) */
+ xorl %ebp, %ebp /* nospec rbp */
+ pushq $0 /* pt_regs->r12 = 0 */
+- xorq %r12, %r12 /* nospec r12 */
++ xorl %r12d, %r12d /* nospec r12 */
+ pushq $0 /* pt_regs->r13 = 0 */
+- xorq %r13, %r13 /* nospec r13 */
++ xorl %r13d, %r13d /* nospec r13 */
+ pushq $0 /* pt_regs->r14 = 0 */
+- xorq %r14, %r14 /* nospec r14 */
++ xorl %r14d, %r14d /* nospec r14 */
+ pushq $0 /* pt_regs->r15 = 0 */
+- xorq %r15, %r15 /* nospec r15 */
++ xorl %r15d, %r15d /* nospec r15 */
+ cld
+
+ /*
+@@ -224,25 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
+ pushq %rbp /* pt_regs->cx (stashed in bp) */
+ pushq $-ENOSYS /* pt_regs->ax */
+ pushq $0 /* pt_regs->r8 = 0 */
+- xorq %r8, %r8 /* nospec r8 */
++ xorl %r8d, %r8d /* nospec r8 */
+ pushq $0 /* pt_regs->r9 = 0 */
+- xorq %r9, %r9 /* nospec r9 */
++ xorl %r9d, %r9d /* nospec r9 */
+ pushq $0 /* pt_regs->r10 = 0 */
+- xorq %r10, %r10 /* nospec r10 */
++ xorl %r10d, %r10d /* nospec r10 */
+ pushq $0 /* pt_regs->r11 = 0 */
+- xorq %r11, %r11 /* nospec r11 */
++ xorl %r11d, %r11d /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx */
+ pushq %rbp /* pt_regs->rbp (will be overwritten) */
+ xorl %ebp, %ebp /* nospec rbp */
+ pushq $0 /* pt_regs->r12 = 0 */
+- xorq %r12, %r12 /* nospec r12 */
++ xorl %r12d, %r12d /* nospec r12 */
+ pushq $0 /* pt_regs->r13 = 0 */
+- xorq %r13, %r13 /* nospec r13 */
++ xorl %r13d, %r13d /* nospec r13 */
+ pushq $0 /* pt_regs->r14 = 0 */
+- xorq %r14, %r14 /* nospec r14 */
++ xorl %r14d, %r14d /* nospec r14 */
+ pushq $0 /* pt_regs->r15 = 0 */
+- xorq %r15, %r15 /* nospec r15 */
++ xorl %r15d, %r15d /* nospec r15 */
+
+ /*
+ * User mode is traced as though IRQs are on, and SYSENTER
+@@ -298,9 +298,9 @@ sysret32_from_system_call:
+ */
+ SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9
+
+- xorq %r8, %r8
+- xorq %r9, %r9
+- xorq %r10, %r10
++ xorl %r8d, %r8d
++ xorl %r9d, %r9d
++ xorl %r10d, %r10d
+ swapgs
+ sysretl
+ END(entry_SYSCALL_compat)
+@@ -358,25 +358,25 @@ ENTRY(entry_INT80_compat)
+ pushq %rcx /* pt_regs->cx */
+ pushq $-ENOSYS /* pt_regs->ax */
+ pushq $0 /* pt_regs->r8 = 0 */
+- xorq %r8, %r8 /* nospec r8 */
++ xorl %r8d, %r8d /* nospec r8 */
+ pushq $0 /* pt_regs->r9 = 0 */
+- xorq %r9, %r9 /* nospec r9 */
++ xorl %r9d, %r9d /* nospec r9 */
+ pushq $0 /* pt_regs->r10 = 0 */
+- xorq %r10, %r10 /* nospec r10 */
++ xorl %r10d, %r10d /* nospec r10 */
+ pushq $0 /* pt_regs->r11 = 0 */
+- xorq %r11, %r11 /* nospec r11 */
++ xorl %r11d, %r11d /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx */
+ pushq %rbp /* pt_regs->rbp */
+ xorl %ebp, %ebp /* nospec rbp */
+ pushq %r12 /* pt_regs->r12 */
+- xorq %r12, %r12 /* nospec r12 */
++ xorl %r12d, %r12d /* nospec r12 */
+ pushq %r13 /* pt_regs->r13 */
+- xorq %r13, %r13 /* nospec r13 */
++ xorl %r13d, %r13d /* nospec r13 */
+ pushq %r14 /* pt_regs->r14 */
+- xorq %r14, %r14 /* nospec r14 */
++ xorl %r14d, %r14d /* nospec r14 */
+ pushq %r15 /* pt_regs->r15 */
+- xorq %r15, %r15 /* nospec r15 */
++ xorl %r15d, %r15d /* nospec r15 */
+ cld
+
+ /*
+diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
+index 4d4015ddcf26..c356098b6fb9 100644
+--- a/arch/x86/include/asm/apm.h
++++ b/arch/x86/include/asm/apm.h
+@@ -7,6 +7,8 @@
+ #ifndef _ASM_X86_MACH_DEFAULT_APM_H
+ #define _ASM_X86_MACH_DEFAULT_APM_H
+
++#include <asm/nospec-branch.h>
++
+ #ifdef APM_ZERO_SEGS
+ # define APM_DO_ZERO_SEGS \
+ "pushl %%ds\n\t" \
+@@ -32,6 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
+ * N.B. We do NOT need a cld after the BIOS call
+ * because we always save and restore the flags.
+ */
++ firmware_restrict_branch_speculation_start();
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+@@ -44,6 +47,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
+ "=S" (*esi)
+ : "a" (func), "b" (ebx_in), "c" (ecx_in)
+ : "memory", "cc");
++ firmware_restrict_branch_speculation_end();
+ }
+
+ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+@@ -56,6 +60,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+ * N.B. We do NOT need a cld after the BIOS call
+ * because we always save and restore the flags.
+ */
++ firmware_restrict_branch_speculation_start();
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+@@ -68,6 +73,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+ "=S" (si)
+ : "a" (func), "b" (ebx_in), "c" (ecx_in)
+ : "memory", "cc");
++ firmware_restrict_branch_speculation_end();
+ return error;
+ }
+
+diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
+index 4d111616524b..1908214b9125 100644
+--- a/arch/x86/include/asm/asm-prototypes.h
++++ b/arch/x86/include/asm/asm-prototypes.h
+@@ -38,7 +38,4 @@ INDIRECT_THUNK(dx)
+ INDIRECT_THUNK(si)
+ INDIRECT_THUNK(di)
+ INDIRECT_THUNK(bp)
+-asmlinkage void __fill_rsb(void);
+-asmlinkage void __clear_rsb(void);
+-
+ #endif /* CONFIG_RETPOLINE */
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 73b5fff159a4..66c14347c502 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -211,6 +211,7 @@
+ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
++#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
+index 85f6ccb80b91..a399c1ebf6f0 100644
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -6,6 +6,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/processor-flags.h>
+ #include <asm/tlb.h>
++#include <asm/nospec-branch.h>
+
+ /*
+ * We map the EFI regions needed for runtime services non-contiguously,
+@@ -36,8 +37,18 @@
+
+ extern asmlinkage unsigned long efi_call_phys(void *, ...);
+
+-#define arch_efi_call_virt_setup() kernel_fpu_begin()
+-#define arch_efi_call_virt_teardown() kernel_fpu_end()
++#define arch_efi_call_virt_setup() \
++({ \
++ kernel_fpu_begin(); \
++ firmware_restrict_branch_speculation_start(); \
++})
++
++#define arch_efi_call_virt_teardown() \
++({ \
++ firmware_restrict_branch_speculation_end(); \
++ kernel_fpu_end(); \
++})
++
+
+ /*
+ * Wrap all the virtual calls in a way that forces the parameters on the stack.
+@@ -73,6 +84,7 @@ struct efi_scratch {
+ efi_sync_low_kernel_mappings(); \
+ preempt_disable(); \
+ __kernel_fpu_begin(); \
++ firmware_restrict_branch_speculation_start(); \
+ \
+ if (efi_scratch.use_pgd) { \
+ efi_scratch.prev_cr3 = __read_cr3(); \
+@@ -91,6 +103,7 @@ struct efi_scratch {
+ __flush_tlb_all(); \
+ } \
+ \
++ firmware_restrict_branch_speculation_end(); \
+ __kernel_fpu_end(); \
+ preempt_enable(); \
+ })
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index c931b88982a0..1de72ce514cd 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -74,6 +74,7 @@ static inline void *ldt_slot_va(int slot)
+ return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
+ #else
+ BUG();
++ return (void *)fix_to_virt(FIX_HOLE);
+ #endif
+ }
+
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 81a1be326571..d0dabeae0505 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -8,6 +8,50 @@
+ #include <asm/cpufeatures.h>
+ #include <asm/msr-index.h>
+
++/*
++ * Fill the CPU return stack buffer.
++ *
++ * Each entry in the RSB, if used for a speculative 'ret', contains an
++ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
++ *
++ * This is required in various cases for retpoline and IBRS-based
++ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
++ * eliminate potentially bogus entries from the RSB, and sometimes
++ * purely to ensure that it doesn't get empty, which on some CPUs would
++ * allow predictions from other (unwanted!) sources to be used.
++ *
++ * We define a CPP macro such that it can be used from both .S files and
++ * inline assembly. It's possible to do a .macro and then include that
++ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
++ */
++
++#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
++#define RSB_FILL_LOOPS 16 /* To avoid underflow */
++
++/*
++ * Google experimented with loop-unrolling and this turned out to be
++ * the optimal version — two calls, each with their own speculation
++ * trap should their return address end up getting used, in a loop.
++ */
++#define __FILL_RETURN_BUFFER(reg, nr, sp) \
++ mov $(nr/2), reg; \
++771: \
++ call 772f; \
++773: /* speculation trap */ \
++ pause; \
++ lfence; \
++ jmp 773b; \
++772: \
++ call 774f; \
++775: /* speculation trap */ \
++ pause; \
++ lfence; \
++ jmp 775b; \
++774: \
++ dec reg; \
++ jnz 771b; \
++ add $(BITS_PER_LONG/8) * nr, sp;
++
+ #ifdef __ASSEMBLY__
+
+ /*
+@@ -23,6 +67,18 @@
+ .popsection
+ .endm
+
++/*
++ * This should be used immediately before an indirect jump/call. It tells
++ * objtool the subsequent indirect jump/call is vouched safe for retpoline
++ * builds.
++ */
++.macro ANNOTATE_RETPOLINE_SAFE
++ .Lannotate_\@:
++ .pushsection .discard.retpoline_safe
++ _ASM_PTR .Lannotate_\@
++ .popsection
++.endm
++
+ /*
+ * These are the bare retpoline primitives for indirect jmp and call.
+ * Do not use these directly; they only exist to make the ALTERNATIVE
+@@ -59,9 +115,9 @@
+ .macro JMP_NOSPEC reg:req
+ #ifdef CONFIG_RETPOLINE
+ ANNOTATE_NOSPEC_ALTERNATIVE
+- ALTERNATIVE_2 __stringify(jmp *\reg), \
++ ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg), \
+ __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
+- __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
++ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
+ #else
+ jmp *\reg
+ #endif
+@@ -70,18 +126,25 @@
+ .macro CALL_NOSPEC reg:req
+ #ifdef CONFIG_RETPOLINE
+ ANNOTATE_NOSPEC_ALTERNATIVE
+- ALTERNATIVE_2 __stringify(call *\reg), \
++ ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \
+ __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
+- __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
++ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
+ #else
+ call *\reg
+ #endif
+ .endm
+
+-/* This clobbers the BX register */
+-.macro FILL_RETURN_BUFFER nr:req ftr:req
++ /*
++ * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
++ * monstrosity above, manually.
++ */
++.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
+ #ifdef CONFIG_RETPOLINE
+- ALTERNATIVE "", "call __clear_rsb", \ftr
++ ANNOTATE_NOSPEC_ALTERNATIVE
++ ALTERNATIVE "jmp .Lskip_rsb_\@", \
++ __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
++ \ftr
++.Lskip_rsb_\@:
+ #endif
+ .endm
+
+@@ -93,6 +156,12 @@
+ ".long 999b - .\n\t" \
+ ".popsection\n\t"
+
++#define ANNOTATE_RETPOLINE_SAFE \
++ "999:\n\t" \
++ ".pushsection .discard.retpoline_safe\n\t" \
++ _ASM_PTR " 999b\n\t" \
++ ".popsection\n\t"
++
+ #if defined(CONFIG_X86_64) && defined(RETPOLINE)
+
+ /*
+@@ -102,6 +171,7 @@
+ # define CALL_NOSPEC \
+ ANNOTATE_NOSPEC_ALTERNATIVE \
+ ALTERNATIVE( \
++ ANNOTATE_RETPOLINE_SAFE \
+ "call *%[thunk_target]\n", \
+ "call __x86_indirect_thunk_%V[thunk_target]\n", \
+ X86_FEATURE_RETPOLINE)
+@@ -156,26 +226,54 @@ extern char __indirect_thunk_end[];
+ static inline void vmexit_fill_RSB(void)
+ {
+ #ifdef CONFIG_RETPOLINE
+- alternative_input("",
+- "call __fill_rsb",
+- X86_FEATURE_RETPOLINE,
+- ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory"));
++ unsigned long loops;
++
++ asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
++ ALTERNATIVE("jmp 910f",
++ __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
++ X86_FEATURE_RETPOLINE)
++ "910:"
++ : "=r" (loops), ASM_CALL_CONSTRAINT
++ : : "memory" );
+ #endif
+ }
+
++#define alternative_msr_write(_msr, _val, _feature) \
++ asm volatile(ALTERNATIVE("", \
++ "movl %[msr], %%ecx\n\t" \
++ "movl %[val], %%eax\n\t" \
++ "movl $0, %%edx\n\t" \
++ "wrmsr", \
++ _feature) \
++ : : [msr] "i" (_msr), [val] "i" (_val) \
++ : "eax", "ecx", "edx", "memory")
++
+ static inline void indirect_branch_prediction_barrier(void)
+ {
+- asm volatile(ALTERNATIVE("",
+- "movl %[msr], %%ecx\n\t"
+- "movl %[val], %%eax\n\t"
+- "movl $0, %%edx\n\t"
+- "wrmsr",
+- X86_FEATURE_USE_IBPB)
+- : : [msr] "i" (MSR_IA32_PRED_CMD),
+- [val] "i" (PRED_CMD_IBPB)
+- : "eax", "ecx", "edx", "memory");
++ alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
++ X86_FEATURE_USE_IBPB);
+ }
+
++/*
++ * With retpoline, we must use IBRS to restrict branch prediction
++ * before calling into firmware.
++ *
++ * (Implemented as CPP macros due to header hell.)
++ */
++#define firmware_restrict_branch_speculation_start() \
++do { \
++ preempt_disable(); \
++ alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \
++ X86_FEATURE_USE_IBRS_FW); \
++} while (0)
++
++#define firmware_restrict_branch_speculation_end() \
++do { \
++ alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \
++ X86_FEATURE_USE_IBRS_FW); \
++ preempt_enable(); \
++} while (0)
++
+ #endif /* __ASSEMBLY__ */
+
+ /*
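
For readers new to the construct that __FILL_RETURN_BUFFER and the thunks revolve around, here is a stand-alone user-space rendering of a retpoline-style indirect call. This is a didactic sketch, not the kernel's CALL_NOSPEC macro, and it assumes x86-64 with GCC/Clang inline asm: the indirect branch becomes a direct call into a tiny thunk that overwrites its own return address with the real target and executes RET, while anything the CPU speculates down the architectural fall-through path spins harmlessly in the pause/lfence loop.

	#include <stdio.h>

	static void hello(void)
	{
		puts("called through a retpoline");
	}

	/* "call 1b" pushes the real return address; the thunk's inner
	 * "call 2f" pushes the trap address, which is immediately
	 * overwritten with the target before RET "returns" into it. */
	static void call_nospec(void (*fn)(void))
	{
		asm volatile(
			"jmp	3f\n"
			"1:\n\t"
			"call	2f\n"
			"4:\n\t"
			"pause\n\t"
			"lfence\n\t"
			"jmp	4b\n"			/* speculation trap */
			"2:\n\t"
			"mov	%0, (%%rsp)\n\t"	/* replace return address with fn */
			"ret\n"
			"3:\n\t"
			"call	1b\n"			/* push real return address */
			:
			: "r" (fn)
			: "rax", "rcx", "rdx", "rsi", "rdi",
			  "r8", "r9", "r10", "r11", "cc", "memory");
	}

	int main(void)
	{
		call_nospec(hello);
		return 0;
	}
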
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index 554841fab717..c83a2f418cea 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -7,6 +7,7 @@
+ #ifdef CONFIG_PARAVIRT
+ #include <asm/pgtable_types.h>
+ #include <asm/asm.h>
++#include <asm/nospec-branch.h>
+
+ #include <asm/paravirt_types.h>
+
+@@ -879,23 +880,27 @@ extern void default_banner(void);
+
+ #define INTERRUPT_RETURN \
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
+- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
++ ANNOTATE_RETPOLINE_SAFE; \
++ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);)
+
+ #define DISABLE_INTERRUPTS(clobbers) \
+ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
+ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
++ ANNOTATE_RETPOLINE_SAFE; \
+ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
+ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+
+ #define ENABLE_INTERRUPTS(clobbers) \
+ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
+ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
++ ANNOTATE_RETPOLINE_SAFE; \
+ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
+ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+
+ #ifdef CONFIG_X86_32
+ #define GET_CR0_INTO_EAX \
+ push %ecx; push %edx; \
++ ANNOTATE_RETPOLINE_SAFE; \
+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
+ pop %edx; pop %ecx
+ #else /* !CONFIG_X86_32 */
+@@ -917,21 +922,25 @@ extern void default_banner(void);
+ */
+ #define SWAPGS \
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
+- call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
++ ANNOTATE_RETPOLINE_SAFE; \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
+ )
+
+ #define GET_CR2_INTO_RAX \
+- call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
++ ANNOTATE_RETPOLINE_SAFE; \
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);
+
+ #define USERGS_SYSRET64 \
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
+ CLBR_NONE, \
+- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
++ ANNOTATE_RETPOLINE_SAFE; \
++ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);)
+
+ #ifdef CONFIG_DEBUG_ENTRY
+ #define SAVE_FLAGS(clobbers) \
+ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
+ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
++ ANNOTATE_RETPOLINE_SAFE; \
+ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl); \
+ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+ #endif
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index f624f1f10316..180bc0bff0fb 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -43,6 +43,7 @@
+ #include <asm/desc_defs.h>
+ #include <asm/kmap_types.h>
+ #include <asm/pgtable_types.h>
++#include <asm/nospec-branch.h>
+
+ struct page;
+ struct thread_struct;
+@@ -392,7 +393,9 @@ int paravirt_disable_iospace(void);
+ * offset into the paravirt_patch_template structure, and can therefore be
+ * freely converted back into a structure offset.
+ */
+-#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
++#define PARAVIRT_CALL \
++ ANNOTATE_RETPOLINE_SAFE \
++ "call *%c[paravirt_opptr];"
+
+ /*
+ * These macros are intended to wrap calls through one of the paravirt
+diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
+index 4e44250e7d0d..d65171120e90 100644
+--- a/arch/x86/include/asm/refcount.h
++++ b/arch/x86/include/asm/refcount.h
+@@ -67,13 +67,13 @@ static __always_inline __must_check
+ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+ {
+ GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO,
+- r->refs.counter, "er", i, "%0", e);
++ r->refs.counter, "er", i, "%0", e, "cx");
+ }
+
+ static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
+ {
+ GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO,
+- r->refs.counter, "%0", e);
++ r->refs.counter, "%0", e, "cx");
+ }
+
+ static __always_inline __must_check
+diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
+index f91c365e57c3..4914a3e7c803 100644
+--- a/arch/x86/include/asm/rmwcc.h
++++ b/arch/x86/include/asm/rmwcc.h
+@@ -2,8 +2,7 @@
+ #ifndef _ASM_X86_RMWcc
+ #define _ASM_X86_RMWcc
+
+-#define __CLOBBERS_MEM "memory"
+-#define __CLOBBERS_MEM_CC_CX "memory", "cc", "cx"
++#define __CLOBBERS_MEM(clb...) "memory", ## clb
+
+ #if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO)
+
+@@ -40,18 +39,19 @@ do { \
+ #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
+
+ #define GEN_UNARY_RMWcc(op, var, arg0, cc) \
+- __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM)
++ __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
+
+-#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc) \
++#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc, clobbers...)\
+ __GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc, \
+- __CLOBBERS_MEM_CC_CX)
++ __CLOBBERS_MEM(clobbers))
+
+ #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
+ __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc, \
+- __CLOBBERS_MEM, vcon (val))
++ __CLOBBERS_MEM(), vcon (val))
+
+-#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc) \
++#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc, \
++ clobbers...) \
+ __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc, \
+- __CLOBBERS_MEM_CC_CX, vcon (val))
++ __CLOBBERS_MEM(clobbers), vcon (val))
+
+ #endif /* _ASM_X86_RMWcc */
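
The rewritten __CLOBBERS_MEM() leans on the GNU preprocessor's ", ## args" behaviour, which deletes the comma when the variadic part is empty, so only the SUFFIXED variants need to pass extra clobbers. The trick in isolation, as a runnable sketch:

	#include <stdio.h>

	/* Same shape as __CLOBBERS_MEM(clb...): with no arguments it
	 * expands to just "memory"; ", ## clb" swallows the comma. */
	#define CLOBBERS(clb...) "memory", ## clb

	int main(void)
	{
		const char *none[] = { CLOBBERS() };
		const char *some[] = { CLOBBERS("cc", "cx") };

		printf("%zu %zu\n",
		       sizeof(none) / sizeof(none[0]),
		       sizeof(some) / sizeof(some[0]));	/* prints "1 3" */
		return 0;
	}
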
+diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
+index d6baf23782bc..5c019d23d06b 100644
+--- a/arch/x86/include/asm/sections.h
++++ b/arch/x86/include/asm/sections.h
+@@ -10,6 +10,7 @@ extern struct exception_table_entry __stop___ex_table[];
+
+ #if defined(CONFIG_X86_64)
+ extern char __end_rodata_hpage_align[];
++extern char __entry_trampoline_start[], __entry_trampoline_end[];
+ #endif
+
+ #endif /* _ASM_X86_SECTIONS_H */
+diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
+index 461f53d27708..a4189762b266 100644
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -129,6 +129,7 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+ void cpu_disable_common(void);
+ void native_smp_prepare_boot_cpu(void);
+ void native_smp_prepare_cpus(unsigned int max_cpus);
++void calculate_max_logical_packages(void);
+ void native_smp_cpus_done(unsigned int max_cpus);
+ void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
+ int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
+diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
+index 91723461dc1f..435db58a7bad 100644
+--- a/arch/x86/include/uapi/asm/mce.h
++++ b/arch/x86/include/uapi/asm/mce.h
+@@ -30,6 +30,7 @@ struct mce {
+ __u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */
+ __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */
+ __u64 ppin; /* Protected Processor Inventory Number */
++ __u32 microcode;/* Microcode revision */
+ };
+
+ #define MCE_GET_RECORD_LEN _IOR('M', 1, int)
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 8a7963421460..93d5f55cd8b6 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1603,7 +1603,7 @@ static void __init delay_with_tsc(void)
+ do {
+ rep_nop();
+ now = rdtsc();
+- } while ((now - start) < 40000000000UL / HZ &&
++ } while ((now - start) < 40000000000ULL / HZ &&
+ time_before_eq(jiffies, end));
+ }
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index d71c8b54b696..bfca937bdcc3 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -300,6 +300,15 @@ static void __init spectre_v2_select_mitigation(void)
+ setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+ pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
+ }
++
++ /*
++ * Retpoline means the kernel is safe because it has no indirect
++ * branches. But firmware isn't, so use IBRS to protect that.
++ */
++ if (boot_cpu_has(X86_FEATURE_IBRS)) {
++ setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
++ pr_info("Enabling Restricted Speculation for firmware calls\n");
++ }
+ }
+
+ #undef pr_fmt
+@@ -326,8 +335,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+ return sprintf(buf, "Not affected\n");
+
+- return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
++ return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
++ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+ spectre_v2_module_string());
+ }
+ #endif
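
With the extra "%s", the sysfs report now carries an IBRS_FW suffix when the firmware mitigation is active. On an affected machine running with retpoline, IBPB and IBRS_FW all enabled, the file would read along these lines (illustrative output assembled from the format string above):

	$ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
	Mitigation: Full generic retpoline, IBPB, IBRS_FW
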
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index d19e903214b4..4aa9fd379390 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -144,6 +144,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
+ {
+ int i;
+
++ /*
++ * We know that the hypervisor lies to us about the microcode version,
++ * so we may as well hope that it is running the correct version.
++ */
++ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
++ return false;
++
+ for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
+ if (c->x86_model == spectre_bad_microcodes[i].model &&
+ c->x86_stepping == spectre_bad_microcodes[i].stepping)
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 2fe482f6ecd8..7a16a0fd1cb1 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -57,6 +57,9 @@
+
+ static DEFINE_MUTEX(mce_log_mutex);
+
++/* sysfs synchronization */
++static DEFINE_MUTEX(mce_sysfs_mutex);
++
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/mce.h>
+
+@@ -131,6 +134,8 @@ void mce_setup(struct mce *m)
+
+ if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
+ rdmsrl(MSR_PPIN, m->ppin);
++
++ m->microcode = boot_cpu_data.microcode;
+ }
+
+ DEFINE_PER_CPU(struct mce, injectm);
+@@ -263,7 +268,7 @@ static void __print_mce(struct mce *m)
+ */
+ pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
+ m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
+- cpu_data(m->extcpu).microcode);
++ m->microcode);
+ }
+
+ static void print_mce(struct mce *m)
+@@ -2078,6 +2083,7 @@ static ssize_t set_ignore_ce(struct device *s,
+ if (kstrtou64(buf, 0, &new) < 0)
+ return -EINVAL;
+
++ mutex_lock(&mce_sysfs_mutex);
+ if (mca_cfg.ignore_ce ^ !!new) {
+ if (new) {
+ /* disable ce features */
+@@ -2090,6 +2096,8 @@ static ssize_t set_ignore_ce(struct device *s,
+ on_each_cpu(mce_enable_ce, (void *)1, 1);
+ }
+ }
++ mutex_unlock(&mce_sysfs_mutex);
++
+ return size;
+ }
+
+@@ -2102,6 +2110,7 @@ static ssize_t set_cmci_disabled(struct device *s,
+ if (kstrtou64(buf, 0, &new) < 0)
+ return -EINVAL;
+
++ mutex_lock(&mce_sysfs_mutex);
+ if (mca_cfg.cmci_disabled ^ !!new) {
+ if (new) {
+ /* disable cmci */
+@@ -2113,6 +2122,8 @@ static ssize_t set_cmci_disabled(struct device *s,
+ on_each_cpu(mce_enable_ce, NULL, 1);
+ }
+ }
++ mutex_unlock(&mce_sysfs_mutex);
++
+ return size;
+ }
+
+@@ -2120,8 +2131,19 @@ static ssize_t store_int_with_restart(struct device *s,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+ {
+- ssize_t ret = device_store_int(s, attr, buf, size);
++ unsigned long old_check_interval = check_interval;
++ ssize_t ret = device_store_ulong(s, attr, buf, size);
++
++ if (check_interval == old_check_interval)
++ return ret;
++
++ if (check_interval < 1)
++ check_interval = 1;
++
++ mutex_lock(&mce_sysfs_mutex);
+ mce_restart();
++ mutex_unlock(&mce_sysfs_mutex);
++
+ return ret;
+ }
+
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index 04a625f0fcda..0f545b3cf926 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -23,6 +23,7 @@
+ #include <asm/nops.h>
+ #include "../entry/calling.h"
+ #include <asm/export.h>
++#include <asm/nospec-branch.h>
+
+ #ifdef CONFIG_PARAVIRT
+ #include <asm/asm-offsets.h>
+@@ -134,6 +135,7 @@ ENTRY(secondary_startup_64)
+
+ /* Ensure I am executing from virtual addresses */
+ movq $1f, %rax
++ ANNOTATE_RETPOLINE_SAFE
+ jmp *%rax
+ 1:
+ UNWIND_HINT_EMPTY
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index bd36f3c33cd0..0715f827607c 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -1168,10 +1168,18 @@ NOKPROBE_SYMBOL(longjmp_break_handler);
+
+ bool arch_within_kprobe_blacklist(unsigned long addr)
+ {
++ bool is_in_entry_trampoline_section = false;
++
++#ifdef CONFIG_X86_64
++ is_in_entry_trampoline_section =
++ (addr >= (unsigned long)__entry_trampoline_start &&
++ addr < (unsigned long)__entry_trampoline_end);
++#endif
+ return (addr >= (unsigned long)__kprobes_text_start &&
+ addr < (unsigned long)__kprobes_text_end) ||
+ (addr >= (unsigned long)__entry_text_start &&
+- addr < (unsigned long)__entry_text_end);
++ addr < (unsigned long)__entry_text_end) ||
++ is_in_entry_trampoline_section;
+ }
+
+ int __init arch_init_kprobes(void)
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 844279c3ff4a..d0829a6e1bf5 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1282,11 +1282,10 @@ void __init native_smp_prepare_boot_cpu(void)
+ cpu_set_state_online(me);
+ }
+
+-void __init native_smp_cpus_done(unsigned int max_cpus)
++void __init calculate_max_logical_packages(void)
+ {
+ int ncpus;
+
+- pr_debug("Boot done\n");
+ /*
+ * Today neither Intel nor AMD support heterogenous systems so
+ * extrapolate the boot cpu's data to all packages.
+@@ -1294,6 +1293,13 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
+ ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
+ __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
+ pr_info("Max logical packages: %u\n", __max_logical_packages);
++}
++
++void __init native_smp_cpus_done(unsigned int max_cpus)
++{
++ pr_debug("Boot done\n");
++
++ calculate_max_logical_packages();
+
+ if (x86_has_numa_in_package)
+ set_sched_topology(x86_numa_in_package_topology);
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index 9b138a06c1a4..b854ebf5851b 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -118,9 +118,11 @@ SECTIONS
+
+ #ifdef CONFIG_X86_64
+ . = ALIGN(PAGE_SIZE);
++ VMLINUX_SYMBOL(__entry_trampoline_start) = .;
+ _entry_trampoline = .;
+ *(.entry_trampoline)
+ . = ALIGN(PAGE_SIZE);
++ VMLINUX_SYMBOL(__entry_trampoline_end) = .;
+ ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
+ #endif
+
+diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
+index 69a473919260..f23934bbaf4e 100644
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -27,7 +27,6 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+ lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
+ lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+ lib-$(CONFIG_RETPOLINE) += retpoline.o
+-OBJECT_FILES_NON_STANDARD_retpoline.o :=y
+
+ obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
+
+diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
+index 480edc3a5e03..c909961e678a 100644
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -7,7 +7,6 @@
+ #include <asm/alternative-asm.h>
+ #include <asm/export.h>
+ #include <asm/nospec-branch.h>
+-#include <asm/bitsperlong.h>
+
+ .macro THUNK reg
+ .section .text.__x86.indirect_thunk
+@@ -47,58 +46,3 @@ GENERATE_THUNK(r13)
+ GENERATE_THUNK(r14)
+ GENERATE_THUNK(r15)
+ #endif
+-
+-/*
+- * Fill the CPU return stack buffer.
+- *
+- * Each entry in the RSB, if used for a speculative 'ret', contains an
+- * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+- *
+- * This is required in various cases for retpoline and IBRS-based
+- * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+- * eliminate potentially bogus entries from the RSB, and sometimes
+- * purely to ensure that it doesn't get empty, which on some CPUs would
+- * allow predictions from other (unwanted!) sources to be used.
+- *
+- * Google experimented with loop-unrolling and this turned out to be
+- * the optimal version - two calls, each with their own speculation
+- * trap should their return address end up getting used, in a loop.
+- */
+-.macro STUFF_RSB nr:req sp:req
+- mov $(\nr / 2), %_ASM_BX
+- .align 16
+-771:
+- call 772f
+-773: /* speculation trap */
+- pause
+- lfence
+- jmp 773b
+- .align 16
+-772:
+- call 774f
+-775: /* speculation trap */
+- pause
+- lfence
+- jmp 775b
+- .align 16
+-774:
+- dec %_ASM_BX
+- jnz 771b
+- add $((BITS_PER_LONG/8) * \nr), \sp
+-.endm
+-
+-#define RSB_FILL_LOOPS 16 /* To avoid underflow */
+-
+-ENTRY(__fill_rsb)
+- STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP
+- ret
+-END(__fill_rsb)
+-EXPORT_SYMBOL_GPL(__fill_rsb)
+-
+-#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
+-
+-ENTRY(__clear_rsb)
+- STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP
+- ret
+-END(__clear_rsb)
+-EXPORT_SYMBOL_GPL(__clear_rsb)
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 800de815519c..c88573d90f3e 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -1248,10 +1248,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
+ tsk = current;
+ mm = tsk->mm;
+
+- /*
+- * Detect and handle instructions that would cause a page fault for
+- * both a tracked kernel page and a userspace page.
+- */
+ prefetchw(&mm->mmap_sem);
+
+ if (unlikely(kmmio_fault(regs, address)))
+diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
+index 01f682cf77a8..40a6085063d6 100644
+--- a/arch/x86/mm/mem_encrypt_boot.S
++++ b/arch/x86/mm/mem_encrypt_boot.S
+@@ -15,6 +15,7 @@
+ #include <asm/page.h>
+ #include <asm/processor-flags.h>
+ #include <asm/msr-index.h>
++#include <asm/nospec-branch.h>
+
+ .text
+ .code64
+@@ -59,6 +60,7 @@ ENTRY(sme_encrypt_execute)
+ movq %rax, %r8 /* Workarea encryption routine */
+ addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */
+
++ ANNOTATE_RETPOLINE_SAFE
+ call *%rax /* Call the encryption routine */
+
+ pop %r12
+diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
+index de53bd15df5a..24bb7598774e 100644
+--- a/arch/x86/realmode/rm/trampoline_64.S
++++ b/arch/x86/realmode/rm/trampoline_64.S
+@@ -102,7 +102,7 @@ ENTRY(startup_32)
+ * don't we'll eventually crash trying to execute encrypted
+ * instructions.
+ */
+- bt $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
++ btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
+ jnc .Ldone
+ movl $MSR_K8_SYSCFG, %ecx
+ rdmsr
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index 77c959cf81e7..7a43b2ae19f1 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -122,6 +122,8 @@ void __init xen_smp_cpus_done(unsigned int max_cpus)
+
+ if (xen_hvm_domain())
+ native_smp_cpus_done(max_cpus);
++ else
++ calculate_max_logical_packages();
+
+ if (xen_have_vcpu_info_placement)
+ return;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index d5fe720cf149..89d2ee00cced 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
+ struct iov_iter i;
+ ssize_t bw;
+
+- iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
++ iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
+
+ file_start_write(file);
+ bw = vfs_iter_write(file, &i, ppos, 0);
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index 3cec403a80b3..5294442505cb 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -413,6 +413,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
+ if (chip->dev.parent)
+ pm_runtime_get_sync(chip->dev.parent);
+
++ if (chip->ops->clk_enable != NULL)
++ chip->ops->clk_enable(chip, true);
++
+ /* Store the decision as chip->locality will be changed. */
+ need_locality = chip->locality == -1;
+
+@@ -489,6 +492,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
+ chip->locality = -1;
+ }
+ out_no_locality:
++ if (chip->ops->clk_enable != NULL)
++ chip->ops->clk_enable(chip, false);
++
+ if (chip->dev.parent)
+ pm_runtime_put_sync(chip->dev.parent);
+
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index e2d1055fb814..f08949a5f678 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -133,93 +133,14 @@ static int check_acpi_tpm2(struct device *dev)
+ }
+ #endif
+
+-#ifdef CONFIG_X86
+-#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
+-#define ILB_REMAP_SIZE 0x100
+-#define LPC_CNTRL_REG_OFFSET 0x84
+-#define LPC_CLKRUN_EN (1 << 2)
+-
+-static void __iomem *ilb_base_addr;
+-
+-static inline bool is_bsw(void)
+-{
+- return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
+-}
+-
+-/**
+- * tpm_platform_begin_xfer() - clear LPC CLKRUN_EN i.e. clocks will be running
+- */
+-static void tpm_platform_begin_xfer(void)
+-{
+- u32 clkrun_val;
+-
+- if (!is_bsw())
+- return;
+-
+- clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
+-
+- /* Disable LPC CLKRUN# */
+- clkrun_val &= ~LPC_CLKRUN_EN;
+- iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
+-
+- /*
+- * Write any random value on port 0x80 which is on LPC, to make
+- * sure LPC clock is running before sending any TPM command.
+- */
+- outb(0xCC, 0x80);
+-
+-}
+-
+-/**
+- * tpm_platform_end_xfer() - set LPC CLKRUN_EN i.e. clocks can be turned off
+- */
+-static void tpm_platform_end_xfer(void)
+-{
+- u32 clkrun_val;
+-
+- if (!is_bsw())
+- return;
+-
+- clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
+-
+- /* Enable LPC CLKRUN# */
+- clkrun_val |= LPC_CLKRUN_EN;
+- iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
+-
+- /*
+- * Write any random value on port 0x80 which is on LPC, to make
+- * sure LPC clock is running before sending any TPM command.
+- */
+- outb(0xCC, 0x80);
+-
+-}
+-#else
+-static inline bool is_bsw(void)
+-{
+- return false;
+-}
+-
+-static void tpm_platform_begin_xfer(void)
+-{
+-}
+-
+-static void tpm_platform_end_xfer(void)
+-{
+-}
+-#endif
+-
+ static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *result)
+ {
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+- tpm_platform_begin_xfer();
+-
+ while (len--)
+ *result++ = ioread8(phy->iobase + addr);
+
+- tpm_platform_end_xfer();
+-
+ return 0;
+ }
+
+@@ -228,13 +149,9 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ {
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+- tpm_platform_begin_xfer();
+-
+ while (len--)
+ iowrite8(*value++, phy->iobase + addr);
+
+- tpm_platform_end_xfer();
+-
+ return 0;
+ }
+
+@@ -242,12 +159,8 @@ static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
+ {
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+- tpm_platform_begin_xfer();
+-
+ *result = ioread16(phy->iobase + addr);
+
+- tpm_platform_end_xfer();
+-
+ return 0;
+ }
+
+@@ -255,12 +168,8 @@ static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
+ {
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+- tpm_platform_begin_xfer();
+-
+ *result = ioread32(phy->iobase + addr);
+
+- tpm_platform_end_xfer();
+-
+ return 0;
+ }
+
+@@ -268,12 +177,8 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
+ {
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+- tpm_platform_begin_xfer();
+-
+ iowrite32(value, phy->iobase + addr);
+
+- tpm_platform_end_xfer();
+-
+ return 0;
+ }
+
+@@ -461,11 +366,6 @@ static int __init init_tis(void)
+ if (rc)
+ goto err_force;
+
+-#ifdef CONFIG_X86
+- if (is_bsw())
+- ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
+- ILB_REMAP_SIZE);
+-#endif
+ rc = platform_driver_register(&tis_drv);
+ if (rc)
+ goto err_platform;
+@@ -484,10 +384,6 @@ static int __init init_tis(void)
+ err_platform:
+ if (force_pdev)
+ platform_device_unregister(force_pdev);
+-#ifdef CONFIG_X86
+- if (is_bsw())
+- iounmap(ilb_base_addr);
+-#endif
+ err_force:
+ return rc;
+ }
+@@ -497,10 +393,6 @@ static void __exit cleanup_tis(void)
+ pnp_unregister_driver(&tis_pnp_driver);
+ platform_driver_unregister(&tis_drv);
+
+-#ifdef CONFIG_X86
+- if (is_bsw())
+- iounmap(ilb_base_addr);
+-#endif
+ if (force_pdev)
+ platform_device_unregister(force_pdev);
+ }
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 7561922bc8f8..08ae49dee8b1 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -31,6 +31,8 @@
+ #include "tpm.h"
+ #include "tpm_tis_core.h"
+
++static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value);
++
+ /* Before we attempt to access the TPM we must see that the valid bit is set.
+ * The specification says that this bit is 0 at reset and remains 0 until the
+ * 'TPM has gone through its self test and initialization and has established
+@@ -422,19 +424,28 @@ static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
+ int i, rc;
+ u32 did_vid;
+
++ if (chip->ops->clk_enable != NULL)
++ chip->ops->clk_enable(chip, true);
++
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
+ if (rc < 0)
+- return rc;
++ goto out;
+
+ for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
+ if (vendor_timeout_overrides[i].did_vid != did_vid)
+ continue;
+ memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
+ sizeof(vendor_timeout_overrides[i].timeout_us));
+- return true;
++ rc = true;
+ }
+
+- return false;
++ rc = false;
++
++out:
++ if (chip->ops->clk_enable != NULL)
++ chip->ops->clk_enable(chip, false);
++
++ return rc;
+ }
+
+ /*
+@@ -654,14 +665,73 @@ void tpm_tis_remove(struct tpm_chip *chip)
+ u32 interrupt;
+ int rc;
+
++ tpm_tis_clkrun_enable(chip, true);
++
+ rc = tpm_tis_read32(priv, reg, &interrupt);
+ if (rc < 0)
+ interrupt = 0;
+
+ tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
++
++ tpm_tis_clkrun_enable(chip, false);
++
++ if (priv->ilb_base_addr)
++ iounmap(priv->ilb_base_addr);
+ }
+ EXPORT_SYMBOL_GPL(tpm_tis_remove);
+
++/**
++ * tpm_tis_clkrun_enable() - Keep clkrun protocol disabled for entire duration
++ * of a single TPM command
++ * @chip: TPM chip to use
++ * @value: 1 - Disable CLKRUN protocol, so that clocks are free running
++ * 0 - Enable CLKRUN protocol
++ * Call this function directly in tpm_tis_remove() in error or driver removal
++ * path, since the chip->ops is set to NULL in tpm_chip_unregister().
++ */
++static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value)
++{
++ struct tpm_tis_data *data = dev_get_drvdata(&chip->dev);
++ u32 clkrun_val;
++
++ if (!IS_ENABLED(CONFIG_X86) || !is_bsw() ||
++ !data->ilb_base_addr)
++ return;
++
++ if (value) {
++ data->clkrun_enabled++;
++ if (data->clkrun_enabled > 1)
++ return;
++ clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
++
++ /* Disable LPC CLKRUN# */
++ clkrun_val &= ~LPC_CLKRUN_EN;
++ iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
++
++ /*
++ * Write any random value on port 0x80 which is on LPC, to make
++ * sure LPC clock is running before sending any TPM command.
++ */
++ outb(0xCC, 0x80);
++ } else {
++ data->clkrun_enabled--;
++ if (data->clkrun_enabled)
++ return;
++
++ clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
++
++ /* Enable LPC CLKRUN# */
++ clkrun_val |= LPC_CLKRUN_EN;
++ iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
++
++ /*
++ * Write any random value on port 0x80 which is on LPC, to make
++ * sure LPC clock is running before sending any TPM command.
++ */
++ outb(0xCC, 0x80);
++ }
++}
++
+ static const struct tpm_class_ops tpm_tis = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = tpm_tis_status,
+@@ -674,6 +744,7 @@ static const struct tpm_class_ops tpm_tis = {
+ .req_canceled = tpm_tis_req_canceled,
+ .request_locality = request_locality,
+ .relinquish_locality = release_locality,
++ .clk_enable = tpm_tis_clkrun_enable,
+ };
+
+ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+@@ -681,6 +752,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ acpi_handle acpi_dev_handle)
+ {
+ u32 vendor, intfcaps, intmask;
++ u32 clkrun_val;
+ u8 rid;
+ int rc, probe;
+ struct tpm_chip *chip;
+@@ -701,6 +773,23 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ priv->phy_ops = phy_ops;
+ dev_set_drvdata(&chip->dev, priv);
+
++ if (is_bsw()) {
++ priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
++ ILB_REMAP_SIZE);
++ if (!priv->ilb_base_addr)
++ return -ENOMEM;
++
++ clkrun_val = ioread32(priv->ilb_base_addr + LPC_CNTRL_OFFSET);
++ /* Check if CLKRUN# is already not enabled in the LPC bus */
++ if (!(clkrun_val & LPC_CLKRUN_EN)) {
++ iounmap(priv->ilb_base_addr);
++ priv->ilb_base_addr = NULL;
++ }
++ }
++
++ if (chip->ops->clk_enable != NULL)
++ chip->ops->clk_enable(chip, true);
++
+ if (wait_startup(chip, 0) != 0) {
+ rc = -ENODEV;
+ goto out_err;
+@@ -791,9 +880,20 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ }
+ }
+
+- return tpm_chip_register(chip);
++ rc = tpm_chip_register(chip);
++ if (rc)
++ goto out_err;
++
++ if (chip->ops->clk_enable != NULL)
++ chip->ops->clk_enable(chip, false);
++
++ return 0;
+ out_err:
++ if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
++ chip->ops->clk_enable(chip, false);
++
+ tpm_tis_remove(chip);
++
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(tpm_tis_core_init);
+@@ -805,22 +905,31 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
+ u32 intmask;
+ int rc;
+
++ if (chip->ops->clk_enable != NULL)
++ chip->ops->clk_enable(chip, true);
++
+ /* reenable interrupts that device may have lost or
+ * BIOS/firmware may have disabled
+ */
+ rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
+ if (rc < 0)
+- return;
++ goto out;
+
+ rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
+ if (rc < 0)
+- return;
++ goto out;
+
+ intmask |= TPM_INTF_CMD_READY_INT
+ | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
+ | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
+
+ tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
++
++out:
++ if (chip->ops->clk_enable != NULL)
++ chip->ops->clk_enable(chip, false);
++
++ return;
+ }
+
+ int tpm_tis_resume(struct device *dev)
+diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
+index 6bbac319ff3b..d5c6a2e952b3 100644
+--- a/drivers/char/tpm/tpm_tis_core.h
++++ b/drivers/char/tpm/tpm_tis_core.h
+@@ -79,6 +79,11 @@ enum tis_defaults {
+ #define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
+ #define TPM_RID(l) (0x0F04 | ((l) << 12))
+
++#define LPC_CNTRL_OFFSET 0x84
++#define LPC_CLKRUN_EN (1 << 2)
++#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
++#define ILB_REMAP_SIZE 0x100
++
+ enum tpm_tis_flags {
+ TPM_TIS_ITPM_WORKAROUND = BIT(0),
+ };
+@@ -89,6 +94,8 @@ struct tpm_tis_data {
+ int irq;
+ bool irq_tested;
+ unsigned int flags;
++ void __iomem *ilb_base_addr;
++ u16 clkrun_enabled;
+ wait_queue_head_t int_queue;
+ wait_queue_head_t read_queue;
+ const struct tpm_tis_phy_ops *phy_ops;
+@@ -144,6 +151,15 @@ static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
+ return data->phy_ops->write32(data, addr, value);
+ }
+
++static inline bool is_bsw(void)
++{
++#ifdef CONFIG_X86
++ return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
++#else
++ return false;
++#endif
++}
++
+ void tpm_tis_remove(struct tpm_chip *chip);
+ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ const struct tpm_tis_phy_ops *phy_ops,
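/*
 * A minimal usage sketch of the new ->clk_enable hook introduced above,
 * not taken verbatim from the patch: any code path that touches TPM
 * registers on Braswell brackets the access so CLKRUN# stays disabled
 * for the whole exchange. tpm_tis_do_probe_work() is a hypothetical
 * stand-in for the real register accesses.
 */
static int tpm_tis_bracketed_access(struct tpm_chip *chip)
{
	int rc;

	if (chip->ops->clk_enable != NULL)
		chip->ops->clk_enable(chip, true);	/* LPC clock free-running */

	rc = tpm_tis_do_probe_work(chip);		/* assumed helper */

	if (chip->ops->clk_enable != NULL)
		chip->ops->clk_enable(chip, false);	/* CLKRUN# allowed again */

	return rc;
}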
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 57afad79f55d..8fa850a070e0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
+ size_t size;
+ u32 retry = 3;
+
++ if (amdgpu_acpi_pcie_notify_device_ready(adev))
++ return -EINVAL;
++
+ /* Get the device handle */
+ handle = ACPI_HANDLE(&adev->pdev->dev);
+ if (!handle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+index df9cbc78e168..21e7ae159dff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+@@ -737,9 +737,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
+ enum drm_connector_status ret = connector_status_disconnected;
+ int r;
+
+- r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
+- return connector_status_disconnected;
++ if (!drm_kms_helper_is_poll_worker()) {
++ r = pm_runtime_get_sync(connector->dev->dev);
++ if (r < 0)
++ return connector_status_disconnected;
++ }
+
+ if (encoder) {
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+@@ -758,8 +760,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
+ /* check acpi lid status ??? */
+
+ amdgpu_connector_update_scratch_regs(connector, ret);
+- pm_runtime_mark_last_busy(connector->dev->dev);
+- pm_runtime_put_autosuspend(connector->dev->dev);
++
++ if (!drm_kms_helper_is_poll_worker()) {
++ pm_runtime_mark_last_busy(connector->dev->dev);
++ pm_runtime_put_autosuspend(connector->dev->dev);
++ }
++
+ return ret;
+ }
+
+@@ -869,9 +875,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
+ enum drm_connector_status ret = connector_status_disconnected;
+ int r;
+
+- r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
+- return connector_status_disconnected;
++ if (!drm_kms_helper_is_poll_worker()) {
++ r = pm_runtime_get_sync(connector->dev->dev);
++ if (r < 0)
++ return connector_status_disconnected;
++ }
+
+ encoder = amdgpu_connector_best_single_encoder(connector);
+ if (!encoder)
+@@ -925,8 +933,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
+ amdgpu_connector_update_scratch_regs(connector, ret);
+
+ out:
+- pm_runtime_mark_last_busy(connector->dev->dev);
+- pm_runtime_put_autosuspend(connector->dev->dev);
++ if (!drm_kms_helper_is_poll_worker()) {
++ pm_runtime_mark_last_busy(connector->dev->dev);
++ pm_runtime_put_autosuspend(connector->dev->dev);
++ }
+
+ return ret;
+ }
+@@ -989,9 +999,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
+ enum drm_connector_status ret = connector_status_disconnected;
+ bool dret = false, broken_edid = false;
+
+- r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
+- return connector_status_disconnected;
++ if (!drm_kms_helper_is_poll_worker()) {
++ r = pm_runtime_get_sync(connector->dev->dev);
++ if (r < 0)
++ return connector_status_disconnected;
++ }
+
+ if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
+ ret = connector->status;
+@@ -1116,8 +1128,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
+ amdgpu_connector_update_scratch_regs(connector, ret);
+
+ exit:
+- pm_runtime_mark_last_busy(connector->dev->dev);
+- pm_runtime_put_autosuspend(connector->dev->dev);
++ if (!drm_kms_helper_is_poll_worker()) {
++ pm_runtime_mark_last_busy(connector->dev->dev);
++ pm_runtime_put_autosuspend(connector->dev->dev);
++ }
+
+ return ret;
+ }
+@@ -1360,9 +1374,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
+ struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+ int r;
+
+- r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
+- return connector_status_disconnected;
++ if (!drm_kms_helper_is_poll_worker()) {
++ r = pm_runtime_get_sync(connector->dev->dev);
++ if (r < 0)
++ return connector_status_disconnected;
++ }
+
+ if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
+ ret = connector->status;
+@@ -1430,8 +1446,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
+
+ amdgpu_connector_update_scratch_regs(connector, ret);
+ out:
+- pm_runtime_mark_last_busy(connector->dev->dev);
+- pm_runtime_put_autosuspend(connector->dev->dev);
++ if (!drm_kms_helper_is_poll_worker()) {
++ pm_runtime_mark_last_busy(connector->dev->dev);
++ pm_runtime_put_autosuspend(connector->dev->dev);
++ }
+
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index e8bd50cf9785..9df2a8c7d35d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -297,12 +297,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
+ if (adev->uvd.vcpu_bo == NULL)
+ return 0;
+
+- for (i = 0; i < adev->uvd.max_handles; ++i)
+- if (atomic_read(&adev->uvd.handles[i]))
+- break;
++ /* only valid for physical mode */
++ if (adev->asic_type < CHIP_POLARIS10) {
++ for (i = 0; i < adev->uvd.max_handles; ++i)
++ if (atomic_read(&adev->uvd.handles[i]))
++ break;
+
+- if (i == AMDGPU_MAX_UVD_HANDLES)
+- return 0;
++ if (i == adev->uvd.max_handles)
++ return 0;
++ }
+
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 419ba0ce7ee5..356ca560c80e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -4403,34 +4403,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
+ case CHIP_KAVERI:
+ adev->gfx.config.max_shader_engines = 1;
+ adev->gfx.config.max_tile_pipes = 4;
+- if ((adev->pdev->device == 0x1304) ||
+- (adev->pdev->device == 0x1305) ||
+- (adev->pdev->device == 0x130C) ||
+- (adev->pdev->device == 0x130F) ||
+- (adev->pdev->device == 0x1310) ||
+- (adev->pdev->device == 0x1311) ||
+- (adev->pdev->device == 0x131C)) {
+- adev->gfx.config.max_cu_per_sh = 8;
+- adev->gfx.config.max_backends_per_se = 2;
+- } else if ((adev->pdev->device == 0x1309) ||
+- (adev->pdev->device == 0x130A) ||
+- (adev->pdev->device == 0x130D) ||
+- (adev->pdev->device == 0x1313) ||
+- (adev->pdev->device == 0x131D)) {
+- adev->gfx.config.max_cu_per_sh = 6;
+- adev->gfx.config.max_backends_per_se = 2;
+- } else if ((adev->pdev->device == 0x1306) ||
+- (adev->pdev->device == 0x1307) ||
+- (adev->pdev->device == 0x130B) ||
+- (adev->pdev->device == 0x130E) ||
+- (adev->pdev->device == 0x1315) ||
+- (adev->pdev->device == 0x131B)) {
+- adev->gfx.config.max_cu_per_sh = 4;
+- adev->gfx.config.max_backends_per_se = 1;
+- } else {
+- adev->gfx.config.max_cu_per_sh = 3;
+- adev->gfx.config.max_backends_per_se = 1;
+- }
++ adev->gfx.config.max_cu_per_sh = 8;
++ adev->gfx.config.max_backends_per_se = 2;
+ adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_texture_channel_caches = 4;
+ adev->gfx.config.max_gprs = 256;
+diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
+index 8284d5dbfc30..4c178feeb4bd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si.c
++++ b/drivers/gpu/drm/amd/amdgpu/si.c
+@@ -31,6 +31,7 @@
+ #include "amdgpu_uvd.h"
+ #include "amdgpu_vce.h"
+ #include "atom.h"
++#include "amd_pcie.h"
+ #include "amdgpu_powerplay.h"
+ #include "sid.h"
+ #include "si_ih.h"
+@@ -1461,8 +1462,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
+ {
+ struct pci_dev *root = adev->pdev->bus->self;
+ int bridge_pos, gpu_pos;
+- u32 speed_cntl, mask, current_data_rate;
+- int ret, i;
++ u32 speed_cntl, current_data_rate;
++ int i;
+ u16 tmp16;
+
+ if (pci_is_root_bus(adev->pdev->bus))
+@@ -1474,23 +1475,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+- ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+- if (ret != 0)
+- return;
+-
+- if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
++ if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
++ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
+ return;
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
+ LC_CURRENT_DATA_RATE_SHIFT;
+- if (mask & DRM_PCIE_SPEED_80) {
++ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
+ if (current_data_rate == 2) {
+ DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+ return;
+ }
+ DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
+- } else if (mask & DRM_PCIE_SPEED_50) {
++ } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
+ if (current_data_rate == 1) {
+ DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+ return;
+@@ -1506,7 +1504,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
+ if (!gpu_pos)
+ return;
+
+- if (mask & DRM_PCIE_SPEED_80) {
++ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
+ if (current_data_rate != 2) {
+ u16 bridge_cfg, gpu_cfg;
+ u16 bridge_cfg2, gpu_cfg2;
+@@ -1589,9 +1587,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
+
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~0xf;
+- if (mask & DRM_PCIE_SPEED_80)
++ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ tmp16 |= 3;
+- else if (mask & DRM_PCIE_SPEED_50)
++ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+ tmp16 |= 2;
+ else
+ tmp16 |= 1;
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+index 3af322adae76..ea80b7ca5c37 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+@@ -26,6 +26,7 @@
+ #include "amdgpu_pm.h"
+ #include "amdgpu_dpm.h"
+ #include "amdgpu_atombios.h"
++#include "amd_pcie.h"
+ #include "sid.h"
+ #include "r600_dpm.h"
+ #include "si_dpm.h"
+@@ -3331,29 +3332,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
+ }
+ }
+
+-static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
+- u32 sys_mask,
+- enum amdgpu_pcie_gen asic_gen,
+- enum amdgpu_pcie_gen default_gen)
+-{
+- switch (asic_gen) {
+- case AMDGPU_PCIE_GEN1:
+- return AMDGPU_PCIE_GEN1;
+- case AMDGPU_PCIE_GEN2:
+- return AMDGPU_PCIE_GEN2;
+- case AMDGPU_PCIE_GEN3:
+- return AMDGPU_PCIE_GEN3;
+- default:
+- if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
+- return AMDGPU_PCIE_GEN3;
+- else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
+- return AMDGPU_PCIE_GEN2;
+- else
+- return AMDGPU_PCIE_GEN1;
+- }
+- return AMDGPU_PCIE_GEN1;
+-}
+-
+ static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
+ u32 *p, u32 *u)
+ {
+@@ -5028,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
+ table->ACPIState.levels[0].vddc.index,
+ &table->ACPIState.levels[0].std_vddc);
+ }
+- table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
+- si_pi->sys_pcie_mask,
+- si_pi->boot_pcie_gen,
+- AMDGPU_PCIE_GEN1);
++ table->ACPIState.levels[0].gen2PCIE =
++ (u8)amdgpu_get_pcie_gen_support(adev,
++ si_pi->sys_pcie_mask,
++ si_pi->boot_pcie_gen,
++ AMDGPU_PCIE_GEN1);
+
+ if (si_pi->vddc_phase_shed_control)
+ si_populate_phase_shedding_value(adev,
+@@ -7172,10 +7151,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
+ pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
+ pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
+ pl->flags = le32_to_cpu(clock_info->si.ulFlags);
+- pl->pcie_gen = r600_get_pcie_gen_support(adev,
+- si_pi->sys_pcie_mask,
+- si_pi->boot_pcie_gen,
+- clock_info->si.ucPCIEGen);
++ pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
++ si_pi->sys_pcie_mask,
++ si_pi->boot_pcie_gen,
++ clock_info->si.ucPCIEGen);
+
+ /* patch up vddc if necessary */
+ ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
+@@ -7330,7 +7309,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
+ struct si_power_info *si_pi;
+ struct atom_clock_dividers dividers;
+ int ret;
+- u32 mask;
+
+ si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
+ if (si_pi == NULL)
+@@ -7340,11 +7318,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
+ eg_pi = &ni_pi->eg;
+ pi = &eg_pi->rv7xx;
+
+- ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+- if (ret)
+- si_pi->sys_pcie_mask = 0;
+- else
+- si_pi->sys_pcie_mask = mask;
++ si_pi->sys_pcie_mask =
++ (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
++ CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
+ si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+ si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+index e230cc44a0a7..bd6cab5a9f43 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -200,7 +200,8 @@ bool dc_stream_set_cursor_attributes(
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+- if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
++ if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm &&
++ !pipe_ctx->plane_res.dpp) || !pipe_ctx->plane_res.ipp)
+ continue;
+ if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+ continue;
+@@ -276,7 +277,8 @@ bool dc_stream_set_cursor_position(
+ if (pipe_ctx->stream != stream ||
+ (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
+ !pipe_ctx->plane_state ||
+- (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
++ (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
++ !pipe_ctx->plane_res.ipp)
+ continue;
+
+ if (pipe_ctx->plane_state->address.type
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index fe88852b4774..00c728260616 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -683,6 +683,7 @@ void dce110_link_encoder_construct(
+ {
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
++ enum bp_result result = BP_RESULT_OK;
+
+ enc110->base.funcs = &dce110_lnk_enc_funcs;
+ enc110->base.ctx = init_data->ctx;
+@@ -757,15 +758,24 @@ void dce110_link_encoder_construct(
+ enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
++ /* default to one to mirror Windows behavior */
++ enc110->base.features.flags.bits.HDMI_6GB_EN = 1;
++
++ result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios,
++ enc110->base.id, &bp_cap_info);
++
+ /* Override features with DCE-specific values */
+- if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info(
+- enc110->base.ctx->dc_bios, enc110->base.id,
+- &bp_cap_info)) {
++ if (BP_RESULT_OK == result) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
++ } else {
++ dm_logger_write(enc110->base.ctx->logger, LOG_WARNING,
++ "%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
++ __func__,
++ result);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index e33ec7fc5d09..6688cdb216e9 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -2791,10 +2791,13 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
+
+
+- disable_mclk_switching = ((1 < info.display_count) ||
+- disable_mclk_switching_for_frame_lock ||
+- smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
+- (mode_info.refresh_rate > 120));
++ if (info.display_count == 0)
++ disable_mclk_switching = false;
++ else
++ disable_mclk_switching = ((1 < info.display_count) ||
++ disable_mclk_switching_for_frame_lock ||
++ smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
++ (mode_info.refresh_rate > 120));
+
+ sclk = smu7_ps->performance_levels[0].engine_clock;
+ mclk = smu7_ps->performance_levels[0].memory_clock;
+@@ -4569,13 +4572,6 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
+ int tmp_result, result = 0;
+ uint32_t sclk_mask = 0, mclk_mask = 0;
+
+- if (hwmgr->chip_id == CHIP_FIJI) {
+- if (request->type == AMD_PP_GFX_PROFILE)
+- smu7_enable_power_containment(hwmgr);
+- else if (request->type == AMD_PP_COMPUTE_PROFILE)
+- smu7_disable_power_containment(hwmgr);
+- }
+-
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
+ return -EINVAL;
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index f8d838c2c8ee..9acbefb33bd6 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -3208,10 +3208,13 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
+ force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
+
+- disable_mclk_switching = (info.display_count > 1) ||
+- disable_mclk_switching_for_frame_lock ||
+- disable_mclk_switching_for_vr ||
+- force_mclk_high;
++ if (info.display_count == 0)
++ disable_mclk_switching = false;
++ else
++ disable_mclk_switching = (info.display_count > 1) ||
++ disable_mclk_switching_for_frame_lock ||
++ disable_mclk_switching_for_vr ||
++ force_mclk_high;
+
+ sclk = vega10_ps->performance_levels[0].gfx_clock;
+ mclk = vega10_ps->performance_levels[0].mem_clock;
+diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
+index 279c1035c12d..5e1f1e2deb52 100644
+--- a/drivers/gpu/drm/drm_framebuffer.c
++++ b/drivers/gpu/drm/drm_framebuffer.c
+@@ -118,6 +118,10 @@ int drm_mode_addfb(struct drm_device *dev,
+ r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+ r.handles[0] = or->handle;
+
++ if (r.pixel_format == DRM_FORMAT_XRGB2101010 &&
++ dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP)
++ r.pixel_format = DRM_FORMAT_XBGR2101010;
++
+ ret = drm_mode_addfb2(dev, &r, file_priv);
+ if (ret)
+ return ret;
+diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
+index 6dc2dde5b672..7a6b2dc08913 100644
+--- a/drivers/gpu/drm/drm_probe_helper.c
++++ b/drivers/gpu/drm/drm_probe_helper.c
+@@ -654,6 +654,26 @@ static void output_poll_execute(struct work_struct *work)
+ schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
+ }
+
++/**
++ * drm_kms_helper_is_poll_worker - is %current task an output poll worker?
++ *
++ * Determine if %current task is an output poll worker. This can be used
++ * to select distinct code paths for output polling versus other contexts.
++ *
++ * One use case is to avoid a deadlock between the output poll worker and
++ * the autosuspend worker wherein the latter waits for polling to finish
++ * upon calling drm_kms_helper_poll_disable(), while the former waits for
++ * runtime suspend to finish upon calling pm_runtime_get_sync() in a
++ * connector ->detect hook.
++ */
++bool drm_kms_helper_is_poll_worker(void)
++{
++ struct work_struct *work = current_work();
++
++ return work && work->func == output_poll_execute;
++}
++EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
++
+ /**
+ * drm_kms_helper_poll_disable - disable output polling
+ * @dev: drm_device
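/*
 * A condensed sketch of the connector ->detect pattern that the amdgpu,
 * nouveau and radeon hunks in this patch all converge on: only take a
 * runtime-PM reference when not running from the output poll worker,
 * sidestepping the poll/suspend deadlock described in the kerneldoc
 * above. example_probe_hw() is a hypothetical hardware probe.
 */
static enum drm_connector_status
example_connector_detect(struct drm_connector *connector, bool force)
{
	enum drm_connector_status status;
	int r;

	if (!drm_kms_helper_is_poll_worker()) {
		r = pm_runtime_get_sync(connector->dev->dev);
		if (r < 0)
			return connector_status_disconnected;
	}

	status = example_probe_hw(connector);	/* assumed helper */

	if (!drm_kms_helper_is_poll_worker()) {
		pm_runtime_mark_last_busy(connector->dev->dev);
		pm_runtime_put_autosuspend(connector->dev->dev);
	}

	return status;
}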
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 2cf10d17acfb..62004ea403c6 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -1827,6 +1827,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
+ if (IS_GEN9_LP(dev_priv) ||
+ !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
+ intel_power_domains_init_hw(dev_priv, true);
++ else
++ intel_display_set_init_power(dev_priv, true);
+
+ i915_gem_sanitize(dev_priv);
+
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index 435ed95df144..3d0ae387691f 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -505,6 +505,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
+ list_add_tail(&vma->exec_link, &eb->unbound);
+ if (drm_mm_node_allocated(&vma->node))
+ err = i915_vma_unbind(vma);
++ if (unlikely(err))
++ vma->exec_flags = NULL;
+ }
+ return err;
+ }
+@@ -2419,7 +2421,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
+ if (out_fence) {
+ if (err == 0) {
+ fd_install(out_fence_fd, out_fence->file);
+- args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
++ args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
+ args->rsvd2 |= (u64)out_fence_fd << 32;
+ out_fence_fd = -1;
+ } else {
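/*
 * A note on the GENMASK_ULL fix above, relying only on the standard
 * kernel macro semantics: GENMASK_ULL(high, low) builds a mask from bit
 * high down to bit low, so GENMASK_ULL(31, 0) is 0x00000000ffffffffULL
 * and preserves the in-fence fd in the low 32 bits of rsvd2. The swapped
 * GENMASK_ULL(0, 31) evaluates to 0, so the old code cleared rsvd2
 * entirely and lost the in-fence before OR-ing in the out-fence fd.
 */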
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index 59ee808f8fd9..cc2a10f22c3d 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -1301,9 +1301,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
+ */
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ dev_priv->perf.oa.exclusive_stream = NULL;
+- mutex_unlock(&dev_priv->drm.struct_mutex);
+-
+ dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
++ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ free_oa_buffer(dev_priv);
+
+@@ -1755,22 +1754,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
+ * Note: it's only the RCS/Render context that has any OA state.
+ */
+ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
+- const struct i915_oa_config *oa_config,
+- bool interruptible)
++ const struct i915_oa_config *oa_config)
+ {
+ struct i915_gem_context *ctx;
+ int ret;
+ unsigned int wait_flags = I915_WAIT_LOCKED;
+
+- if (interruptible) {
+- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+- if (ret)
+- return ret;
+-
+- wait_flags |= I915_WAIT_INTERRUPTIBLE;
+- } else {
+- mutex_lock(&dev_priv->drm.struct_mutex);
+- }
++ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ /* Switch away from any user context. */
+ ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
+@@ -1818,8 +1808,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
+ }
+
+ out:
+- mutex_unlock(&dev_priv->drm.struct_mutex);
+-
+ return ret;
+ }
+
+@@ -1862,7 +1850,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
+ * to make sure all slices/subslices are ON before writing to NOA
+ * registers.
+ */
+- ret = gen8_configure_all_contexts(dev_priv, oa_config, true);
++ ret = gen8_configure_all_contexts(dev_priv, oa_config);
+ if (ret)
+ return ret;
+
+@@ -1877,7 +1865,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
+ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
+ {
+ /* Reset all contexts' slices/subslices configurations. */
+- gen8_configure_all_contexts(dev_priv, NULL, false);
++ gen8_configure_all_contexts(dev_priv, NULL);
+
+ I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
+ ~GT_NOA_ENABLE));
+@@ -2127,6 +2115,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
+ if (ret)
+ goto err_oa_buf_alloc;
+
++ ret = i915_mutex_lock_interruptible(&dev_priv->drm);
++ if (ret)
++ goto err_lock;
++
+ ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
+ stream->oa_config);
+ if (ret)
+@@ -2134,23 +2126,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
+
+ stream->ops = &i915_oa_stream_ops;
+
+- /* Lock device for exclusive_stream access late because
+- * enable_metric_set() might lock as well on gen8+.
+- */
+- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+- if (ret)
+- goto err_lock;
+-
+ dev_priv->perf.oa.exclusive_stream = stream;
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ return 0;
+
+-err_lock:
++err_enable:
+ dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
++ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+-err_enable:
++err_lock:
+ free_oa_buffer(dev_priv);
+
+ err_oa_buf_alloc:
+diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
+index 0ddba16fde1b..538a762f7318 100644
+--- a/drivers/gpu/drm/i915/intel_audio.c
++++ b/drivers/gpu/drm/i915/intel_audio.c
+@@ -754,11 +754,11 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
+ {
+ struct intel_encoder *encoder;
+
+- if (WARN_ON(pipe >= INTEL_INFO(dev_priv)->num_pipes))
+- return NULL;
+-
+ /* MST */
+ if (pipe >= 0) {
++ if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
++ return NULL;
++
+ encoder = dev_priv->av_enc_map[pipe];
+ /*
+ * when bootup, audio driver may not know it is
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 50f8443641b8..a83e18c72f7b 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -14463,6 +14463,8 @@ static void sanitize_watermarks(struct drm_device *dev)
+
+ cs->wm.need_postvbl_update = true;
+ dev_priv->display.optimize_watermarks(intel_state, cs);
++
++ to_intel_crtc_state(crtc->state)->wm = cs->wm;
+ }
+
+ put_state:
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 4dea833f9d1b..847cda4c017c 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -1573,12 +1573,20 @@ intel_hdmi_set_edid(struct drm_connector *connector)
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct edid *edid;
+ bool connected = false;
++ struct i2c_adapter *i2c;
+
+ intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+
+- edid = drm_get_edid(connector,
+- intel_gmbus_get_adapter(dev_priv,
+- intel_hdmi->ddc_bus));
++ i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
++
++ edid = drm_get_edid(connector, i2c);
++
++ if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
++ DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
++ intel_gmbus_force_bit(i2c, true);
++ edid = drm_get_edid(connector, i2c);
++ intel_gmbus_force_bit(i2c, false);
++ }
+
+ intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
+
+diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
+index 7e115f3927f6..d169bfb98368 100644
+--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
++++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
+@@ -1844,6 +1844,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
+ CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
++ BIT_ULL(POWER_DOMAIN_GMBUS) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+ static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 69d6e61a01ec..6ed9cb053dfa 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -570,9 +570,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
+ nv_connector->edid = NULL;
+ }
+
+- ret = pm_runtime_get_sync(connector->dev->dev);
+- if (ret < 0 && ret != -EACCES)
+- return conn_status;
++ /* Outputs are only polled while runtime active, so acquiring a
++ * runtime PM ref here is unnecessary (and would deadlock upon
++ * runtime suspend because it waits for polling to finish).
++ */
++ if (!drm_kms_helper_is_poll_worker()) {
++ ret = pm_runtime_get_sync(connector->dev->dev);
++ if (ret < 0 && ret != -EACCES)
++ return conn_status;
++ }
+
+ nv_encoder = nouveau_connector_ddc_detect(connector);
+ if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
+@@ -647,8 +653,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
+
+ out:
+
+- pm_runtime_mark_last_busy(connector->dev->dev);
+- pm_runtime_put_autosuspend(connector->dev->dev);
++ if (!drm_kms_helper_is_poll_worker()) {
++ pm_runtime_mark_last_busy(connector->dev->dev);
++ pm_runtime_put_autosuspend(connector->dev->dev);
++ }
+
+ return conn_status;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
+index 584466ef688f..325bff420f5a 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -4426,6 +4426,7 @@ nv50_display_create(struct drm_device *dev)
+ nouveau_display(dev)->fini = nv50_display_fini;
+ disp->disp = &nouveau_display(dev)->disp;
+ dev->mode_config.funcs = &nv50_disp_func;
++ dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
+ if (nouveau_atomic)
+ dev->driver->driver_features |= DRIVER_ATOMIC;
+
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index a6511918f632..8ce36cf42055 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -3228,35 +3228,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
+ case CHIP_KAVERI:
+ rdev->config.cik.max_shader_engines = 1;
+ rdev->config.cik.max_tile_pipes = 4;
+- if ((rdev->pdev->device == 0x1304) ||
+- (rdev->pdev->device == 0x1305) ||
+- (rdev->pdev->device == 0x130C) ||
+- (rdev->pdev->device == 0x130F) ||
+- (rdev->pdev->device == 0x1310) ||
+- (rdev->pdev->device == 0x1311) ||
+- (rdev->pdev->device == 0x131C)) {
+- rdev->config.cik.max_cu_per_sh = 8;
+- rdev->config.cik.max_backends_per_se = 2;
+- } else if ((rdev->pdev->device == 0x1309) ||
+- (rdev->pdev->device == 0x130A) ||
+- (rdev->pdev->device == 0x130D) ||
+- (rdev->pdev->device == 0x1313) ||
+- (rdev->pdev->device == 0x131D)) {
+- rdev->config.cik.max_cu_per_sh = 6;
+- rdev->config.cik.max_backends_per_se = 2;
+- } else if ((rdev->pdev->device == 0x1306) ||
+- (rdev->pdev->device == 0x1307) ||
+- (rdev->pdev->device == 0x130B) ||
+- (rdev->pdev->device == 0x130E) ||
+- (rdev->pdev->device == 0x1315) ||
+- (rdev->pdev->device == 0x1318) ||
+- (rdev->pdev->device == 0x131B)) {
+- rdev->config.cik.max_cu_per_sh = 4;
+- rdev->config.cik.max_backends_per_se = 1;
+- } else {
+- rdev->config.cik.max_cu_per_sh = 3;
+- rdev->config.cik.max_backends_per_se = 1;
+- }
++ rdev->config.cik.max_cu_per_sh = 8;
++ rdev->config.cik.max_backends_per_se = 2;
+ rdev->config.cik.max_sh_per_se = 1;
+ rdev->config.cik.max_texture_channel_caches = 4;
+ rdev->config.cik.max_gprs = 256;
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 59dcefb2df3b..30e129684c7c 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -900,9 +900,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
+ enum drm_connector_status ret = connector_status_disconnected;
+ int r;
+
+- r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
+- return connector_status_disconnected;
++ if (!drm_kms_helper_is_poll_worker()) {
++ r = pm_runtime_get_sync(connector->dev->dev);
++ if (r < 0)
++ return connector_status_disconnected;
++ }
+
+ if (encoder) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+@@ -925,8 +927,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
+ /* check acpi lid status ??? */
+
+ radeon_connector_update_scratch_regs(connector, ret);
+- pm_runtime_mark_last_busy(connector->dev->dev);
+- pm_runtime_put_autosuspend(connector->dev->dev);
++
++ if (!drm_kms_helper_is_poll_worker()) {
++ pm_runtime_mark_last_busy(connector->dev->dev);
++ pm_runtime_put_autosuspend(connector->dev->dev);
++ }
++
+ return ret;
+ }
+
+@@ -1040,9 +1046,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
+ enum drm_connector_status ret = connector_status_disconnected;
+ int r;
+
+- r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
+- return connector_status_disconnected;
++ if (!drm_kms_helper_is_poll_worker()) {
++ r = pm_runtime_get_sync(connector->dev->dev);
++ if (r < 0)
++ return connector_status_disconnected;
++ }
+
+ encoder = radeon_best_single_encoder(connector);
+ if (!encoder)
+@@ -1109,8 +1117,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
+ radeon_connector_update_scratch_regs(connector, ret);
+
+ out:
+- pm_runtime_mark_last_busy(connector->dev->dev);
+- pm_runtime_put_autosuspend(connector->dev->dev);
++ if (!drm_kms_helper_is_poll_worker()) {
++ pm_runtime_mark_last_busy(connector->dev->dev);
++ pm_runtime_put_autosuspend(connector->dev->dev);
++ }
+
+ return ret;
+ }
+@@ -1174,9 +1184,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
+ if (!radeon_connector->dac_load_detect)
+ return ret;
+
+- r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
+- return connector_status_disconnected;
++ if (!drm_kms_helper_is_poll_worker()) {
++ r = pm_runtime_get_sync(connector->dev->dev);
++ if (r < 0)
++ return connector_status_disconnected;
++ }
+
+ encoder = radeon_best_single_encoder(connector);
+ if (!encoder)
+@@ -1188,8 +1200,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
+ if (ret == connector_status_connected)
+ ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
+ radeon_connector_update_scratch_regs(connector, ret);
+- pm_runtime_mark_last_busy(connector->dev->dev);
+- pm_runtime_put_autosuspend(connector->dev->dev);
++
++ if (!drm_kms_helper_is_poll_worker()) {
++ pm_runtime_mark_last_busy(connector->dev->dev);
++ pm_runtime_put_autosuspend(connector->dev->dev);
++ }
++
+ return ret;
+ }
+
+@@ -1252,9 +1268,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
+ enum drm_connector_status ret = connector_status_disconnected;
+ bool dret = false, broken_edid = false;
+
+- r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
+- return connector_status_disconnected;
++ if (!drm_kms_helper_is_poll_worker()) {
++ r = pm_runtime_get_sync(connector->dev->dev);
++ if (r < 0)
++ return connector_status_disconnected;
++ }
+
+ if (radeon_connector->detected_hpd_without_ddc) {
+ force = true;
+@@ -1437,8 +1455,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
+ }
+
+ exit:
+- pm_runtime_mark_last_busy(connector->dev->dev);
+- pm_runtime_put_autosuspend(connector->dev->dev);
++ if (!drm_kms_helper_is_poll_worker()) {
++ pm_runtime_mark_last_busy(connector->dev->dev);
++ pm_runtime_put_autosuspend(connector->dev->dev);
++ }
+
+ return ret;
+ }
+@@ -1689,9 +1709,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
+ if (radeon_dig_connector->is_mst)
+ return connector_status_disconnected;
+
+- r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
+- return connector_status_disconnected;
++ if (!drm_kms_helper_is_poll_worker()) {
++ r = pm_runtime_get_sync(connector->dev->dev);
++ if (r < 0)
++ return connector_status_disconnected;
++ }
+
+ if (!force && radeon_check_hpd_status_unchanged(connector)) {
+ ret = connector->status;
+@@ -1778,8 +1800,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
+ }
+
+ out:
+- pm_runtime_mark_last_busy(connector->dev->dev);
+- pm_runtime_put_autosuspend(connector->dev->dev);
++ if (!drm_kms_helper_is_poll_worker()) {
++ pm_runtime_mark_last_busy(connector->dev->dev);
++ pm_runtime_put_autosuspend(connector->dev->dev);
++ }
+
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index ffc10cadcf34..32b577c776b9 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1397,6 +1397,10 @@ int radeon_device_init(struct radeon_device *rdev,
+ if ((rdev->flags & RADEON_IS_PCI) &&
+ (rdev->family <= CHIP_RS740))
+ rdev->need_dma32 = true;
++#ifdef CONFIG_PPC64
++ if (rdev->family == CHIP_CEDAR)
++ rdev->need_dma32 = true;
++#endif
+
+ dma_bits = rdev->need_dma32 ? 32 : 40;
+ r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 326ad068c15a..4b6542538ff9 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -47,7 +47,6 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
+ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
+ static void radeon_pm_update_profile(struct radeon_device *rdev);
+ static void radeon_pm_set_clocks(struct radeon_device *rdev);
+-static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);
+
+ int radeon_pm_get_type_index(struct radeon_device *rdev,
+ enum radeon_pm_state_type ps_type,
+@@ -80,8 +79,6 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
+ radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+ }
+ mutex_unlock(&rdev->pm.mutex);
+- /* allow new DPM state to be picked */
+- radeon_pm_compute_clocks_dpm(rdev);
+ } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if (rdev->pm.profile == PM_PROFILE_AUTO) {
+ mutex_lock(&rdev->pm.mutex);
+@@ -885,8 +882,7 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
+ dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
+ /* balanced states don't exist at the moment */
+ if (dpm_state == POWER_STATE_TYPE_BALANCED)
+- dpm_state = rdev->pm.dpm.ac_power ?
+- POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
++ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+
+ restart_search:
+ /* Pick the best power state based on current conditions */
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index d7d042a20ab4..4dff06ab771e 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -534,14 +534,14 @@ int ib_register_device(struct ib_device *device,
+ ret = device->query_device(device, &device->attrs, &uhw);
+ if (ret) {
+ pr_warn("Couldn't query the device attributes\n");
+- goto cache_cleanup;
++ goto cg_cleanup;
+ }
+
+ ret = ib_device_register_sysfs(device, port_callback);
+ if (ret) {
+ pr_warn("Couldn't register device %s with driver model\n",
+ device->name);
+- goto cache_cleanup;
++ goto cg_cleanup;
+ }
+
+ device->reg_state = IB_DEV_REGISTERED;
+@@ -557,6 +557,8 @@ int ib_register_device(struct ib_device *device,
+ mutex_unlock(&device_mutex);
+ return 0;
+
++cg_cleanup:
++ ib_device_unregister_rdmacg(device);
+ cache_cleanup:
+ ib_cache_cleanup_one(device);
+ ib_cache_release_one(device);
+diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
+index 4e1f76730855..9cb801d1fe54 100644
+--- a/drivers/infiniband/core/rdma_core.c
++++ b/drivers/infiniband/core/rdma_core.c
+@@ -407,13 +407,13 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
+ return ret;
+ }
+
+-static void lockdep_check(struct ib_uobject *uobj, bool exclusive)
++static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
+ {
+ #ifdef CONFIG_LOCKDEP
+ if (exclusive)
+- WARN_ON(atomic_read(&uobj->usecnt) > 0);
++ WARN_ON(atomic_read(&uobj->usecnt) != -1);
+ else
+- WARN_ON(atomic_read(&uobj->usecnt) == -1);
++ WARN_ON(atomic_read(&uobj->usecnt) <= 0);
+ #endif
+ }
+
+@@ -452,7 +452,7 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
+ WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
+ return 0;
+ }
+- lockdep_check(uobj, true);
++ assert_uverbs_usecnt(uobj, true);
+ ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);
+
+ up_read(&ucontext->cleanup_rwsem);
+@@ -482,7 +482,7 @@ int rdma_explicit_destroy(struct ib_uobject *uobject)
+ WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
+ return 0;
+ }
+- lockdep_check(uobject, true);
++ assert_uverbs_usecnt(uobject, true);
+ ret = uobject->type->type_class->remove_commit(uobject,
+ RDMA_REMOVE_DESTROY);
+ if (ret)
+@@ -569,7 +569,7 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
+
+ void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
+ {
+- lockdep_check(uobj, exclusive);
++ assert_uverbs_usecnt(uobj, exclusive);
+ uobj->type->type_class->lookup_put(uobj, exclusive);
+ /*
+ * In order to unlock an object, either decrease its usecnt for
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index eb85b546e223..c8b3a45e9edc 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1148,6 +1148,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
++ if (cmd.qp_state > IB_QPS_ERR)
++ return -EINVAL;
++
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+@@ -1293,6 +1296,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
++ return -EINVAL;
++
+ optval = memdup_user((void __user *) (unsigned long) cmd.optval,
+ cmd.optlen);
+ if (IS_ERR(optval)) {
+diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
+index 18705cbcdc8c..8b179238f405 100644
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -1177,7 +1177,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ if (ucmd.reserved0 || ucmd.reserved1)
+ return -EINVAL;
+
+- umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
++	/* check for multiplication overflow */
++ if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
++ return -EINVAL;
++
++ umem = ib_umem_get(context, ucmd.buf_addr,
++ (size_t)ucmd.cqe_size * entries,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(umem)) {
+ err = PTR_ERR(umem);
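/*
 * The arithmetic behind the overflow guard above, for cqe_size != 0 and
 * entries >= 1: SIZE_MAX / cqe_size <= entries - 1 holds exactly when
 * (size_t)cqe_size * entries would exceed SIZE_MAX. With a 32-bit
 * size_t, for instance, cqe_size = 128 and entries = 0x02000000
 * multiply to 0x100000000, which would silently wrap to 0 without the
 * check.
 */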
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index d109fe8290a7..3832edd867ed 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1813,7 +1813,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
+
+ mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
+ mr->ibmr.length = 0;
+- mr->ndescs = sg_nents;
+
+ for_each_sg(sgl, sg, sg_nents, i) {
+ if (unlikely(i >= mr->max_descs))
+@@ -1825,6 +1824,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
+
+ sg_offset = 0;
+ }
++ mr->ndescs = i;
+
+ if (sg_offset_p)
+ *sg_offset_p = sg_offset;
+diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
+index 1f316d66e6f7..41614c185918 100644
+--- a/drivers/input/keyboard/matrix_keypad.c
++++ b/drivers/input/keyboard/matrix_keypad.c
+@@ -218,8 +218,10 @@ static void matrix_keypad_stop(struct input_dev *dev)
+ {
+ struct matrix_keypad *keypad = input_get_drvdata(dev);
+
++ spin_lock_irq(&keypad->lock);
+ keypad->stopped = true;
+- mb();
++ spin_unlock_irq(&keypad->lock);
++
+ flush_work(&keypad->work.work);
+ /*
+ * matrix_keypad_scan() will leave IRQs enabled;
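/*
 * Why the spinlock above replaces the bare mb(): the keypad interrupt
 * handler in this driver checks ->stopped while holding the same
 * keypad->lock, so once the unlock here completes the handler either
 * observes stopped == true and bails out, or has already queued its scan
 * work, which the following flush_work() then drains. The barrier alone
 * ordered the store but could not close that queue-after-stop race.
 */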
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index cd9f61cb3fc6..ee5466a374bf 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -173,7 +173,6 @@ static const char * const smbus_pnp_ids[] = {
+ "LEN0046", /* X250 */
+ "LEN004a", /* W541 */
+ "LEN200f", /* T450s */
+- "LEN2018", /* T460p */
+ NULL
+ };
+
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index b4d28928dec5..14bdaf1cef2c 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -951,6 +951,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+ uint32_t rtime = cpu_to_le32(get_seconds());
+ struct uuid_entry *u;
+ char buf[BDEVNAME_SIZE];
++ struct cached_dev *exist_dc, *t;
+
+ bdevname(dc->bdev, buf);
+
+@@ -974,6 +975,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+ return -EINVAL;
+ }
+
++	/* Check whether this backing device is already attached */
++ list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
++ if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
++ pr_err("Tried to attach %s but duplicate UUID already attached",
++ buf);
++
++ return -EINVAL;
++ }
++ }
++
+ u = uuid_find(c, dc->sb.uuid);
+
+ if (u &&
+@@ -1191,7 +1202,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
+
+ return;
+ err:
+- pr_notice("error opening %s: %s", bdevname(bdev, name), err);
++ pr_notice("error %s: %s", bdevname(bdev, name), err);
+ bcache_device_stop(&dc->disk);
+ }
+
+@@ -1859,6 +1870,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
+ const char *err = NULL; /* must be set for any error case */
+ int ret = 0;
+
++ bdevname(bdev, name);
++
+ memcpy(&ca->sb, sb, sizeof(struct cache_sb));
+ ca->bdev = bdev;
+ ca->bdev->bd_holder = ca;
+@@ -1867,11 +1880,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
+ ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ get_page(sb_page);
+
+- if (blk_queue_discard(bdev_get_queue(ca->bdev)))
++ if (blk_queue_discard(bdev_get_queue(bdev)))
+ ca->discard = CACHE_DISCARD(&ca->sb);
+
+ ret = cache_alloc(ca);
+ if (ret != 0) {
++ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ if (ret == -ENOMEM)
+ err = "cache_alloc(): -ENOMEM";
+ else
+@@ -1894,14 +1908,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
+ goto out;
+ }
+
+- pr_info("registered cache device %s", bdevname(bdev, name));
++ pr_info("registered cache device %s", name);
+
+ out:
+ kobject_put(&ca->kobj);
+
+ err:
+ if (err)
+- pr_notice("error opening %s: %s", bdevname(bdev, name), err);
++ pr_notice("error %s: %s", name, err);
+
+ return ret;
+ }
+@@ -1990,6 +2004,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ if (err)
+ goto err_close;
+
++ err = "failed to register device";
+ if (SB_IS_BDEV(sb)) {
+ struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+ if (!dc)
+@@ -2004,7 +2019,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ goto err_close;
+
+ if (register_cache(sb, sb_page, bdev, ca) != 0)
+- goto err_close;
++ goto err;
+ }
+ out:
+ if (sb_page)
+@@ -2017,7 +2032,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ err_close:
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ err:
+- pr_info("error opening %s: %s", path, err);
++ pr_info("error %s: %s", path, err);
+ ret = -EINVAL;
+ goto out;
+ }
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index c546b567f3b5..b3454e8c0956 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -386,9 +386,6 @@ static void __cache_size_refresh(void)
+ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
+ enum data_mode *data_mode)
+ {
+- unsigned noio_flag;
+- void *ptr;
+-
+ if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
+ *data_mode = DATA_MODE_SLAB;
+ return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
+@@ -412,16 +409,15 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
+ * all allocations done by this process (including pagetables) are done
+ * as if GFP_NOIO was specified.
+ */
++ if (gfp_mask & __GFP_NORETRY) {
++ unsigned noio_flag = memalloc_noio_save();
++ void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+- if (gfp_mask & __GFP_NORETRY)
+- noio_flag = memalloc_noio_save();
+-
+- ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+-
+- if (gfp_mask & __GFP_NORETRY)
+ memalloc_noio_restore(noio_flag);
++ return ptr;
++ }
+
+- return ptr;
++ return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+ }
+
+ /*
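/*
 * A plausible motivation for the reshaping above, beyond readability:
 * moving noio_flag and ptr into the __GFP_NORETRY branch means both are
 * provably initialized before every use, so the compiler no longer has
 * to reason about the old "save maybe / alloc / restore maybe" shape
 * that could trip a false-positive -Wmaybe-uninitialized warning.
 */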
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index f6d4a50f1bdb..829ac22b72fc 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -3455,7 +3455,7 @@ static int __init init_mac80211_hwsim(void)
+
+ spin_lock_init(&hwsim_radio_lock);
+
+- hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0);
++ hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0);
+ if (!hwsim_wq)
+ return -ENOMEM;
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 839650e0926a..3551fbd6fe41 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2950,7 +2950,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+
+ if (new)
+ nvme_mpath_add_disk(ns->head);
+- nvme_mpath_add_disk_links(ns);
+ return;
+ out_unlink_ns:
+ mutex_lock(&ctrl->subsys->lock);
+@@ -2970,7 +2969,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
+ return;
+
+ if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
+- nvme_mpath_remove_disk_links(ns);
+ sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
+ &nvme_ns_id_attr_group);
+ if (ns->ndev)
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 1218a9fca846..cf16905d25e2 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -245,25 +245,6 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head)
+ head->disk->disk_name);
+ }
+
+-void nvme_mpath_add_disk_links(struct nvme_ns *ns)
+-{
+- struct kobject *slave_disk_kobj, *holder_disk_kobj;
+-
+- if (!ns->head->disk)
+- return;
+-
+- slave_disk_kobj = &disk_to_dev(ns->disk)->kobj;
+- if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj,
+- kobject_name(slave_disk_kobj)))
+- return;
+-
+- holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj;
+- if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj,
+- kobject_name(holder_disk_kobj)))
+- sysfs_remove_link(ns->head->disk->slave_dir,
+- kobject_name(slave_disk_kobj));
+-}
+-
+ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+ {
+ if (!head->disk)
+@@ -278,14 +259,3 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+ blk_cleanup_queue(head->disk->queue);
+ put_disk(head->disk);
+ }
+-
+-void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
+-{
+- if (!ns->head->disk)
+- return;
+-
+- sysfs_remove_link(ns->disk->part0.holder_dir,
+- kobject_name(&disk_to_dev(ns->head->disk)->kobj));
+- sysfs_remove_link(ns->head->disk->slave_dir,
+- kobject_name(&disk_to_dev(ns->disk)->kobj));
+-}
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index a00eabd06427..55c49a1aa231 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -405,9 +405,7 @@ bool nvme_req_needs_failover(struct request *req);
+ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
+ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
+ void nvme_mpath_add_disk(struct nvme_ns_head *head);
+-void nvme_mpath_add_disk_links(struct nvme_ns *ns);
+ void nvme_mpath_remove_disk(struct nvme_ns_head *head);
+-void nvme_mpath_remove_disk_links(struct nvme_ns *ns);
+
+ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+ {
+@@ -448,12 +446,6 @@ static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
+ static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+ {
+ }
+-static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns)
+-{
+-}
+-static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
+-{
+-}
+ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+ {
+ }
+diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
+index 81e2157a7cfb..bc3e2d8d0cce 100644
+--- a/drivers/pci/dwc/pcie-designware-host.c
++++ b/drivers/pci/dwc/pcie-designware-host.c
+@@ -607,7 +607,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
+ /* setup bus numbers */
+ val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
+ val &= 0xff000000;
+- val |= 0x00010100;
++ val |= 0x00ff0100;
+ dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
+
+ /* setup command register */
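/*
 * On the PCI_PRIMARY_BUS change above: the register packs the primary,
 * secondary and subordinate bus numbers into its low three bytes. The
 * old 0x00010100 left subordinate = 0x01, so configuration cycles for
 * buses behind a downstream switch were never forwarded; 0x00ff0100
 * raises subordinate to 0xff while keeping secondary = 0x01 and
 * primary = 0x00.
 */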
+diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
+index 72c8b3e1022b..e0a9c445ed67 100644
+--- a/drivers/regulator/stm32-vrefbuf.c
++++ b/drivers/regulator/stm32-vrefbuf.c
+@@ -51,7 +51,7 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev)
+ * arbitrary timeout.
+ */
+ ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val,
+- !(val & STM32_VRR), 650, 10000);
++ val & STM32_VRR, 650, 10000);
+ if (ret) {
+ dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n");
+ val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
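/*
 * On the polarity fix above: readl_poll_timeout() spins until its
 * condition argument evaluates true or the timeout expires, so
 * "val & STM32_VRR" waits for the voltage-ready flag to become set.
 * The old "!(val & STM32_VRR)" returned success while the flag was
 * still clear, i.e. before the reference buffer was actually ready.
 */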
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index 57bf43e34863..dd9464920456 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -328,8 +328,6 @@ static void scsi_host_dev_release(struct device *dev)
+ if (shost->work_q)
+ destroy_workqueue(shost->work_q);
+
+- destroy_rcu_head(&shost->rcu);
+-
+ if (shost->shost_state == SHOST_CREATED) {
+ /*
+ * Free the shost_dev device name here if scsi_host_alloc()
+@@ -404,7 +402,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
+ INIT_LIST_HEAD(&shost->starved_list);
+ init_waitqueue_head(&shost->host_wait);
+ mutex_init(&shost->scan_mutex);
+- init_rcu_head(&shost->rcu);
+
+ index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
+ if (index < 0)
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index 01a9b8971e88..93ff92e2363f 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -315,6 +315,29 @@ struct srb_cmd {
+ /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
+ #define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
+
++/*
++ * 24 bit port ID type definition.
++ */
++typedef union {
++ uint32_t b24 : 24;
++
++ struct {
++#ifdef __BIG_ENDIAN
++ uint8_t domain;
++ uint8_t area;
++ uint8_t al_pa;
++#elif defined(__LITTLE_ENDIAN)
++ uint8_t al_pa;
++ uint8_t area;
++ uint8_t domain;
++#else
++#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
++#endif
++ uint8_t rsvd_1;
++ } b;
++} port_id_t;
++#define INVALID_PORT_ID 0xFFFFFF
++
+ struct els_logo_payload {
+ uint8_t opcode;
+ uint8_t rsvd[3];
+@@ -338,6 +361,7 @@ struct ct_arg {
+ u32 rsp_size;
+ void *req;
+ void *rsp;
++ port_id_t id;
+ };
+
+ /*
+@@ -499,6 +523,7 @@ typedef struct srb {
+ const char *name;
+ int iocbs;
+ struct qla_qpair *qpair;
++ struct list_head elem;
+ u32 gen1; /* scratch */
+ u32 gen2; /* scratch */
+ union {
+@@ -2164,28 +2189,6 @@ struct imm_ntfy_from_isp {
+ #define REQUEST_ENTRY_SIZE (sizeof(request_t))
+
+
+-/*
+- * 24 bit port ID type definition.
+- */
+-typedef union {
+- uint32_t b24 : 24;
+-
+- struct {
+-#ifdef __BIG_ENDIAN
+- uint8_t domain;
+- uint8_t area;
+- uint8_t al_pa;
+-#elif defined(__LITTLE_ENDIAN)
+- uint8_t al_pa;
+- uint8_t area;
+- uint8_t domain;
+-#else
+-#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
+-#endif
+- uint8_t rsvd_1;
+- } b;
+-} port_id_t;
+-#define INVALID_PORT_ID 0xFFFFFF
+
+ /*
+ * Switch info gathering structure.
+@@ -4107,6 +4110,7 @@ typedef struct scsi_qla_host {
+ #define LOOP_READY 5
+ #define LOOP_DEAD 6
+
++ unsigned long relogin_jif;
+ unsigned long dpc_flags;
+ #define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
+ #define RESET_ACTIVE 1
+@@ -4252,6 +4256,7 @@ typedef struct scsi_qla_host {
+ uint8_t n2n_node_name[WWN_SIZE];
+ uint8_t n2n_port_name[WWN_SIZE];
+ uint16_t n2n_id;
++ struct list_head gpnid_list;
+ } scsi_qla_host_t;
+
+ struct qla27xx_image_status {
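
The qla_def.h hunks only move port_id_t earlier in the header so that struct ct_arg can embed one, but the union itself rewards a second look: the same 24-bit N_Port ID is visible both as an integer (b24) and as its domain/area/al_pa bytes, with the byte order flipped per endianness. A standalone sketch, using the GCC/Clang __BYTE_ORDER__ macros in place of the kernel's endian defines:

    #include <stdio.h>
    #include <stdint.h>

    typedef union {
        uint32_t b24 : 24;
        struct {
    #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            uint8_t domain;
            uint8_t area;
            uint8_t al_pa;
    #else
            uint8_t al_pa;
            uint8_t area;
            uint8_t domain;
    #endif
            uint8_t rsvd_1;
        } b;
    } port_id_t;

    int main(void)
    {
        port_id_t id = { .b24 = 0x010203 };

        /* domain is the most significant byte of the 24-bit ID */
        printf("domain=%02x area=%02x al_pa=%02x\n",
               id.b.domain, id.b.area, id.b.al_pa);   /* 01 02 03 */
        return 0;
    }
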
+diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
+index bc3db6abc9a0..7d715e58901f 100644
+--- a/drivers/scsi/qla2xxx/qla_gs.c
++++ b/drivers/scsi/qla2xxx/qla_gs.c
+@@ -175,6 +175,9 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ }
+ break;
++ case CS_TIMEOUT:
++ rval = QLA_FUNCTION_TIMEOUT;
++ /* fall through */
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x2033,
+ "%s failed, completion status (%x) on port_id: "
+@@ -2833,7 +2836,7 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
+ }
+ } else { /* fcport->d_id.b24 != ea->id.b24 */
+ fcport->d_id.b24 = ea->id.b24;
+- if (fcport->deleted == QLA_SESS_DELETED) {
++ if (fcport->deleted != QLA_SESS_DELETED) {
+ ql_dbg(ql_dbg_disc, vha, 0x2021,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+@@ -2889,9 +2892,22 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
+ ea.rc = res;
+ ea.event = FCME_GIDPN_DONE;
+
+- ql_dbg(ql_dbg_disc, vha, 0x204f,
+- "Async done-%s res %x, WWPN %8phC ID %3phC \n",
+- sp->name, res, fcport->port_name, id);
++ if (res == QLA_FUNCTION_TIMEOUT) {
++ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
++ "Async done-%s WWPN %8phC timed out.\n",
++ sp->name, fcport->port_name);
++ qla24xx_post_gidpn_work(sp->vha, fcport);
++ sp->free(sp);
++ return;
++ } else if (res) {
++ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
++ "Async done-%s fail res %x, WWPN %8phC\n",
++ sp->name, res, fcport->port_name);
++ } else {
++ ql_dbg(ql_dbg_disc, vha, 0x204f,
++ "Async done-%s good WWPN %8phC ID %3phC\n",
++ sp->name, fcport->port_name, id);
++ }
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+@@ -3155,43 +3171,136 @@ void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
+
+ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+ {
+- fc_port_t *fcport;
+- unsigned long flags;
++ fc_port_t *fcport, *conflict, *t;
+
+- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+- fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
+- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
++ ql_dbg(ql_dbg_disc, vha, 0xffff,
++ "%s %d port_id: %06x\n",
++ __func__, __LINE__, ea->id.b24);
+
+- if (fcport) {
+- /* cable moved. just plugged in */
+- fcport->rscn_gen++;
+- fcport->d_id = ea->id;
+- fcport->scan_state = QLA_FCPORT_FOUND;
+- fcport->flags |= FCF_FABRIC_DEVICE;
+-
+- switch (fcport->disc_state) {
+- case DSC_DELETED:
+- ql_dbg(ql_dbg_disc, vha, 0x210d,
+- "%s %d %8phC login\n", __func__, __LINE__,
+- fcport->port_name);
+- qla24xx_fcport_handle_login(vha, fcport);
+- break;
+- case DSC_DELETE_PEND:
+- break;
+- default:
+- ql_dbg(ql_dbg_disc, vha, 0x2064,
+- "%s %d %8phC post del sess\n",
+- __func__, __LINE__, fcport->port_name);
+- qlt_schedule_sess_for_deletion_lock(fcport);
+- break;
++ if (ea->rc) {
++ /* cable is disconnected */
++ list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
++ if (fcport->d_id.b24 == ea->id.b24) {
++ ql_dbg(ql_dbg_disc, vha, 0xffff,
++ "%s %d %8phC DS %d\n",
++ __func__, __LINE__,
++ fcport->port_name,
++ fcport->disc_state);
++ fcport->scan_state = QLA_FCPORT_SCAN;
++ switch (fcport->disc_state) {
++ case DSC_DELETED:
++ case DSC_DELETE_PEND:
++ break;
++ default:
++ ql_dbg(ql_dbg_disc, vha, 0xffff,
++ "%s %d %8phC post del sess\n",
++ __func__, __LINE__,
++ fcport->port_name);
++ qlt_schedule_sess_for_deletion_lock
++ (fcport);
++ break;
++ }
++ }
+ }
+ } else {
+- /* create new fcport */
+- ql_dbg(ql_dbg_disc, vha, 0x2065,
+- "%s %d %8phC post new sess\n",
+- __func__, __LINE__, ea->port_name);
++ /* cable is connected */
++ fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
++ if (fcport) {
++ list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
++ list) {
++ if ((conflict->d_id.b24 == ea->id.b24) &&
++ (fcport != conflict)) {
++ /* Two fcports have conflicting NPort IDs, or
++ * an existing fcport's NPort ID conflicts
++ * with the new fcport.
++ */
++
++ ql_dbg(ql_dbg_disc, vha, 0xffff,
++ "%s %d %8phC DS %d\n",
++ __func__, __LINE__,
++ conflict->port_name,
++ conflict->disc_state);
++ conflict->scan_state = QLA_FCPORT_SCAN;
++ switch (conflict->disc_state) {
++ case DSC_DELETED:
++ case DSC_DELETE_PEND:
++ break;
++ default:
++ ql_dbg(ql_dbg_disc, vha, 0xffff,
++ "%s %d %8phC post del sess\n",
++ __func__, __LINE__,
++ conflict->port_name);
++ qlt_schedule_sess_for_deletion_lock
++ (conflict);
++ break;
++ }
++ }
++ }
+
+- qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL);
++ fcport->rscn_gen++;
++ fcport->scan_state = QLA_FCPORT_FOUND;
++ fcport->flags |= FCF_FABRIC_DEVICE;
++ switch (fcport->disc_state) {
++ case DSC_LOGIN_COMPLETE:
++ /* recheck session is still intact. */
++ ql_dbg(ql_dbg_disc, vha, 0x210d,
++ "%s %d %8phC revalidate session with ADISC\n",
++ __func__, __LINE__, fcport->port_name);
++ qla24xx_post_gpdb_work(vha, fcport,
++ PDO_FORCE_ADISC);
++ break;
++ case DSC_DELETED:
++ ql_dbg(ql_dbg_disc, vha, 0x210d,
++ "%s %d %8phC login\n", __func__, __LINE__,
++ fcport->port_name);
++ fcport->d_id = ea->id;
++ qla24xx_fcport_handle_login(vha, fcport);
++ break;
++ case DSC_DELETE_PEND:
++ fcport->d_id = ea->id;
++ break;
++ default:
++ fcport->d_id = ea->id;
++ break;
++ }
++ } else {
++ list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
++ list) {
++ if (conflict->d_id.b24 == ea->id.b24) {
++ /* Two fcports have conflicting NPort IDs, or
++ * an existing fcport's NPort ID conflicts
++ * with the new fcport.
++ */
++ ql_dbg(ql_dbg_disc, vha, 0xffff,
++ "%s %d %8phC DS %d\n",
++ __func__, __LINE__,
++ conflict->port_name,
++ conflict->disc_state);
++
++ conflict->scan_state = QLA_FCPORT_SCAN;
++ switch (conflict->disc_state) {
++ case DSC_DELETED:
++ case DSC_DELETE_PEND:
++ break;
++ default:
++ ql_dbg(ql_dbg_disc, vha, 0xffff,
++ "%s %d %8phC post del sess\n",
++ __func__, __LINE__,
++ conflict->port_name);
++ qlt_schedule_sess_for_deletion_lock
++ (conflict);
++ break;
++ }
++ }
++ }
++
++ /* create new fcport */
++ ql_dbg(ql_dbg_disc, vha, 0x2065,
++ "%s %d %8phC post new sess\n",
++ __func__, __LINE__, ea->port_name);
++ qla24xx_post_newsess_work(vha, &ea->id,
++ ea->port_name, NULL);
++ }
+ }
+ }
+
+@@ -3205,11 +3314,18 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
+ (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
+ struct event_arg ea;
+ struct qla_work_evt *e;
++ unsigned long flags;
+
+- ql_dbg(ql_dbg_disc, vha, 0x2066,
+- "Async done-%s res %x ID %3phC. %8phC\n",
+- sp->name, res, ct_req->req.port_id.port_id,
+- ct_rsp->rsp.gpn_id.port_name);
++ if (res)
++ ql_dbg(ql_dbg_disc, vha, 0x2066,
++ "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
++ sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
++ ct_rsp->rsp.gpn_id.port_name);
++ else
++ ql_dbg(ql_dbg_disc, vha, 0x2066,
++ "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
++ sp->name, sp->gen1, ct_req->req.port_id.port_id,
++ ct_rsp->rsp.gpn_id.port_name);
+
+ memset(&ea, 0, sizeof(ea));
+ memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
+@@ -3220,6 +3336,23 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
+ ea.rc = res;
+ ea.event = FCME_GPNID_DONE;
+
++ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
++ list_del(&sp->elem);
++ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
++
++ if (res) {
++ if (res == QLA_FUNCTION_TIMEOUT) {
++ qla24xx_post_gpnid_work(sp->vha, &ea.id);
++ sp->free(sp);
++ return;
++ }
++ } else if (sp->gen1) {
++ /* There was another RSCN for this Nport ID */
++ qla24xx_post_gpnid_work(sp->vha, &ea.id);
++ sp->free(sp);
++ return;
++ }
++
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE);
+@@ -3253,8 +3386,9 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
+ {
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+- srb_t *sp;
++ srb_t *sp, *tsp;
+ struct ct_sns_pkt *ct_sns;
++ unsigned long flags;
+
+ if (!vha->flags.online)
+ goto done;
+@@ -3265,8 +3399,22 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gpnid";
++ sp->u.iocb_cmd.u.ctarg.id = *id;
++ sp->gen1 = 0;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
++ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
++ list_for_each_entry(tsp, &vha->gpnid_list, elem) {
++ if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
++ tsp->gen1++;
++ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
++ sp->free(sp);
++ goto done;
++ }
++ }
++ list_add_tail(&sp->elem, &vha->gpnid_list);
++ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
++
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
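
The qla_gs.c changes above add a per-host gpnid_list so that concurrent GPN_ID queries for the same port ID are folded together: a duplicate submission bumps gen1 on the in-flight command instead of being issued, and the completion handler re-posts the query when gen1 is non-zero (or on timeout). A toy model of that de-duplication, with the driver's tgt.sess_lock locking elided:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct gpnid_cmd {
        uint32_t id_b24;            /* 24-bit port ID being queried */
        int gen1;                   /* RSCNs seen while in flight */
        struct gpnid_cmd *next;
    };

    static struct gpnid_cmd *gpnid_list;

    /* returns NULL when an in-flight query already covers this ID */
    static struct gpnid_cmd *gpnid_submit(uint32_t id_b24)
    {
        struct gpnid_cmd *c;

        for (c = gpnid_list; c; c = c->next)
            if (c->id_b24 == id_b24) {
                c->gen1++;          /* remember: the state changed again */
                return NULL;
            }
        c = calloc(1, sizeof(*c));
        c->id_b24 = id_b24;
        c->next = gpnid_list;
        gpnid_list = c;
        return c;
    }

    int main(void)
    {
        struct gpnid_cmd *first = gpnid_submit(0x010203);

        gpnid_submit(0x010203);     /* duplicate: folded into 'first' */
        printf("issued once, gen1=%d -> requery on completion\n", first->gen1);
        free(first);
        return 0;
    }
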
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 1bafa043f9f1..6082389f25c3 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -863,6 +863,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+ int rval = ea->rc;
+ fc_port_t *fcport = ea->fcport;
+ unsigned long flags;
++ u16 opt = ea->sp->u.iocb_cmd.u.mbx.out_mb[10];
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+
+@@ -893,7 +894,8 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+ }
+
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+- ea->fcport->login_gen++;
++ if (opt != PDO_FORCE_ADISC)
++ ea->fcport->login_gen++;
+ ea->fcport->deleted = 0;
+ ea->fcport->logout_on_delete = 1;
+
+@@ -917,6 +919,16 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+
+ qla24xx_post_gpsc_work(vha, fcport);
+ }
++ } else if (ea->fcport->login_succ) {
++ /*
++ * We have an existing session. A late RSCN delivery
++ * must have triggered the session to be re-validated.
++ * The session is still valid.
++ */
++ ql_dbg(ql_dbg_disc, vha, 0x20d6,
++ "%s %d %8phC session revalidate success\n",
++ __func__, __LINE__, fcport->port_name);
++ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ } /* gpdb event */
+@@ -963,7 +975,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+ ql_dbg(ql_dbg_disc, vha, 0x20bd,
+ "%s %d %8phC post gnl\n",
+ __func__, __LINE__, fcport->port_name);
+- qla24xx_async_gnl(vha, fcport);
++ qla24xx_post_gnl_work(vha, fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20bf,
+ "%s %d %8phC post login\n",
+@@ -1040,9 +1052,8 @@ void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ case DSC_LOGIN_COMPLETE:
+- qla24xx_post_gidpn_work(fcport->vha, fcport);
++ qla24xx_post_gpnid_work(fcport->vha, &ea->id);
+ break;
+-
+ default:
+ break;
+ }
+@@ -1132,7 +1143,7 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
+ ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+
+- qla24xx_async_gidpn(vha, fcport);
++ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
+
+@@ -1347,6 +1358,7 @@ qla24xx_abort_sp_done(void *ptr, int res)
+ srb_t *sp = ptr;
+ struct srb_iocb *abt = &sp->u.iocb_cmd;
+
++ del_timer(&sp->u.iocb_cmd.timer);
+ complete(&abt->u.abt.comp);
+ }
+
+@@ -1452,6 +1464,8 @@ static void
+ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
+ {
+ port_id_t cid; /* conflict Nport id */
++ u16 lid;
++ struct fc_port *conflict_fcport;
+
+ switch (ea->data[0]) {
+ case MBS_COMMAND_COMPLETE:
+@@ -1467,8 +1481,12 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
+ qla24xx_post_prli_work(vha, ea->fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ea,
+- "%s %d %8phC post gpdb\n",
+- __func__, __LINE__, ea->fcport->port_name);
++ "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
++ __func__, __LINE__, ea->fcport->port_name,
++ ea->fcport->loop_id, ea->fcport->d_id.b24);
++
++ set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
++ ea->fcport->loop_id = FC_NO_LOOP_ID;
+ ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
+ ea->fcport->logout_on_delete = 1;
+ ea->fcport->send_els_logo = 0;
+@@ -1513,8 +1531,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
+ ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
+ ea->fcport->d_id.b.al_pa);
+
+- qla2x00_clear_loop_id(ea->fcport);
+- qla24xx_post_gidpn_work(vha, ea->fcport);
++ lid = ea->iop[1] & 0xffff;
++ qlt_find_sess_invalidate_other(vha,
++ wwn_to_u64(ea->fcport->port_name),
++ ea->fcport->d_id, lid, &conflict_fcport);
++
++ if (conflict_fcport) {
++ /*
++ * Another fcport shares the same loop_id/nport id.
++ * The conflicting fcport needs to finish cleanup before this
++ * fcport can proceed to login.
++ */
++ conflict_fcport->conflict = ea->fcport;
++ ea->fcport->login_pause = 1;
++
++ ql_dbg(ql_dbg_disc, vha, 0x20ed,
++ "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
++ __func__, __LINE__, ea->fcport->port_name,
++ ea->fcport->d_id.b24, lid);
++ qla2x00_clear_loop_id(ea->fcport);
++ qla24xx_post_gidpn_work(vha, ea->fcport);
++ } else {
++ ql_dbg(ql_dbg_disc, vha, 0x20ed,
++ "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
++ __func__, __LINE__, ea->fcport->port_name,
++ ea->fcport->d_id.b24, lid);
++
++ qla2x00_clear_loop_id(ea->fcport);
++ set_bit(lid, vha->hw->loop_id_map);
++ ea->fcport->loop_id = lid;
++ ea->fcport->keep_nport_handle = 0;
++ qlt_schedule_sess_for_deletion(ea->fcport, false);
++ }
+ break;
+ }
+ return;
+@@ -8173,9 +8221,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
+ int ret = QLA_FUNCTION_FAILED;
+ struct qla_hw_data *ha = qpair->hw;
+
+- if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created)
+- goto fail;
+-
+ qpair->delete_in_progress = 1;
+ while (atomic_read(&qpair->ref_count))
+ msleep(500);
+@@ -8183,6 +8228,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
+ ret = qla25xx_delete_req_que(vha, qpair->req);
+ if (ret != QLA_SUCCESS)
+ goto fail;
++
+ ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
+ if (ret != QLA_SUCCESS)
+ goto fail;
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
+index d810a447cb4a..8ea59586f4f1 100644
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -2392,26 +2392,13 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
+ srb_t *sp = data;
+ fc_port_t *fcport = sp->fcport;
+ struct scsi_qla_host *vha = sp->vha;
+- struct qla_hw_data *ha = vha->hw;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+- unsigned long flags = 0;
+
+ ql_dbg(ql_dbg_io, vha, 0x3069,
+ "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
+ sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+
+- /* Abort the exchange */
+- spin_lock_irqsave(&ha->hardware_lock, flags);
+- if (ha->isp_ops->abort_command(sp)) {
+- ql_dbg(ql_dbg_io, vha, 0x3070,
+- "mbx abort_command failed.\n");
+- } else {
+- ql_dbg(ql_dbg_io, vha, 0x3071,
+- "mbx abort_command success.\n");
+- }
+- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+-
+ complete(&lio->u.els_logo.comp);
+ }
+
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 2fd79129bb2a..85382387a52b 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -1574,7 +1574,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
+ /* borrowing sts_entry_24xx.comp_status.
+ same location as ct_entry_24xx.comp_status
+ */
+- res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
++ res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
+ (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->name);
+ sp->done(sp, res);
+@@ -2369,7 +2369,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+ int res = 0;
+ uint16_t state_flags = 0;
+ uint16_t retry_delay = 0;
+- uint8_t no_logout = 0;
+
+ sts = (sts_entry_t *) pkt;
+ sts24 = (struct sts_entry_24xx *) pkt;
+@@ -2640,7 +2639,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+ break;
+
+ case CS_PORT_LOGGED_OUT:
+- no_logout = 1;
+ case CS_PORT_CONFIG_CHG:
+ case CS_PORT_BUSY:
+ case CS_INCOMPLETE:
+@@ -2671,9 +2669,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+ port_state_str[atomic_read(&fcport->state)],
+ comp_status);
+
+- if (no_logout)
+- fcport->logout_on_delete = 0;
+-
+ qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ qlt_schedule_sess_for_deletion_lock(fcport);
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index cb717d47339f..e2b5fa47bb57 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -6160,8 +6160,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
+ }
+
+ /* Check for logged in state. */
+- if (current_login_state != PDS_PRLI_COMPLETE &&
+- last_login_state != PDS_PRLI_COMPLETE) {
++ if (current_login_state != PDS_PRLI_COMPLETE) {
+ ql_dbg(ql_dbg_mbx, vha, 0x119a,
+ "Unable to verify login-state (%x/%x) for loop_id %x.\n",
+ current_login_state, last_login_state, fcport->loop_id);
+diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
+index bd9f14bf7ac2..e538e6308885 100644
+--- a/drivers/scsi/qla2xxx/qla_mid.c
++++ b/drivers/scsi/qla2xxx/qla_mid.c
+@@ -343,15 +343,21 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
+ "FCPort update end.\n");
+ }
+
+- if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
+- !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
+- atomic_read(&vha->loop_state) != LOOP_DOWN) {
+-
+- ql_dbg(ql_dbg_dpc, vha, 0x4018,
+- "Relogin needed scheduled.\n");
+- qla2x00_relogin(vha);
+- ql_dbg(ql_dbg_dpc, vha, 0x4019,
+- "Relogin needed end.\n");
++ if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
++ !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
++ atomic_read(&vha->loop_state) != LOOP_DOWN) {
++
++ if (!vha->relogin_jif ||
++ time_after_eq(jiffies, vha->relogin_jif)) {
++ vha->relogin_jif = jiffies + HZ;
++ clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);
++
++ ql_dbg(ql_dbg_dpc, vha, 0x4018,
++ "Relogin needed scheduled.\n");
++ qla2x00_relogin(vha);
++ ql_dbg(ql_dbg_dpc, vha, 0x4019,
++ "Relogin needed end.\n");
++ }
+ }
+
+ if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
+@@ -569,14 +575,15 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+ int
+ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
+ {
+- int ret = -1;
++ int ret = QLA_SUCCESS;
+
+- if (req) {
++ if (req && vha->flags.qpairs_req_created) {
+ req->options |= BIT_0;
+ ret = qla25xx_init_req_que(vha, req);
++ if (ret != QLA_SUCCESS)
++ return QLA_FUNCTION_FAILED;
+ }
+- if (ret == QLA_SUCCESS)
+- qla25xx_free_req_que(vha, req);
++ qla25xx_free_req_que(vha, req);
+
+ return ret;
+ }
+@@ -584,14 +591,15 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
+ int
+ qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+ {
+- int ret = -1;
++ int ret = QLA_SUCCESS;
+
+- if (rsp) {
++ if (rsp && vha->flags.qpairs_rsp_created) {
+ rsp->options |= BIT_0;
+ ret = qla25xx_init_rsp_que(vha, rsp);
++ if (ret != QLA_SUCCESS)
++ return QLA_FUNCTION_FAILED;
+ }
+- if (ret == QLA_SUCCESS)
+- qla25xx_free_rsp_que(vha, rsp);
++ qla25xx_free_rsp_que(vha, rsp);
+
+ return ret;
+ }
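
Both DPC hunks in qla_mid.c (and the matching one in qla_os.c further down) stop clearing RELOGIN_NEEDED eagerly and instead gate the relogin pass on a one-second deadline kept in relogin_jif. A userspace model of that throttle, with a CLOCK_MONOTONIC millisecond counter standing in for jiffies and time_after_eq():

    #include <stdio.h>
    #include <stdbool.h>
    #include <time.h>

    static long now_ms(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    static long relogin_deadline;   /* plays the role of vha->relogin_jif */

    static bool relogin_due(void)
    {
        long now = now_ms();

        if (relogin_deadline && now < relogin_deadline)
            return false;               /* ran less than a second ago */
        relogin_deadline = now + 1000;  /* jiffies + HZ in the driver */
        return true;
    }

    int main(void)
    {
        printf("first pass: %d\n", relogin_due());      /* 1: runs */
        printf("immediate retry: %d\n", relogin_due()); /* 0: throttled */
        return 0;
    }
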
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 46f2d0cf7c0d..1f69e89b950f 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3011,9 +3011,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ base_vha = qla2x00_create_host(sht, ha);
+ if (!base_vha) {
+ ret = -ENOMEM;
+- qla2x00_mem_free(ha);
+- qla2x00_free_req_que(ha, req);
+- qla2x00_free_rsp_que(ha, rsp);
+ goto probe_hw_failed;
+ }
+
+@@ -3074,7 +3071,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ /* Set up the irqs */
+ ret = qla2x00_request_irqs(ha, rsp);
+ if (ret)
+- goto probe_init_failed;
++ goto probe_hw_failed;
+
+ /* Alloc arrays of request and response ring ptrs */
+ if (!qla2x00_alloc_queues(ha, req, rsp)) {
+@@ -3193,10 +3190,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ host->can_queue, base_vha->req,
+ base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+
++ ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
++
+ if (ha->mqenable) {
+ bool mq = false;
+ bool startit = false;
+- ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
+
+ if (QLA_TGT_MODE_ENABLED()) {
+ mq = true;
+@@ -3390,6 +3388,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ scsi_host_put(base_vha->host);
+
+ probe_hw_failed:
++ qla2x00_mem_free(ha);
++ qla2x00_free_req_que(ha, req);
++ qla2x00_free_rsp_que(ha, rsp);
+ qla2x00_clear_drv_active(ha);
+
+ iospace_config_failed:
+@@ -4514,6 +4515,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
+ INIT_LIST_HEAD(&vha->qp_list);
+ INIT_LIST_HEAD(&vha->gnl.fcports);
+ INIT_LIST_HEAD(&vha->nvme_rport_list);
++ INIT_LIST_HEAD(&vha->gpnid_list);
+
+ spin_lock_init(&vha->work_lock);
+ spin_lock_init(&vha->cmd_list_lock);
+@@ -4748,20 +4750,49 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
+ } else {
+ list_add_tail(&fcport->list, &vha->vp_fcports);
+
+- if (pla) {
+- qlt_plogi_ack_link(vha, pla, fcport,
+- QLT_PLOGI_LINK_SAME_WWN);
+- pla->ref_count--;
+- }
++ }
++ if (pla) {
++ qlt_plogi_ack_link(vha, pla, fcport,
++ QLT_PLOGI_LINK_SAME_WWN);
++ pla->ref_count--;
+ }
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ if (fcport) {
+- if (pla)
++ if (pla) {
+ qlt_plogi_ack_unref(vha, pla);
+- else
+- qla24xx_async_gffid(vha, fcport);
++ } else {
++ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
++ tfcp = qla2x00_find_fcport_by_nportid(vha,
++ &e->u.new_sess.id, 1);
++ if (tfcp && (tfcp != fcport)) {
++ /*
++ * We have a conflicting fcport with the same NPort ID.
++ */
++ ql_dbg(ql_dbg_disc, vha, 0xffff,
++ "%s %8phC found conflict b4 add. DS %d LS %d\n",
++ __func__, tfcp->port_name, tfcp->disc_state,
++ tfcp->fw_login_state);
++
++ switch (tfcp->disc_state) {
++ case DSC_DELETED:
++ break;
++ case DSC_DELETE_PEND:
++ fcport->login_pause = 1;
++ tfcp->conflict = fcport;
++ break;
++ default:
++ fcport->login_pause = 1;
++ tfcp->conflict = fcport;
++ qlt_schedule_sess_for_deletion_lock
++ (tfcp);
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
++ qla24xx_async_gnl(vha, fcport);
++ }
+ }
+
+ if (free_fcport) {
+@@ -4874,7 +4905,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
+ */
+ if (atomic_read(&fcport->state) != FCS_ONLINE &&
+ fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
+- fcport->login_retry--;
++
+ if (fcport->flags & FCF_FABRIC_DEVICE) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
+ "%s %8phC DS %d LS %d\n", __func__,
+@@ -4885,6 +4916,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
+ ea.fcport = fcport;
+ qla2x00_fcport_event_handler(vha, &ea);
+ } else {
++ fcport->login_retry--;
+ status = qla2x00_local_device_login(vha,
+ fcport);
+ if (status == QLA_SUCCESS) {
+@@ -5867,16 +5899,21 @@ qla2x00_do_dpc(void *data)
+ }
+
+ /* Retry each device up to login retry count */
+- if ((test_and_clear_bit(RELOGIN_NEEDED,
+- &base_vha->dpc_flags)) &&
++ if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
+ !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
+ atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
+
+- ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
+- "Relogin scheduled.\n");
+- qla2x00_relogin(base_vha);
+- ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
+- "Relogin end.\n");
++ if (!base_vha->relogin_jif ||
++ time_after_eq(jiffies, base_vha->relogin_jif)) {
++ base_vha->relogin_jif = jiffies + HZ;
++ clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
++
++ ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
++ "Relogin scheduled.\n");
++ qla2x00_relogin(base_vha);
++ ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
++ "Relogin end.\n");
++ }
+ }
+ loop_resync_check:
+ if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
+@@ -6608,9 +6645,14 @@ qla83xx_disable_laser(scsi_qla_host_t *vha)
+
+ static int qla2xxx_map_queues(struct Scsi_Host *shost)
+ {
++ int rc;
+ scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+
+- return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
++ if (USER_CTRL_IRQ(vha->hw))
++ rc = blk_mq_map_queues(&shost->tag_set);
++ else
++ rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
++ return rc;
+ }
+
+ static const struct pci_error_handlers qla2xxx_err_handler = {
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 18069edd4773..cb35bb1ae305 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -665,7 +665,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
+
+ sp->u.iocb_cmd.u.nack.ntfy = ntfy;
+-
++ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_nack_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+@@ -890,6 +890,17 @@ qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
+ iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
+ pla->ref_count, pla, link);
+
++ if (link == QLT_PLOGI_LINK_CONFLICT) {
++ switch (sess->disc_state) {
++ case DSC_DELETED:
++ case DSC_DELETE_PEND:
++ pla->ref_count--;
++ return;
++ default:
++ break;
++ }
++ }
++
+ if (sess->plogi_link[link])
+ qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
+
+@@ -974,7 +985,7 @@ static void qlt_free_session_done(struct work_struct *work)
+ qlt_send_first_logo(vha, &logo);
+ }
+
+- if (sess->logout_on_delete) {
++ if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
+ int rc;
+
+ rc = qla2x00_post_async_logout_work(vha, sess, NULL);
+@@ -1033,8 +1044,7 @@ static void qlt_free_session_done(struct work_struct *work)
+ sess->login_succ = 0;
+ }
+
+- if (sess->chip_reset != ha->base_qpair->chip_reset)
+- qla2x00_clear_loop_id(sess);
++ qla2x00_clear_loop_id(sess);
+
+ if (sess->conflict) {
+ sess->conflict->login_pause = 0;
+@@ -1205,7 +1215,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess,
+ ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+ "Scheduling sess %p for deletion\n", sess);
+
+- schedule_work(&sess->del_work);
++ INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
++ queue_work(sess->vha->hw->wq, &sess->del_work);
+ }
+
+ void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
+@@ -1560,8 +1571,11 @@ static void qlt_release(struct qla_tgt *tgt)
+
+ btree_destroy64(&tgt->lun_qpair_map);
+
+- if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target)
+- ha->tgt.tgt_ops->remove_target(vha);
++ if (vha->vp_idx)
++ if (ha->tgt.tgt_ops &&
++ ha->tgt.tgt_ops->remove_target &&
++ vha->vha_tgt.target_lport_ptr)
++ ha->tgt.tgt_ops->remove_target(vha);
+
+ vha->vha_tgt.qla_tgt = NULL;
+
+@@ -3708,7 +3722,7 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
+ term = 1;
+
+ if (term)
+- qlt_term_ctio_exchange(qpair, ctio, cmd, status);
++ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
+
+ return term;
+ }
+@@ -4584,9 +4598,9 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
+ "Invalidating sess %p loop_id %d wwn %llx.\n",
+ other_sess, other_sess->loop_id, other_wwn);
+
+-
+ other_sess->keep_nport_handle = 1;
+- *conflict_sess = other_sess;
++ if (other_sess->disc_state != DSC_DELETED)
++ *conflict_sess = other_sess;
+ qlt_schedule_sess_for_deletion(other_sess,
+ true);
+ }
+@@ -4733,6 +4747,10 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
+ sess->d_id = port_id;
+ sess->login_gen++;
+
++ ql_dbg(ql_dbg_disc, vha, 0x20f9,
++ "%s %d %8phC DS %d\n",
++ __func__, __LINE__, sess->port_name, sess->disc_state);
++
+ switch (sess->disc_state) {
+ case DSC_DELETED:
+ qlt_plogi_ack_unref(vha, pla);
+@@ -4782,12 +4800,20 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
+ }
+
+ if (conflict_sess) {
+- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
+- "PRLI with conflicting sess %p port %8phC\n",
+- conflict_sess, conflict_sess->port_name);
+- qlt_send_term_imm_notif(vha, iocb, 1);
+- res = 0;
+- break;
++ switch (conflict_sess->disc_state) {
++ case DSC_DELETED:
++ case DSC_DELETE_PEND:
++ break;
++ default:
++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
++ "PRLI with conflicting sess %p port %8phC\n",
++ conflict_sess, conflict_sess->port_name);
++ conflict_sess->fw_login_state =
++ DSC_LS_PORT_UNAVAIL;
++ qlt_send_term_imm_notif(vha, iocb, 1);
++ res = 0;
++ break;
++ }
+ }
+
+ if (sess != NULL) {
+@@ -5755,7 +5781,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
+ unsigned long flags;
+ u8 newfcport = 0;
+
+- fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
++ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
+ "qla_target(%d): Allocation of tmp FC port failed",
+@@ -5784,6 +5810,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
+ tfcp->port_type = fcport->port_type;
+ tfcp->supported_classes = fcport->supported_classes;
+ tfcp->flags |= fcport->flags;
++ tfcp->scan_state = QLA_FCPORT_FOUND;
+
+ del = fcport;
+ fcport = tfcp;
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 3737c6d3b064..61628581c6a2 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -222,7 +222,8 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd)
+
+ static void scsi_eh_inc_host_failed(struct rcu_head *head)
+ {
+- struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu);
++ struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
++ struct Scsi_Host *shost = scmd->device->host;
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+@@ -258,7 +259,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
+ * Ensure that all tasks observe the host state change before the
+ * host_failed change.
+ */
+- call_rcu(&shost->rcu, scsi_eh_inc_host_failed);
++ call_rcu(&scmd->rcu, scsi_eh_inc_host_failed);
+ }
+
+ /**
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 83856ee14851..8f9a2e50d742 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -670,6 +670,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
+ if (!blk_rq_is_scsi(req)) {
+ WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
+ cmd->flags &= ~SCMD_INITIALIZED;
++ destroy_rcu_head(&cmd->rcu);
+ }
+
+ if (req->mq_ctx) {
+@@ -1150,6 +1151,7 @@ void scsi_initialize_rq(struct request *rq)
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ scsi_req_init(&cmd->req);
++ init_rcu_head(&cmd->rcu);
+ cmd->jiffies_at_alloc = jiffies;
+ cmd->retries = 0;
+ }
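
The scsi_error.c and scsi_lib.c hunks are two halves of one fix: the rcu_head moves from the Scsi_Host (where overlapping error completions could reuse it while a grace period was still pending) into each scsi_cmnd, and the callback now recovers the command from its embedded member. A standalone sketch of the container_of() recovery step, with toy stand-ins for the kernel types:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head { void (*func)(struct rcu_head *); };

    struct scsi_cmnd {
        int tag;
        struct rcu_head rcu;    /* per-command, as the patch adds */
    };

    static void eh_done(struct rcu_head *head)
    {
        /* recover the enclosing command from its rcu member */
        struct scsi_cmnd *scmd = container_of(head, struct scsi_cmnd, rcu);

        printf("grace period over for cmd %d\n", scmd->tag);
    }

    int main(void)
    {
        struct scsi_cmnd cmd = { .tag = 7 };

        cmd.rcu.func = eh_done;
        cmd.rcu.func(&cmd.rcu); /* call_rcu() would defer this invocation */
        return 0;
    }
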
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index eb30f3e09a47..71458f493cf8 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -428,8 +428,6 @@ static inline int virtqueue_add(struct virtqueue *_vq,
+ i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
+ }
+
+- vq->vq.num_free += total_sg;
+-
+ if (indirect)
+ kfree(desc);
+
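
The virtio_ring deletion is a pure accounting fix: in virtqueue_add() the free-descriptor count is only debited after the descriptors are successfully posted, so the error-unwind path must not credit total_sg back, since that refund was never taken. A toy counter showing the invariant:

    #include <stdio.h>

    static int num_free = 8;

    static int vring_add(int descs_used, int fail)
    {
        /* ... map buffers, link descriptors ... */
        if (fail) {
            /*
             * unwind the mappings; num_free was never debited on this
             * path, so crediting it here (the deleted line) double-counts
             */
            return -1;
        }
        num_free -= descs_used;         /* debit only on success */
        return 0;
    }

    int main(void)
    {
        vring_add(3, 1);                /* failed add */
        printf("num_free=%d\n", num_free);  /* still 8, not 11 */
        vring_add(3, 0);
        printf("num_free=%d\n", num_free);  /* 5 */
        return 0;
    }
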
+diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
+index 67fbe35ce7cf..b0a158073abd 100644
+--- a/drivers/watchdog/hpwdt.c
++++ b/drivers/watchdog/hpwdt.c
+@@ -28,16 +28,7 @@
+ #include <linux/types.h>
+ #include <linux/uaccess.h>
+ #include <linux/watchdog.h>
+-#ifdef CONFIG_HPWDT_NMI_DECODING
+-#include <linux/dmi.h>
+-#include <linux/spinlock.h>
+-#include <linux/nmi.h>
+-#include <linux/kdebug.h>
+-#include <linux/notifier.h>
+-#include <asm/set_memory.h>
+-#endif /* CONFIG_HPWDT_NMI_DECODING */
+ #include <asm/nmi.h>
+-#include <asm/frame.h>
+
+ #define HPWDT_VERSION "1.4.0"
+ #define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
+@@ -48,10 +39,14 @@
+ static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */
+ static unsigned int reload; /* the computed soft_margin */
+ static bool nowayout = WATCHDOG_NOWAYOUT;
++#ifdef CONFIG_HPWDT_NMI_DECODING
++static unsigned int allow_kdump = 1;
++#endif
+ static char expect_release;
+ static unsigned long hpwdt_is_open;
+
+ static void __iomem *pci_mem_addr; /* the PCI-memory address */
++static unsigned long __iomem *hpwdt_nmistat;
+ static unsigned long __iomem *hpwdt_timer_reg;
+ static unsigned long __iomem *hpwdt_timer_con;
+
+@@ -62,373 +57,6 @@ static const struct pci_device_id hpwdt_devices[] = {
+ };
+ MODULE_DEVICE_TABLE(pci, hpwdt_devices);
+
+-#ifdef CONFIG_HPWDT_NMI_DECODING
+-#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */
+-#define CRU_BIOS_SIGNATURE_VALUE 0x55524324
+-#define PCI_BIOS32_PARAGRAPH_LEN 16
+-#define PCI_ROM_BASE1 0x000F0000
+-#define ROM_SIZE 0x10000
+-
+-struct bios32_service_dir {
+- u32 signature;
+- u32 entry_point;
+- u8 revision;
+- u8 length;
+- u8 checksum;
+- u8 reserved[5];
+-};
+-
+-/* type 212 */
+-struct smbios_cru64_info {
+- u8 type;
+- u8 byte_length;
+- u16 handle;
+- u32 signature;
+- u64 physical_address;
+- u32 double_length;
+- u32 double_offset;
+-};
+-#define SMBIOS_CRU64_INFORMATION 212
+-
+-/* type 219 */
+-struct smbios_proliant_info {
+- u8 type;
+- u8 byte_length;
+- u16 handle;
+- u32 power_features;
+- u32 omega_features;
+- u32 reserved;
+- u32 misc_features;
+-};
+-#define SMBIOS_ICRU_INFORMATION 219
+-
+-
+-struct cmn_registers {
+- union {
+- struct {
+- u8 ral;
+- u8 rah;
+- u16 rea2;
+- };
+- u32 reax;
+- } u1;
+- union {
+- struct {
+- u8 rbl;
+- u8 rbh;
+- u8 reb2l;
+- u8 reb2h;
+- };
+- u32 rebx;
+- } u2;
+- union {
+- struct {
+- u8 rcl;
+- u8 rch;
+- u16 rec2;
+- };
+- u32 recx;
+- } u3;
+- union {
+- struct {
+- u8 rdl;
+- u8 rdh;
+- u16 red2;
+- };
+- u32 redx;
+- } u4;
+-
+- u32 resi;
+- u32 redi;
+- u16 rds;
+- u16 res;
+- u32 reflags;
+-} __attribute__((packed));
+-
+-static unsigned int hpwdt_nmi_decoding;
+-static unsigned int allow_kdump = 1;
+-static unsigned int is_icru;
+-static unsigned int is_uefi;
+-static DEFINE_SPINLOCK(rom_lock);
+-static void *cru_rom_addr;
+-static struct cmn_registers cmn_regs;
+-
+-extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
+- unsigned long *pRomEntry);
+-
+-#ifdef CONFIG_X86_32
+-/* --32 Bit Bios------------------------------------------------------------ */
+-
+-#define HPWDT_ARCH 32
+-
+-asm(".text \n\t"
+- ".align 4 \n\t"
+- ".globl asminline_call \n"
+- "asminline_call: \n\t"
+- "pushl %ebp \n\t"
+- "movl %esp, %ebp \n\t"
+- "pusha \n\t"
+- "pushf \n\t"
+- "push %es \n\t"
+- "push %ds \n\t"
+- "pop %es \n\t"
+- "movl 8(%ebp),%eax \n\t"
+- "movl 4(%eax),%ebx \n\t"
+- "movl 8(%eax),%ecx \n\t"
+- "movl 12(%eax),%edx \n\t"
+- "movl 16(%eax),%esi \n\t"
+- "movl 20(%eax),%edi \n\t"
+- "movl (%eax),%eax \n\t"
+- "push %cs \n\t"
+- "call *12(%ebp) \n\t"
+- "pushf \n\t"
+- "pushl %eax \n\t"
+- "movl 8(%ebp),%eax \n\t"
+- "movl %ebx,4(%eax) \n\t"
+- "movl %ecx,8(%eax) \n\t"
+- "movl %edx,12(%eax) \n\t"
+- "movl %esi,16(%eax) \n\t"
+- "movl %edi,20(%eax) \n\t"
+- "movw %ds,24(%eax) \n\t"
+- "movw %es,26(%eax) \n\t"
+- "popl %ebx \n\t"
+- "movl %ebx,(%eax) \n\t"
+- "popl %ebx \n\t"
+- "movl %ebx,28(%eax) \n\t"
+- "pop %es \n\t"
+- "popf \n\t"
+- "popa \n\t"
+- "leave \n\t"
+- "ret \n\t"
+- ".previous");
+-
+-
+-/*
+- * cru_detect
+- *
+- * Routine Description:
+- * This function uses the 32-bit BIOS Service Directory record to
+- * search for a $CRU record.
+- *
+- * Return Value:
+- * 0 : SUCCESS
+- * <0 : FAILURE
+- */
+-static int cru_detect(unsigned long map_entry,
+- unsigned long map_offset)
+-{
+- void *bios32_map;
+- unsigned long *bios32_entrypoint;
+- unsigned long cru_physical_address;
+- unsigned long cru_length;
+- unsigned long physical_bios_base = 0;
+- unsigned long physical_bios_offset = 0;
+- int retval = -ENODEV;
+-
+- bios32_map = ioremap(map_entry, (2 * PAGE_SIZE));
+-
+- if (bios32_map == NULL)
+- return -ENODEV;
+-
+- bios32_entrypoint = bios32_map + map_offset;
+-
+- cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
+-
+- set_memory_x((unsigned long)bios32_map, 2);
+- asminline_call(&cmn_regs, bios32_entrypoint);
+-
+- if (cmn_regs.u1.ral != 0) {
+- pr_warn("Call succeeded but with an error: 0x%x\n",
+- cmn_regs.u1.ral);
+- } else {
+- physical_bios_base = cmn_regs.u2.rebx;
+- physical_bios_offset = cmn_regs.u4.redx;
+- cru_length = cmn_regs.u3.recx;
+- cru_physical_address =
+- physical_bios_base + physical_bios_offset;
+-
+- /* If the values look OK, then map it in. */
+- if ((physical_bios_base + physical_bios_offset)) {
+- cru_rom_addr =
+- ioremap(cru_physical_address, cru_length);
+- if (cru_rom_addr) {
+- set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
+- (cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT);
+- retval = 0;
+- }
+- }
+-
+- pr_debug("CRU Base Address: 0x%lx\n", physical_bios_base);
+- pr_debug("CRU Offset Address: 0x%lx\n", physical_bios_offset);
+- pr_debug("CRU Length: 0x%lx\n", cru_length);
+- pr_debug("CRU Mapped Address: %p\n", &cru_rom_addr);
+- }
+- iounmap(bios32_map);
+- return retval;
+-}
+-
+-/*
+- * bios_checksum
+- */
+-static int bios_checksum(const char __iomem *ptr, int len)
+-{
+- char sum = 0;
+- int i;
+-
+- /*
+- * calculate checksum of size bytes. This should add up
+- * to zero if we have a valid header.
+- */
+- for (i = 0; i < len; i++)
+- sum += ptr[i];
+-
+- return ((sum == 0) && (len > 0));
+-}
+-
+-/*
+- * bios32_present
+- *
+- * Routine Description:
+- * This function finds the 32-bit BIOS Service Directory
+- *
+- * Return Value:
+- * 0 : SUCCESS
+- * <0 : FAILURE
+- */
+-static int bios32_present(const char __iomem *p)
+-{
+- struct bios32_service_dir *bios_32_ptr;
+- int length;
+- unsigned long map_entry, map_offset;
+-
+- bios_32_ptr = (struct bios32_service_dir *) p;
+-
+- /*
+- * Search for signature by checking equal to the swizzled value
+- * instead of calling another routine to perform a strcmp.
+- */
+- if (bios_32_ptr->signature == PCI_BIOS32_SD_VALUE) {
+- length = bios_32_ptr->length * PCI_BIOS32_PARAGRAPH_LEN;
+- if (bios_checksum(p, length)) {
+- /*
+- * According to the spec, we're looking for the
+- * first 4KB-aligned address below the entrypoint
+- * listed in the header. The Service Directory code
+- * is guaranteed to occupy no more than 2 4KB pages.
+- */
+- map_entry = bios_32_ptr->entry_point & ~(PAGE_SIZE - 1);
+- map_offset = bios_32_ptr->entry_point - map_entry;
+-
+- return cru_detect(map_entry, map_offset);
+- }
+- }
+- return -ENODEV;
+-}
+-
+-static int detect_cru_service(void)
+-{
+- char __iomem *p, *q;
+- int rc = -1;
+-
+- /*
+- * Search from 0x0f0000 through 0x0fffff, inclusive.
+- */
+- p = ioremap(PCI_ROM_BASE1, ROM_SIZE);
+- if (p == NULL)
+- return -ENOMEM;
+-
+- for (q = p; q < p + ROM_SIZE; q += 16) {
+- rc = bios32_present(q);
+- if (!rc)
+- break;
+- }
+- iounmap(p);
+- return rc;
+-}
+-/* ------------------------------------------------------------------------- */
+-#endif /* CONFIG_X86_32 */
+-#ifdef CONFIG_X86_64
+-/* --64 Bit Bios------------------------------------------------------------ */
+-
+-#define HPWDT_ARCH 64
+-
+-asm(".text \n\t"
+- ".align 4 \n\t"
+- ".globl asminline_call \n\t"
+- ".type asminline_call, @function \n\t"
+- "asminline_call: \n\t"
+- FRAME_BEGIN
+- "pushq %rax \n\t"
+- "pushq %rbx \n\t"
+- "pushq %rdx \n\t"
+- "pushq %r12 \n\t"
+- "pushq %r9 \n\t"
+- "movq %rsi, %r12 \n\t"
+- "movq %rdi, %r9 \n\t"
+- "movl 4(%r9),%ebx \n\t"
+- "movl 8(%r9),%ecx \n\t"
+- "movl 12(%r9),%edx \n\t"
+- "movl 16(%r9),%esi \n\t"
+- "movl 20(%r9),%edi \n\t"
+- "movl (%r9),%eax \n\t"
+- "call *%r12 \n\t"
+- "pushfq \n\t"
+- "popq %r12 \n\t"
+- "movl %eax, (%r9) \n\t"
+- "movl %ebx, 4(%r9) \n\t"
+- "movl %ecx, 8(%r9) \n\t"
+- "movl %edx, 12(%r9) \n\t"
+- "movl %esi, 16(%r9) \n\t"
+- "movl %edi, 20(%r9) \n\t"
+- "movq %r12, %rax \n\t"
+- "movl %eax, 28(%r9) \n\t"
+- "popq %r9 \n\t"
+- "popq %r12 \n\t"
+- "popq %rdx \n\t"
+- "popq %rbx \n\t"
+- "popq %rax \n\t"
+- FRAME_END
+- "ret \n\t"
+- ".previous");
+-
+-/*
+- * dmi_find_cru
+- *
+- * Routine Description:
+- * This function checks whether or not a SMBIOS/DMI record is
+- * the 64bit CRU info or not
+- */
+-static void dmi_find_cru(const struct dmi_header *dm, void *dummy)
+-{
+- struct smbios_cru64_info *smbios_cru64_ptr;
+- unsigned long cru_physical_address;
+-
+- if (dm->type == SMBIOS_CRU64_INFORMATION) {
+- smbios_cru64_ptr = (struct smbios_cru64_info *) dm;
+- if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) {
+- cru_physical_address =
+- smbios_cru64_ptr->physical_address +
+- smbios_cru64_ptr->double_offset;
+- cru_rom_addr = ioremap(cru_physical_address,
+- smbios_cru64_ptr->double_length);
+- set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
+- smbios_cru64_ptr->double_length >> PAGE_SHIFT);
+- }
+- }
+-}
+-
+-static int detect_cru_service(void)
+-{
+- cru_rom_addr = NULL;
+-
+- dmi_walk(dmi_find_cru, NULL);
+-
+- /* if cru_rom_addr has been set then we found a CRU service */
+- return ((cru_rom_addr != NULL) ? 0 : -ENODEV);
+-}
+-/* ------------------------------------------------------------------------- */
+-#endif /* CONFIG_X86_64 */
+-#endif /* CONFIG_HPWDT_NMI_DECODING */
+
+ /*
+ * Watchdog operations
+@@ -475,32 +103,22 @@ static int hpwdt_time_left(void)
+ }
+
+ #ifdef CONFIG_HPWDT_NMI_DECODING
++static int hpwdt_my_nmi(void)
++{
++ return ioread8(hpwdt_nmistat) & 0x6;
++}
++
+ /*
+ * NMI Handler
+ */
+ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
+ {
+- unsigned long rom_pl;
+- static int die_nmi_called;
+-
+- if (!hpwdt_nmi_decoding)
++ if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi())
+ return NMI_DONE;
+
+- spin_lock_irqsave(&rom_lock, rom_pl);
+- if (!die_nmi_called && !is_icru && !is_uefi)
+- asminline_call(&cmn_regs, cru_rom_addr);
+- die_nmi_called = 1;
+- spin_unlock_irqrestore(&rom_lock, rom_pl);
+-
+ if (allow_kdump)
+ hpwdt_stop();
+
+- if (!is_icru && !is_uefi) {
+- if (cmn_regs.u1.ral == 0) {
+- nmi_panic(regs, "An NMI occurred, but unable to determine source.\n");
+- return NMI_HANDLED;
+- }
+- }
+ nmi_panic(regs, "An NMI occurred. Depending on your system the reason "
+ "for the NMI is logged in any one of the following "
+ "resources:\n"
+@@ -666,84 +284,11 @@ static struct miscdevice hpwdt_miscdev = {
+ * Init & Exit
+ */
+
+-#ifdef CONFIG_HPWDT_NMI_DECODING
+-#ifdef CONFIG_X86_LOCAL_APIC
+-static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
+-{
+- /*
+- * If nmi_watchdog is turned off then we can turn on
+- * our nmi decoding capability.
+- */
+- hpwdt_nmi_decoding = 1;
+-}
+-#else
+-static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
+-{
+- dev_warn(&dev->dev, "NMI decoding is disabled. "
+- "Your kernel does not support a NMI Watchdog.\n");
+-}
+-#endif /* CONFIG_X86_LOCAL_APIC */
+-
+-/*
+- * dmi_find_icru
+- *
+- * Routine Description:
+- * This function checks whether or not we are on an iCRU-based server.
+- * This check is independent of architecture and needs to be made for
+- * any ProLiant system.
+- */
+-static void dmi_find_icru(const struct dmi_header *dm, void *dummy)
+-{
+- struct smbios_proliant_info *smbios_proliant_ptr;
+-
+- if (dm->type == SMBIOS_ICRU_INFORMATION) {
+- smbios_proliant_ptr = (struct smbios_proliant_info *) dm;
+- if (smbios_proliant_ptr->misc_features & 0x01)
+- is_icru = 1;
+- if (smbios_proliant_ptr->misc_features & 0x408)
+- is_uefi = 1;
+- }
+-}
+
+ static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
+ {
++#ifdef CONFIG_HPWDT_NMI_DECODING
+ int retval;
+-
+- /*
+- * On typical CRU-based systems we need to map that service in
+- * the BIOS. For 32 bit Operating Systems we need to go through
+- * the 32 Bit BIOS Service Directory. For 64 bit Operating
+- * Systems we get that service through SMBIOS.
+- *
+- * On systems that support the new iCRU service all we need to
+- * do is call dmi_walk to get the supported flag value and skip
+- * the old cru detect code.
+- */
+- dmi_walk(dmi_find_icru, NULL);
+- if (!is_icru && !is_uefi) {
+-
+- /*
+- * We need to map the ROM to get the CRU service.
+- * For 32 bit Operating Systems we need to go through the 32 Bit
+- * BIOS Service Directory
+- * For 64 bit Operating Systems we get that service through SMBIOS.
+- */
+- retval = detect_cru_service();
+- if (retval < 0) {
+- dev_warn(&dev->dev,
+- "Unable to detect the %d Bit CRU Service.\n",
+- HPWDT_ARCH);
+- return retval;
+- }
+-
+- /*
+- * We know this is the only CRU call we need to make so lets keep as
+- * few instructions as possible once the NMI comes in.
+- */
+- cmn_regs.u1.rah = 0x0D;
+- cmn_regs.u1.ral = 0x02;
+- }
+-
+ /*
+ * Only one function can register for NMI_UNKNOWN
+ */
+@@ -771,44 +316,25 @@ static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
+ dev_warn(&dev->dev,
+ "Unable to register a die notifier (err=%d).\n",
+ retval);
+- if (cru_rom_addr)
+- iounmap(cru_rom_addr);
+ return retval;
++#endif /* CONFIG_HPWDT_NMI_DECODING */
++ return 0;
+ }
+
+ static void hpwdt_exit_nmi_decoding(void)
+ {
++#ifdef CONFIG_HPWDT_NMI_DECODING
+ unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
+ unregister_nmi_handler(NMI_SERR, "hpwdt");
+ unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
+- if (cru_rom_addr)
+- iounmap(cru_rom_addr);
+-}
+-#else /* !CONFIG_HPWDT_NMI_DECODING */
+-static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
+-{
+-}
+-
+-static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
+-{
+- return 0;
++#endif
+ }
+
+-static void hpwdt_exit_nmi_decoding(void)
+-{
+-}
+-#endif /* CONFIG_HPWDT_NMI_DECODING */
+-
+ static int hpwdt_init_one(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+ {
+ int retval;
+
+- /*
+- * Check if we can do NMI decoding or not
+- */
+- hpwdt_check_nmi_decoding(dev);
+-
+ /*
+ * First let's find out if we are on an iLO2+ server. We will
+ * not run on a legacy ASM box.
+@@ -842,6 +368,7 @@ static int hpwdt_init_one(struct pci_dev *dev,
+ retval = -ENOMEM;
+ goto error_pci_iomap;
+ }
++ hpwdt_nmistat = pci_mem_addr + 0x6e;
+ hpwdt_timer_reg = pci_mem_addr + 0x70;
+ hpwdt_timer_con = pci_mem_addr + 0x72;
+
+@@ -912,6 +439,6 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ #ifdef CONFIG_HPWDT_NMI_DECODING
+ module_param(allow_kdump, int, 0);
+ MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs");
+-#endif /* !CONFIG_HPWDT_NMI_DECODING */
++#endif /* CONFIG_HPWDT_NMI_DECODING */
+
+ module_pci_driver(hpwdt_driver);
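
After the large deletion above, hpwdt no longer calls into the CRU/BIOS to decode the NMI source; instead it reads a status byte at offset 0x6e of its BAR and only claims an unknown NMI when bits 1-2 (mask 0x6) are set. A sketch of that claim test, with mmio_read8() standing in for ioread8() and the bit semantics taken on trust from the hunk:

    #include <stdio.h>
    #include <stdint.h>

    #define NMI_DONE    0
    #define NMI_HANDLED 1

    static uint8_t fake_nmistat = 0x2;   /* pretend the iLO watchdog fired */

    static uint8_t mmio_read8(void) { return fake_nmistat; }

    static int hpwdt_pretimeout(int unknown_reason)
    {
        if (unknown_reason && !(mmio_read8() & 0x6))
            return NMI_DONE;             /* not ours: let others look */
        /* the real handler stops the watchdog and panics here */
        return NMI_HANDLED;
    }

    int main(void)
    {
        printf("claimed: %d\n", hpwdt_pretimeout(1));   /* 1 */
        fake_nmistat = 0;
        printf("claimed: %d\n", hpwdt_pretimeout(1));   /* 0 */
        return 0;
    }
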
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 8c10b0562e75..621c517b325c 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -86,10 +86,10 @@ struct nfs_direct_req {
+ struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
+ int mirror_count;
+
++ loff_t io_start; /* Start offset for I/O */
+ ssize_t count, /* bytes actually processed */
+ max_count, /* max expected count */
+ bytes_left, /* bytes left to be sent */
+- io_start, /* start of IO */
+ error; /* any reported error */
+ struct completion completion; /* wait for i/o completion */
+
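
The nfs_direct_req hunk looks like a reshuffle but fixes a type bug: in a comma-separated declarator list every name takes the leading type, so io_start had silently become a ssize_t (32 bits on 32-bit builds) rather than the loff_t a file offset needs. A minimal demonstration of the pitfall (loff_t_demo is a stand-in type; compile with -m32 to see the sizes diverge):

    #include <stdio.h>
    #include <sys/types.h>          /* ssize_t */

    typedef long long loff_t_demo;  /* loff_t is 64-bit everywhere */

    struct before { ssize_t count, max_count, bytes_left, io_start; };
    struct after  { loff_t_demo io_start; ssize_t count, max_count, bytes_left; };

    int main(void)
    {
        /* identical on 64-bit hosts; 4 vs 8 on a 32-bit build */
        printf("before: io_start is %zu bytes (ssize_t)\n",
               sizeof(((struct before){0}).io_start));
        printf("after : io_start is %zu bytes (loff_t)\n",
               sizeof(((struct after){0}).io_start));
        return 0;
    }
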
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index eb098ccfefd5..b99200828d08 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -292,8 +292,11 @@ pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
+ void
+ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
+ {
+- struct inode *inode = lo->plh_inode;
++ struct inode *inode;
+
++ if (!lo)
++ return;
++ inode = lo->plh_inode;
+ pnfs_layoutreturn_before_put_layout_hdr(lo);
+
+ if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
+@@ -1241,10 +1244,12 @@ bool pnfs_roc(struct inode *ino,
+ spin_lock(&ino->i_lock);
+ lo = nfsi->layout;
+ if (!lo || !pnfs_layout_is_valid(lo) ||
+- test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
++ test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
++ lo = NULL;
+ goto out_noroc;
++ }
++ pnfs_get_layout_hdr(lo);
+ if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
+- pnfs_get_layout_hdr(lo);
+ spin_unlock(&ino->i_lock);
+ wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
+ TASK_UNINTERRUPTIBLE);
+@@ -1312,10 +1317,12 @@ bool pnfs_roc(struct inode *ino,
+ struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
+ if (ld->prepare_layoutreturn)
+ ld->prepare_layoutreturn(args);
++ pnfs_put_layout_hdr(lo);
+ return true;
+ }
+ if (layoutreturn)
+ pnfs_send_layoutreturn(lo, &stateid, iomode, true);
++ pnfs_put_layout_hdr(lo);
+ return false;
+ }
+
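
The pnfs_roc() hunks enforce a classic refcounting rule: take a reference on the layout header before any path that can drop the inode lock, and release it exactly once on every exit, rather than only on the wait-and-retry path. The shape, reduced to a toy get/put pair:

    #include <stdio.h>

    struct layout { int refs; };

    static void get_layout(struct layout *lo) { lo->refs++; }

    static void put_layout(struct layout *lo)
    {
        if (--lo->refs == 0)
            printf("layout freed\n");
    }

    static int roc(struct layout *lo, int must_wait)
    {
        if (!lo)
            return 0;
        get_layout(lo);     /* pin before the inode lock can be dropped */
        if (must_wait) {
            /* spin_unlock(); wait_on_bit(); retry: lo stays valid */
        }
        /* ... decide whether to send a layoutreturn ... */
        put_layout(lo);     /* exactly one put on every exit path */
        return 1;
    }

    int main(void)
    {
        struct layout lo = { .refs = 1 };

        roc(&lo, 1);
        printf("refs=%d\n", lo.refs);   /* back to 1: balanced */
        return 0;
    }
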
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index cf61108f8f8d..8607ad8626f6 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1878,40 +1878,43 @@ int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
+ return status;
+ }
+
+-int nfs_commit_inode(struct inode *inode, int how)
++static int __nfs_commit_inode(struct inode *inode, int how,
++ struct writeback_control *wbc)
+ {
+ LIST_HEAD(head);
+ struct nfs_commit_info cinfo;
+ int may_wait = how & FLUSH_SYNC;
+- int error = 0;
+- int res;
++ int ret, nscan;
+
+ nfs_init_cinfo_from_inode(&cinfo, inode);
+ nfs_commit_begin(cinfo.mds);
+- res = nfs_scan_commit(inode, &head, &cinfo);
+- if (res)
+- error = nfs_generic_commit_list(inode, &head, how, &cinfo);
++ for (;;) {
++ ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
++ if (ret <= 0)
++ break;
++ ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
++ if (ret < 0)
++ break;
++ ret = 0;
++ if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
++ if (nscan < wbc->nr_to_write)
++ wbc->nr_to_write -= nscan;
++ else
++ wbc->nr_to_write = 0;
++ }
++ if (nscan < INT_MAX)
++ break;
++ cond_resched();
++ }
+ nfs_commit_end(cinfo.mds);
+- if (res == 0)
+- return res;
+- if (error < 0)
+- goto out_error;
+- if (!may_wait)
+- goto out_mark_dirty;
+- error = wait_on_commit(cinfo.mds);
+- if (error < 0)
+- return error;
+- return res;
+-out_error:
+- res = error;
+- /* Note: If we exit without ensuring that the commit is complete,
+- * we must mark the inode as dirty. Otherwise, future calls to
+- * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
+- * that the data is on the disk.
+- */
+-out_mark_dirty:
+- __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+- return res;
++ if (ret || !may_wait)
++ return ret;
++ return wait_on_commit(cinfo.mds);
++}
++
++int nfs_commit_inode(struct inode *inode, int how)
++{
++ return __nfs_commit_inode(inode, how, NULL);
+ }
+ EXPORT_SYMBOL_GPL(nfs_commit_inode);
+
+@@ -1921,11 +1924,11 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ int flags = FLUSH_SYNC;
+ int ret = 0;
+
+- /* no commits means nothing needs to be done */
+- if (!atomic_long_read(&nfsi->commit_info.ncommit))
+- return ret;
+-
+ if (wbc->sync_mode == WB_SYNC_NONE) {
++ /* no commits means nothing needs to be done */
++ if (!atomic_long_read(&nfsi->commit_info.ncommit))
++ goto check_requests_outstanding;
++
+ /* Don't commit yet if this is a non-blocking flush and there
+ * are a lot of outstanding writes for this mapping.
+ */
+@@ -1936,16 +1939,16 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ flags = 0;
+ }
+
+- ret = nfs_commit_inode(inode, flags);
+- if (ret >= 0) {
+- if (wbc->sync_mode == WB_SYNC_NONE) {
+- if (ret < wbc->nr_to_write)
+- wbc->nr_to_write -= ret;
+- else
+- wbc->nr_to_write = 0;
+- }
+- return 0;
+- }
++ ret = __nfs_commit_inode(inode, flags, wbc);
++ if (!ret) {
++ if (flags & FLUSH_SYNC)
++ return 0;
++ } else if (atomic_long_read(&nfsi->commit_info.ncommit))
++ goto out_mark_dirty;
++
++check_requests_outstanding:
++ if (!atomic_read(&nfsi->commit_info.rpcs_out))
++ return ret;
+ out_mark_dirty:
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ return ret;
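
The rewritten __nfs_commit_inode() above turns the single scan-and-commit pass into a loop: it keeps scanning while nfs_scan_commit() comes back completely full (INT_MAX entries), debits a non-blocking writeback pass's nr_to_write budget as it goes, and only waits when FLUSH_SYNC asks it to. A reduced model of that loop, with BATCH playing the role of INT_MAX:

    #include <stdio.h>

    #define BATCH 4

    static int pending = 10;

    static int scan_commit(void)     /* stands in for nfs_scan_commit() */
    {
        int n = pending < BATCH ? pending : BATCH;

        pending -= n;
        return n;
    }

    static int commit_inode(long *nr_to_write)
    {
        for (;;) {
            int nscan = scan_commit();

            if (nscan <= 0)
                break;
            /* nfs_generic_commit_list() would send the COMMITs here */
            if (nr_to_write)
                *nr_to_write -= nscan < *nr_to_write ? nscan : *nr_to_write;
            if (nscan < BATCH)       /* scan wasn't full: all drained */
                break;
        }
        return 0;
    }

    int main(void)
    {
        long budget = 6;

        commit_inode(&budget);
        printf("pending=%d budget=%ld\n", pending, budget);  /* 0, 0 */
        return 0;
    }
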
+diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
+index beb945e1963c..ef3e7ea76296 100644
+--- a/fs/overlayfs/namei.c
++++ b/fs/overlayfs/namei.c
+@@ -678,9 +678,6 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+ stack[ctr].layer = lower.layer;
+ ctr++;
+
+- if (d.stop)
+- break;
+-
+ /*
+ * Following redirects can have security consequences: it's like
+ * a symlink into the lower layer without the permission checks.
+@@ -697,6 +694,9 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+ goto out_put;
+ }
+
++ if (d.stop)
++ break;
++
+ if (d.redirect && d.redirect[0] == '/' && poe != roe) {
+ poe = roe;
+
+diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
+index 76e237bd989b..6914633037a5 100644
+--- a/include/drm/drm_crtc_helper.h
++++ b/include/drm/drm_crtc_helper.h
+@@ -77,5 +77,6 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev);
+
+ void drm_kms_helper_poll_disable(struct drm_device *dev);
+ void drm_kms_helper_poll_enable(struct drm_device *dev);
++bool drm_kms_helper_is_poll_worker(void);
+
+ #endif
+diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
+index 412e83a4d3db..29c839ed656b 100644
+--- a/include/drm/drm_drv.h
++++ b/include/drm/drm_drv.h
+@@ -55,6 +55,7 @@ struct drm_mode_create_dumb;
+ #define DRIVER_ATOMIC 0x10000
+ #define DRIVER_KMS_LEGACY_CONTEXT 0x20000
+ #define DRIVER_SYNCOBJ 0x40000
++#define DRIVER_PREFER_XBGR_30BPP 0x80000
+
+ /**
+ * struct drm_driver - DRM driver structure
+diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
+index 3b609edffa8f..be3aef6839f6 100644
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -19,3 +19,8 @@
+
+ #define randomized_struct_fields_start struct {
+ #define randomized_struct_fields_end };
++
++/* Clang doesn't have a way to turn it off per-function, yet. */
++#ifdef __noretpoline
++#undef __noretpoline
++#endif
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 73bc63e0a1c4..673fbf904fe5 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -93,6 +93,10 @@
+ #define __weak __attribute__((weak))
+ #define __alias(symbol) __attribute__((alias(#symbol)))
+
++#ifdef RETPOLINE
++#define __noretpoline __attribute__((indirect_branch("keep")))
++#endif
++
+ /*
+ * it doesn't make sense on ARM (currently the only user of __naked)
+ * to trace naked functions because then mcount is called without
+diff --git a/include/linux/init.h b/include/linux/init.h
+index 506a98151131..bc27cf03c41e 100644
+--- a/include/linux/init.h
++++ b/include/linux/init.h
+@@ -6,10 +6,10 @@
+ #include <linux/types.h>
+
+ /* Built-in __init functions needn't be compiled with retpoline */
+-#if defined(RETPOLINE) && !defined(MODULE)
+-#define __noretpoline __attribute__((indirect_branch("keep")))
++#if defined(__noretpoline) && !defined(MODULE)
++#define __noinitretpoline __noretpoline
+ #else
+-#define __noretpoline
++#define __noinitretpoline
+ #endif
+
+ /* These macros are used to mark some functions or
+@@ -47,7 +47,7 @@
+
+ /* These are for everybody (although not all archs will actually
+ discard it in modules) */
+-#define __init __section(.init.text) __cold __latent_entropy __noretpoline
++#define __init __section(.init.text) __cold __latent_entropy __noinitretpoline
+ #define __initdata __section(.init.data)
+ #define __initconst __section(.init.rodata)
+ #define __exitdata __section(.exit.data)
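
The three compiler-header hunks above cooperate: compiler-gcc.h defines __noretpoline only when the kernel itself is built retpolined, compiler-clang.h removes it because Clang has no per-function opt-out, and init.h keys __init's exemption off whether the macro survived (and off MODULE). A self-contained replay of that macro layering, with RETPOLINE and MODULE as plain defines here instead of kbuild flags and the Clang #undef folded into one condition; the attribute itself needs an x86 GCC 8+ (or a retpoline-patched 7.x), older compilers merely warn it away:

    #include <stdio.h>

    #define RETPOLINE 1   /* stands in for the kbuild-provided -DRETPOLINE */
    /* #define MODULE */  /* left undefined: we are "built-in" code */

    #if defined(RETPOLINE) && !defined(__clang__)
    #define __noretpoline __attribute__((indirect_branch("keep")))
    #endif

    #if defined(__noretpoline) && !defined(MODULE)
    #define __noinitretpoline __noretpoline
    #else
    #define __noinitretpoline
    #endif

    static __noinitretpoline void early_setup(void)
    {
        puts("init path: indirect branches kept (no retpoline thunks)");
    }

    int main(void)
    {
        early_setup();
        return 0;
    }
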
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+index 132e3f5a2e0d..e791ebc65c9c 100644
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -5,6 +5,7 @@
+
+ #ifndef _LINUX_NOSPEC_H
+ #define _LINUX_NOSPEC_H
++#include <asm/barrier.h>
+
+ /**
+ * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+@@ -29,26 +30,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ }
+ #endif
+
+-/*
+- * Warn developers about inappropriate array_index_nospec() usage.
+- *
+- * Even if the CPU speculates past the WARN_ONCE branch, the
+- * sign bit of @index is taken into account when generating the
+- * mask.
+- *
+- * This warning is compiled out when the compiler can infer that
+- * @index and @size are less than LONG_MAX.
+- */
+-#define array_index_mask_nospec_check(index, size) \
+-({ \
+- if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \
+- "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \
+- _mask = 0; \
+- else \
+- _mask = array_index_mask_nospec(index, size); \
+- _mask; \
+-})
+-
+ /*
+ * array_index_nospec - sanitize an array index after a bounds check
+ *
+@@ -67,7 +48,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ ({ \
+ typeof(index) _i = (index); \
+ typeof(size) _s = (size); \
+- unsigned long _mask = array_index_mask_nospec_check(_i, _s); \
++ unsigned long _mask = array_index_mask_nospec(_i, _s); \
+ \
+ BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
+ BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
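
Note: the hunk above drops the WARN-based wrapper so array_index_nospec() always reduces to the branchless mask. For illustration, a userspace sketch of the generic masking trick, mirroring the fallback in this header (this is not the kernel code itself, and it relies on GCC's arithmetic right shift of signed values):

    #include <stdio.h>

    /* ~0UL when idx < size, 0 otherwise, computed without a branch */
    static unsigned long index_mask(unsigned long idx, unsigned long size)
    {
        return ~(long)(idx | (size - 1UL - idx)) >> (8 * sizeof(long) - 1);
    }

    static unsigned long sanitize_index(unsigned long idx, unsigned long size)
    {
        return idx & index_mask(idx, size);   /* out-of-range clamps to 0 */
    }

    int main(void)
    {
        int table[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };

        printf("%d\n", table[sanitize_index(5, 8)]);  /* prints 15 */
        printf("%lu\n", sanitize_index(12, 8));       /* prints 0 */
        return 0;
    }
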
+diff --git a/include/linux/tpm.h b/include/linux/tpm.h
+index 5a090f5ab335..881312d85574 100644
+--- a/include/linux/tpm.h
++++ b/include/linux/tpm.h
+@@ -50,6 +50,7 @@ struct tpm_class_ops {
+ unsigned long *timeout_cap);
+ int (*request_locality)(struct tpm_chip *chip, int loc);
+ void (*relinquish_locality)(struct tpm_chip *chip, int loc);
++ void (*clk_enable)(struct tpm_chip *chip, bool value);
+ };
+
+ #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index 4a54ef96aff5..bc0cda180c8b 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -465,6 +465,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
+
+ extern void workqueue_set_max_active(struct workqueue_struct *wq,
+ int max_active);
++extern struct work_struct *current_work(void);
+ extern bool current_is_workqueue_rescuer(void);
+ extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
+ extern unsigned int work_busy(struct work_struct *work);
+diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
+index 7fb57e905526..7bc752fc98de 100644
+--- a/include/scsi/scsi_cmnd.h
++++ b/include/scsi/scsi_cmnd.h
+@@ -69,6 +69,9 @@ struct scsi_cmnd {
+ struct list_head list; /* scsi_cmnd participates in queue lists */
+ struct list_head eh_entry; /* entry for the host eh_cmd_q */
+ struct delayed_work abort_work;
++
++ struct rcu_head rcu;
++
+	int eh_eflags;		/* Used by error handler */
+
+ /*
+diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
+index 1a1df0d21ee3..a8b7bf879ced 100644
+--- a/include/scsi/scsi_host.h
++++ b/include/scsi/scsi_host.h
+@@ -571,8 +571,6 @@ struct Scsi_Host {
+ struct blk_mq_tag_set tag_set;
+ };
+
+- struct rcu_head rcu;
+-
+ atomic_t host_busy; /* commands actually active on low-level */
+ atomic_t host_blocked;
+
+diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
+index ce5b669003b2..ea8212118404 100644
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -339,7 +339,7 @@ static int cpu_map_kthread_run(void *data)
+
+ struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, int map_id)
+ {
+- gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN;
++ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+ struct bpf_cpu_map_entry *rcpu;
+ int numa, err;
+
+diff --git a/kernel/panic.c b/kernel/panic.c
+index 2cfef408fec9..4b794f1d8561 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -640,7 +640,7 @@ device_initcall(register_warn_debugfs);
+ */
+ __visible void __stack_chk_fail(void)
+ {
+- panic("stack-protector: Kernel stack is corrupted in: %p\n",
++ panic("stack-protector: Kernel stack is corrupted in: %pB\n",
+ __builtin_return_address(0));
+ }
+ EXPORT_SYMBOL(__stack_chk_fail);
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index f699122dab32..34f1e1a2ec12 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -4168,6 +4168,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
+ }
+ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
+
++/**
++ * current_work - retrieve %current task's work struct
++ *
++ * Determine if %current task is a workqueue worker and what it's working on.
++ * Useful to find out the context that the %current task is running in.
++ *
++ * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
++ */
++struct work_struct *current_work(void)
++{
++ struct worker *worker = current_wq_worker();
++
++ return worker ? worker->current_work : NULL;
++}
++EXPORT_SYMBOL(current_work);
++
+ /**
+ * current_is_workqueue_rescuer - is %current workqueue rescuer?
+ *
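
Note: current_work() gives callers a safe way to ask whether %current is a worker and, if so, which item it is executing. A sketch of the deadlock avoidance it enables, in an invented driver context (illustrative only; this patch adds the helper, not this user, and the snippet is not compilable outside a driver):

    #include <linux/workqueue.h>

    static struct work_struct reset_work;   /* hypothetical driver state */

    static void teardown(void)
    {
        /*
         * flush_work() on the item we are currently running from
         * would wait on ourselves forever; current_work() lets us
         * detect that case and skip the flush.
         */
        if (current_work() != &reset_work)
            flush_work(&reset_work);
    }
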
+diff --git a/lib/bug.c b/lib/bug.c
+index c1b0fad31b10..1077366f496b 100644
+--- a/lib/bug.c
++++ b/lib/bug.c
+@@ -150,6 +150,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+ return BUG_TRAP_TYPE_NONE;
+
+ bug = find_bug(bugaddr);
++ if (!bug)
++ return BUG_TRAP_TYPE_NONE;
+
+ file = NULL;
+ line = 0;
+@@ -191,7 +193,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+ if (file)
+ pr_crit("kernel BUG at %s:%u!\n", file, line);
+ else
+- pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
++ pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
+ (void *)bugaddr);
+
+ return BUG_TRAP_TYPE_BUG;
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 46aacdfa4f4d..d25b5a456cca 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -1107,7 +1107,7 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
+ struct memblock_type *type = &memblock.memory;
+ unsigned int right = type->cnt;
+ unsigned int mid, left = 0;
+- phys_addr_t addr = PFN_PHYS(pfn + 1);
++ phys_addr_t addr = PFN_PHYS(++pfn);
+
+ do {
+ mid = (right + left) / 2;
+@@ -1118,15 +1118,15 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
+ type->regions[mid].size))
+ left = mid + 1;
+ else {
+- /* addr is within the region, so pfn + 1 is valid */
+- return min(pfn + 1, max_pfn);
++ /* addr is within the region, so pfn is valid */
++ return pfn;
+ }
+ } while (left < right);
+
+ if (right == type->cnt)
+- return max_pfn;
++ return -1UL;
+ else
+- return min(PHYS_PFN(type->regions[right].base), max_pfn);
++ return PHYS_PFN(type->regions[right].base);
+ }
+
+ /**
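
Note: the rewritten memblock_next_valid_pfn() pre-increments pfn before searching and reports "no further valid pfn" with a -1UL sentinel instead of clamping to max_pfn. A self-contained userspace model of the binary search over sorted, non-overlapping regions (invented data; plain addresses stand in for pfns, 64-bit assumed):

    #include <stdio.h>

    struct region { unsigned long base, size; };
    static const struct region mem[] = { { 0x1000, 0x4000 }, { 0x9000, 0x2000 } };
    #define NR (sizeof(mem) / sizeof(mem[0]))

    /* next valid address at or after addr, or -1UL past the last region */
    static unsigned long next_valid(unsigned long addr)
    {
        unsigned int left = 0, right = NR, mid;

        do {
            mid = (left + right) / 2;
            if (addr < mem[mid].base)
                right = mid;
            else if (addr >= mem[mid].base + mem[mid].size)
                left = mid + 1;
            else
                return addr;                    /* inside a region */
        } while (left < right);

        return right == NR ? -1UL : mem[right].base;
    }

    int main(void)
    {
        printf("%#lx\n", next_valid(0x2000));   /* 0x2000: inside region 0 */
        printf("%#lx\n", next_valid(0x6000));   /* 0x9000: start of region 1 */
        printf("%#lx\n", next_valid(0xc000));   /* all ones: no more regions */
        return 0;
    }
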
+diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
+index 279527f8b1fe..59baaecd3e54 100644
+--- a/net/bridge/netfilter/ebt_among.c
++++ b/net/bridge/netfilter/ebt_among.c
+@@ -172,18 +172,35 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ return true;
+ }
+
++static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
++{
++ return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
++}
++
+ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
+ {
+ const struct ebt_among_info *info = par->matchinfo;
+ const struct ebt_entry_match *em =
+ container_of(par->matchinfo, const struct ebt_entry_match, data);
+- int expected_length = sizeof(struct ebt_among_info);
++ unsigned int expected_length = sizeof(struct ebt_among_info);
+ const struct ebt_mac_wormhash *wh_dst, *wh_src;
+ int err;
+
++ if (expected_length > em->match_size)
++ return -EINVAL;
++
+ wh_dst = ebt_among_wh_dst(info);
+- wh_src = ebt_among_wh_src(info);
++ if (poolsize_invalid(wh_dst))
++ return -EINVAL;
++
+ expected_length += ebt_mac_wormhash_size(wh_dst);
++ if (expected_length > em->match_size)
++ return -EINVAL;
++
++ wh_src = ebt_among_wh_src(info);
++ if (poolsize_invalid(wh_src))
++ return -EINVAL;
++
+ expected_length += ebt_mac_wormhash_size(wh_src);
+
+ if (em->match_size != EBT_ALIGN(expected_length)) {
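
Note: poolsize_invalid() above rejects each wormhash's poolsize before it is multiplied into expected_length, so a crafted ruleset cannot overflow the 32-bit length arithmetic and still pass the final match_size comparison. The guard pattern in isolation, as a runnable sketch with an invented element type:

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    struct tuple { unsigned int cmp[2]; int key; };   /* stand-in element */

    /* reject poolsize before poolsize * sizeof() can wrap 32-bit math */
    static void *alloc_pool(unsigned int poolsize)
    {
        if (poolsize >= INT_MAX / sizeof(struct tuple))
            return NULL;
        return malloc(sizeof(struct tuple) * poolsize);
    }

    int main(void)
    {
        void *p = alloc_pool(16);

        printf("small pool: %s\n", p ? "ok" : "rejected");
        free(p);
        printf("huge pool:  %s\n", alloc_pool(0x40000000) ? "ok" : "rejected");
        return 0;
    }
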
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 37817d25b63d..895ba1cd9750 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -2053,7 +2053,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
+ if (match_kern)
+ match_kern->match_size = ret;
+
+- WARN_ON(type == EBT_COMPAT_TARGET && size_left);
++ if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
++ return -EINVAL;
++
+ match32 = (struct compat_ebt_entry_mwt *) buf;
+ }
+
+@@ -2109,6 +2111,15 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
+ *
+ * offsets are relative to beginning of struct ebt_entry (i.e., 0).
+ */
++ for (i = 0; i < 4 ; ++i) {
++ if (offsets[i] >= *total)
++ return -EINVAL;
++ if (i == 0)
++ continue;
++ if (offsets[i-1] > offsets[i])
++ return -EINVAL;
++ }
++
+ for (i = 0, j = 1 ; j < 4 ; j++, i++) {
+ struct compat_ebt_entry_mwt *match32;
+ unsigned int size;
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index 0c3c944a7b72..8e5185ad6310 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -257,6 +257,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
+ }
+ if (table_base + v
+ != arpt_next_entry(e)) {
++ if (unlikely(stackidx >= private->stacksize)) {
++ verdict = NF_DROP;
++ break;
++ }
+ jumpstack[stackidx++] = e;
+ }
+
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index 2e0d339028bb..a74a81624983 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -335,8 +335,13 @@ ipt_do_table(struct sk_buff *skb,
+ continue;
+ }
+ if (table_base + v != ipt_next_entry(e) &&
+- !(e->ip.flags & IPT_F_GOTO))
++ !(e->ip.flags & IPT_F_GOTO)) {
++ if (unlikely(stackidx >= private->stacksize)) {
++ verdict = NF_DROP;
++ break;
++ }
+ jumpstack[stackidx++] = e;
++ }
+
+ e = get_entry(table_base, v);
+ continue;
+diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
+index 1e4a7209a3d2..77a01c484807 100644
+--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
++++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
+@@ -107,12 +107,6 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
+
+ local_bh_disable();
+ if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
+- list_del_rcu(&c->list);
+- spin_unlock(&cn->lock);
+- local_bh_enable();
+-
+- unregister_netdevice_notifier(&c->notifier);
+-
+ /* In case anyone still accesses the file, the open/close
+ * functions are also incrementing the refcount on their own,
+ * so it's safe to remove the entry even if it's in use. */
+@@ -120,6 +114,12 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
+ if (cn->procdir)
+ proc_remove(c->pde);
+ #endif
++ list_del_rcu(&c->list);
++ spin_unlock(&cn->lock);
++ local_bh_enable();
++
++ unregister_netdevice_notifier(&c->notifier);
++
+ return;
+ }
+ local_bh_enable();
+diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
+index 39970e212ad5..9bf260459f83 100644
+--- a/net/ipv6/netfilter.c
++++ b/net/ipv6/netfilter.c
+@@ -21,18 +21,19 @@
+ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
+ {
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
++ struct sock *sk = sk_to_full_sk(skb->sk);
+ unsigned int hh_len;
+ struct dst_entry *dst;
+ struct flowi6 fl6 = {
+- .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
++ .flowi6_oif = sk ? sk->sk_bound_dev_if : 0,
+ .flowi6_mark = skb->mark,
+- .flowi6_uid = sock_net_uid(net, skb->sk),
++ .flowi6_uid = sock_net_uid(net, sk),
+ .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ };
+ int err;
+
+- dst = ip6_route_output(net, skb->sk, &fl6);
++ dst = ip6_route_output(net, sk, &fl6);
+ err = dst->error;
+ if (err) {
+ IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+@@ -50,7 +51,7 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
+ if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+ xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
+ skb_dst_set(skb, NULL);
+- dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0);
++ dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+ skb_dst_set(skb, dst);
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 1d7ae9366335..51f3bc632c7c 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -357,6 +357,10 @@ ip6t_do_table(struct sk_buff *skb,
+ }
+ if (table_base + v != ip6t_next_entry(e) &&
+ !(e->ipv6.flags & IP6T_F_GOTO)) {
++ if (unlikely(stackidx >= private->stacksize)) {
++ verdict = NF_DROP;
++ break;
++ }
+ jumpstack[stackidx++] = e;
+ }
+
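
Note: this hunk, like its twins in arp_tables and ip_tables above, bounds the jump stack before pushing a return entry, so a ruleset crafted to nest deeper than private->stacksize drops the packet instead of writing past the array. The pattern in isolation:

    #include <stdio.h>

    #define STACKSIZE 4

    static const void *jumpstack[STACKSIZE];

    /* push only when there is room; callers drop the packet otherwise */
    static int push_return(unsigned int *stackidx, const void *e)
    {
        if (*stackidx >= STACKSIZE)
            return -1;              /* overflow: caller issues NF_DROP */
        jumpstack[(*stackidx)++] = e;
        return 0;
    }

    int main(void)
    {
        unsigned int idx = 0;
        int dummy;

        for (int i = 0; i < 6; i++)
            if (push_return(&idx, &dummy))
                puts("dropped: jump stack exhausted");
        return 0;
    }
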
+diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+index 1d2fb9267d6f..6a203fa82dbd 100644
+--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
++++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+@@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
+ !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
+ target, maniptype))
+ return false;
++
++ /* must reload, offset might have changed */
++ ipv6h = (void *)skb->data + iphdroff;
++
+ manip_addr:
+ if (maniptype == NF_NAT_MANIP_SRC)
+ ipv6h->saddr = target->src.u3.in6;
+diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
+index fbce552a796e..7d7466dbf663 100644
+--- a/net/netfilter/nf_nat_proto_common.c
++++ b/net/netfilter/nf_nat_proto_common.c
+@@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
+ const struct nf_conn *ct,
+ u16 *rover)
+ {
+- unsigned int range_size, min, i;
++ unsigned int range_size, min, max, i;
+ __be16 *portptr;
+ u_int16_t off;
+
+@@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
+ }
+ } else {
+ min = ntohs(range->min_proto.all);
+- range_size = ntohs(range->max_proto.all) - min + 1;
++ max = ntohs(range->max_proto.all);
++ if (unlikely(max < min))
++ swap(max, min);
++ range_size = max - min + 1;
+ }
+
+ if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) {
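
Note: swapping an inverted min/max pair keeps the unsigned range_size computation from wrapping to a huge value when userspace supplies max < min. A runnable sketch; swap() here is a local stand-in for the kernel macro:

    #include <stdio.h>

    #define swap(a, b) \
        do { __typeof__(a) __t = (a); (a) = (b); (b) = __t; } while (0)

    /* derive a sane range size from an untrusted min/max pair */
    static unsigned int range_size(unsigned short min, unsigned short max)
    {
        if (max < min)          /* userspace supplied an inverted range */
            swap(max, min);
        return max - min + 1;
    }

    int main(void)
    {
        printf("%u\n", range_size(1000, 2000));  /* 1001 */
        printf("%u\n", range_size(2000, 1000));  /* 1001, not ~4 billion */
        return 0;
    }
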
+diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
+index ee3421ad108d..18b7412ab99a 100644
+--- a/net/netfilter/xt_IDLETIMER.c
++++ b/net/netfilter/xt_IDLETIMER.c
+@@ -146,11 +146,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
+ timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
+ info->timer->refcnt = 1;
+
++ INIT_WORK(&info->timer->work, idletimer_tg_work);
++
+ mod_timer(&info->timer->timer,
+ msecs_to_jiffies(info->timeout * 1000) + jiffies);
+
+- INIT_WORK(&info->timer->work, idletimer_tg_work);
+-
+ return 0;
+
+ out_free_attr:
+@@ -191,7 +191,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
+ pr_debug("timeout value is zero\n");
+ return -EINVAL;
+ }
+-
++ if (info->timeout >= INT_MAX / 1000) {
++ pr_debug("timeout value is too big\n");
++ return -EINVAL;
++ }
+ if (info->label[0] == '\0' ||
+ strnlen(info->label,
+ MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
+diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
+index 0971634e5444..18d3af5e1098 100644
+--- a/net/netfilter/xt_LED.c
++++ b/net/netfilter/xt_LED.c
+@@ -142,9 +142,10 @@ static int led_tg_check(const struct xt_tgchk_param *par)
+ goto exit_alloc;
+ }
+
+- /* See if we need to set up a timer */
+- if (ledinfo->delay > 0)
+- timer_setup(&ledinternal->timer, led_timeout_callback, 0);
++	/* Since the ledinternal timer can be shared between multiple targets,
++	 * always set it up, even if the current target does not need it.
++	 */
++ timer_setup(&ledinternal->timer, led_timeout_callback, 0);
+
+ list_add_tail(&ledinternal->list, &xt_led_triggers);
+
+@@ -181,8 +182,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
+
+ list_del(&ledinternal->list);
+
+- if (ledinfo->delay > 0)
+- del_timer_sync(&ledinternal->timer);
++ del_timer_sync(&ledinternal->timer);
+
+ led_trigger_unregister(&ledinternal->netfilter_led_trigger);
+
+diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
+index 5da8746f7b88..b8a3e740ffd4 100644
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -774,7 +774,7 @@ hashlimit_mt_common(const struct sk_buff *skb, struct xt_action_param *par,
+ if (!dh->rateinfo.prev_window &&
+ (dh->rateinfo.current_rate <= dh->rateinfo.burst)) {
+ spin_unlock(&dh->lock);
+- rcu_read_unlock_bh();
++ local_bh_enable();
+ return !(cfg->mode & XT_HASHLIMIT_INVERT);
+ } else {
+ goto overlimit;
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 6451c5013e06..af465e681b9b 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -1369,8 +1369,10 @@ static int smc_create(struct net *net, struct socket *sock, int protocol,
+ smc->use_fallback = false; /* assume rdma capability first */
+ rc = sock_create_kern(net, PF_INET, SOCK_STREAM,
+ IPPROTO_TCP, &smc->clcsock);
+- if (rc)
++ if (rc) {
+ sk_common_release(sk);
++ goto out;
++ }
+ smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
+ smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
+
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index 47cddf32aeba..4f2b25d43ec9 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -256,6 +256,8 @@ __objtool_obj := $(objtree)/tools/objtool/objtool
+
+ objtool_args = $(if $(CONFIG_UNWINDER_ORC),orc generate,check)
+
++objtool_args += $(if $(part-of-module), --module,)
++
+ ifndef CONFIG_FRAME_POINTER
+ objtool_args += --no-fp
+ endif
+@@ -264,6 +266,12 @@ objtool_args += --no-unreachable
+ else
+ objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
+ endif
++ifdef CONFIG_RETPOLINE
++ifneq ($(RETPOLINE_CFLAGS),)
++ objtool_args += --retpoline
++endif
++endif
++
+
+ ifdef CONFIG_MODVERSIONS
+ objtool_o = $(@D)/.tmp_$(@F)
+diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
+index 015aa9dbad86..06cf4c00fe88 100644
+--- a/scripts/Makefile.lib
++++ b/scripts/Makefile.lib
+@@ -287,11 +287,11 @@ cmd_dt_S_dtb= \
+ echo '\#include <asm-generic/vmlinux.lds.h>'; \
+ echo '.section .dtb.init.rodata,"a"'; \
+ echo '.balign STRUCT_ALIGNMENT'; \
+- echo '.global __dtb_$(*F)_begin'; \
+- echo '__dtb_$(*F)_begin:'; \
++ echo '.global __dtb_$(subst -,_,$(*F))_begin'; \
++ echo '__dtb_$(subst -,_,$(*F))_begin:'; \
+ echo '.incbin "$<" '; \
+- echo '__dtb_$(*F)_end:'; \
+- echo '.global __dtb_$(*F)_end'; \
++ echo '__dtb_$(subst -,_,$(*F))_end:'; \
++ echo '.global __dtb_$(subst -,_,$(*F))_end'; \
+ echo '.balign STRUCT_ALIGNMENT'; \
+ ) > $@
+
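
Note: $(subst -,_,$(*F)) matters because device-tree blob file names may contain hyphens, while assembler and C symbol names may not. A self-contained sketch mimicking the generated wrapper for a hypothetical foo-board.dtb; four placeholder bytes (the dtb magic) stand in for the real .incbin:

    #include <stdio.h>

    __asm__(
        ".section .dtb.init.rodata,\"a\"\n"
        ".global __dtb_foo_board_begin\n"
        "__dtb_foo_board_begin:\n"
        ".byte 0xd0, 0x0d, 0xfe, 0xed\n"
        "__dtb_foo_board_end:\n"
        ".global __dtb_foo_board_end\n"
        ".previous\n");

    extern char __dtb_foo_board_begin[], __dtb_foo_board_end[];

    int main(void)
    {
        /* a symbol like __dtb_foo-board_begin could not even be named here */
        printf("%td bytes\n", __dtb_foo_board_end - __dtb_foo_board_begin);
        return 0;
    }
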
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index a42cbbf2c8d9..35ff97bfd492 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -910,7 +910,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
+ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
+ struct snd_seq_event *event,
+ struct file *file, int blocking,
+- int atomic, int hop)
++ int atomic, int hop,
++ struct mutex *mutexp)
+ {
+ struct snd_seq_event_cell *cell;
+ int err;
+@@ -948,7 +949,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
+ return -ENXIO; /* queue is not allocated */
+
+ /* allocate an event cell */
+- err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
++ err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
++ file, mutexp);
+ if (err < 0)
+ return err;
+
+@@ -1017,12 +1019,11 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ return -ENXIO;
+
+ /* allocate the pool now if the pool is not allocated yet */
++ mutex_lock(&client->ioctl_mutex);
+ if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
+- mutex_lock(&client->ioctl_mutex);
+ err = snd_seq_pool_init(client->pool);
+- mutex_unlock(&client->ioctl_mutex);
+ if (err < 0)
+- return -ENOMEM;
++ goto out;
+ }
+
+ /* only process whole events */
+@@ -1073,7 +1074,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ /* ok, enqueue it */
+ err = snd_seq_client_enqueue_event(client, &event, file,
+ !(file->f_flags & O_NONBLOCK),
+- 0, 0);
++ 0, 0, &client->ioctl_mutex);
+ if (err < 0)
+ break;
+
+@@ -1084,6 +1085,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ written += len;
+ }
+
++ out:
++ mutex_unlock(&client->ioctl_mutex);
+ return written ? written : err;
+ }
+
+@@ -1838,6 +1841,9 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
+ (! snd_seq_write_pool_allocated(client) ||
+ info->output_pool != client->pool->size)) {
+ if (snd_seq_write_pool_allocated(client)) {
++ /* is the pool in use? */
++ if (atomic_read(&client->pool->counter))
++ return -EBUSY;
+ /* remove all existing cells */
+ snd_seq_pool_mark_closing(client->pool);
+ snd_seq_queue_client_leave_cells(client->number);
+@@ -2260,7 +2266,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
+ if (! cptr->accept_output)
+ result = -EPERM;
+ else /* send it */
+- result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);
++ result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
++ atomic, hop, NULL);
+
+ snd_seq_client_unlock(cptr);
+ return result;
+diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
+index a8c2822e0198..72c0302a55d2 100644
+--- a/sound/core/seq/seq_fifo.c
++++ b/sound/core/seq/seq_fifo.c
+@@ -125,7 +125,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
+ return -EINVAL;
+
+ snd_use_lock_use(&f->use_lock);
+- err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
++ err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
+ if (err < 0) {
+ if ((err == -ENOMEM) || (err == -EAGAIN))
+ atomic_inc(&f->overflow);
+diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
+index f763682584a8..ab1112e90f88 100644
+--- a/sound/core/seq/seq_memory.c
++++ b/sound/core/seq/seq_memory.c
+@@ -220,7 +220,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell)
+ */
+ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
+ struct snd_seq_event_cell **cellp,
+- int nonblock, struct file *file)
++ int nonblock, struct file *file,
++ struct mutex *mutexp)
+ {
+ struct snd_seq_event_cell *cell;
+ unsigned long flags;
+@@ -244,7 +245,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&pool->output_sleep, &wait);
+ spin_unlock_irq(&pool->lock);
++ if (mutexp)
++ mutex_unlock(mutexp);
+ schedule();
++ if (mutexp)
++ mutex_lock(mutexp);
+ spin_lock_irq(&pool->lock);
+ remove_wait_queue(&pool->output_sleep, &wait);
+ /* interrupted? */
+@@ -287,7 +292,7 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
+ */
+ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
+ struct snd_seq_event_cell **cellp, int nonblock,
+- struct file *file)
++ struct file *file, struct mutex *mutexp)
+ {
+ int ncells, err;
+ unsigned int extlen;
+@@ -304,7 +309,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
+ if (ncells >= pool->total_elements)
+ return -ENOMEM;
+
+- err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
++ err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
+ if (err < 0)
+ return err;
+
+@@ -330,7 +335,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
+ int size = sizeof(struct snd_seq_event);
+ if (len < size)
+ size = len;
+- err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
++ err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
++ mutexp);
+ if (err < 0)
+ goto __error;
+ if (cell->event.data.ext.ptr == NULL)
+diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
+index 32f959c17786..3abe306c394a 100644
+--- a/sound/core/seq/seq_memory.h
++++ b/sound/core/seq/seq_memory.h
+@@ -66,7 +66,8 @@ struct snd_seq_pool {
+ void snd_seq_cell_free(struct snd_seq_event_cell *cell);
+
+ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
+- struct snd_seq_event_cell **cellp, int nonblock, struct file *file);
++ struct snd_seq_event_cell **cellp, int nonblock,
++ struct file *file, struct mutex *mutexp);
+
+ /* return number of unused (free) cells */
+ static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
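
Note: the new mutexp parameter lets snd_seq_cell_alloc() drop the caller's ioctl_mutex around schedule(); sleeping for a free cell while holding it would block the very paths that return cells to the pool. A runnable pthread model of the drop-sleep-retake ordering (all names invented, usleep() stands in for schedule(); build with -pthread):

    #include <pthread.h>
    #include <unistd.h>
    #include <stdio.h>

    static pthread_mutex_t ioctl_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t pool_lock  = PTHREAD_MUTEX_INITIALIZER;
    static int free_cells = 1;

    /* called with *outer held, as snd_seq_write() holds ioctl_mutex */
    static void alloc_cell_blocking(pthread_mutex_t *outer)
    {
        for (;;) {
            pthread_mutex_lock(&pool_lock);
            if (free_cells) {
                free_cells--;
                pthread_mutex_unlock(&pool_lock);
                return;
            }
            pthread_mutex_unlock(&pool_lock);

            /*
             * Sleeping with the outer lock held would starve the
             * consumer that needs it to return cells to the pool.
             * Drop it across the sleep and retake it afterwards,
             * the same dance mutexp performs around schedule().
             */
            if (outer)
                pthread_mutex_unlock(outer);
            usleep(1000);                 /* stand-in for schedule() */
            if (outer)
                pthread_mutex_lock(outer);
        }
    }

    int main(void)
    {
        pthread_mutex_lock(&ioctl_lock);
        alloc_cell_blocking(&ioctl_lock);
        pthread_mutex_unlock(&ioctl_lock);
        puts("got a cell");
        return 0;
    }
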
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 37e1cf8218ff..5b4dbcec6de8 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -957,6 +957,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
+ SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
++ SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
++ SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
+ SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 8fe38c18e29d..18bab5ffbe4a 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5152,6 +5152,16 @@ static void alc298_fixup_speaker_volume(struct hda_codec *codec,
+ }
+ }
+
++/* disable DAC3 (0x06) selection on NID 0x17 as it has no volume amp control */
++static void alc295_fixup_disable_dac3(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++ hda_nid_t conn[2] = { 0x02, 0x03 };
++ snd_hda_override_conn_list(codec, 0x17, 2, conn);
++ }
++}
++
+ /* Hook to update amp GPIO4 for automute */
+ static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
+@@ -5344,6 +5354,7 @@ enum {
+ ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
+ ALC255_FIXUP_DELL_SPK_NOISE,
+ ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++ ALC295_FIXUP_DISABLE_DAC3,
+ ALC280_FIXUP_HP_HEADSET_MIC,
+ ALC221_FIXUP_HP_FRONT_MIC,
+ ALC292_FIXUP_TPT460,
+@@ -5358,10 +5369,12 @@ enum {
+ ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE,
+ ALC233_FIXUP_LENOVO_MULTI_CODECS,
+ ALC294_FIXUP_LENOVO_MIC_LOCATION,
++ ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
+ ALC700_FIXUP_INTEL_REFERENCE,
+ ALC274_FIXUP_DELL_BIND_DACS,
+ ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
+ ALC298_FIXUP_TPT470_DOCK,
++ ALC255_FIXUP_DUMMY_LINEOUT_VERB,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -6076,6 +6089,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
+ },
++ [ALC295_FIXUP_DISABLE_DAC3] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc295_fixup_disable_dac3,
++ },
+ [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -6161,6 +6178,18 @@ static const struct hda_fixup alc269_fixups[] = {
+ { }
+ },
+ },
++ [ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x16, 0x0101102f }, /* Rear Headset HP */
++ { 0x19, 0x02a1913c }, /* use as Front headset mic, without its own jack detect */
++ { 0x1a, 0x01a19030 }, /* Rear Headset MIC */
++ { 0x1b, 0x02011020 },
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
++ },
+ [ALC700_FIXUP_INTEL_REFERENCE] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+@@ -6197,6 +6226,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
+ },
++ [ALC255_FIXUP_DUMMY_LINEOUT_VERB] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x14, 0x0201101f },
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -6245,10 +6283,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
++ SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
+ SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
++ SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+ SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
++ SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
+ SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -6386,9 +6427,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x2249, "Thinkpad", ALC292_FIXUP_TPT460),
+ SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+@@ -6750,7 +6793,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x12, 0x90a60120},
+ {0x14, 0x90170110},
+ {0x21, 0x0321101f}),
+- SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++ SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+ {0x12, 0xb7a60130},
+ {0x14, 0x90170110},
+ {0x21, 0x04211020}),
+diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
+index 57254f5b2779..694abc628e9b 100644
+--- a/tools/objtool/builtin-check.c
++++ b/tools/objtool/builtin-check.c
+@@ -29,7 +29,7 @@
+ #include "builtin.h"
+ #include "check.h"
+
+-bool no_fp, no_unreachable;
++bool no_fp, no_unreachable, retpoline, module;
+
+ static const char * const check_usage[] = {
+ "objtool check [<options>] file.o",
+@@ -39,6 +39,8 @@ static const char * const check_usage[] = {
+ const struct option check_options[] = {
+ OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"),
+ OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
++ OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
++ OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
+ OPT_END(),
+ };
+
+@@ -53,5 +55,5 @@ int cmd_check(int argc, const char **argv)
+
+ objname = argv[0];
+
+- return check(objname, no_fp, no_unreachable, false);
++ return check(objname, false);
+ }
+diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
+index 91e8e19ff5e0..77ea2b97117d 100644
+--- a/tools/objtool/builtin-orc.c
++++ b/tools/objtool/builtin-orc.c
+@@ -25,7 +25,6 @@
+ */
+
+ #include <string.h>
+-#include <subcmd/parse-options.h>
+ #include "builtin.h"
+ #include "check.h"
+
+@@ -36,9 +35,6 @@ static const char *orc_usage[] = {
+ NULL,
+ };
+
+-extern const struct option check_options[];
+-extern bool no_fp, no_unreachable;
+-
+ int cmd_orc(int argc, const char **argv)
+ {
+ const char *objname;
+@@ -54,7 +50,7 @@ int cmd_orc(int argc, const char **argv)
+
+ objname = argv[0];
+
+- return check(objname, no_fp, no_unreachable, true);
++ return check(objname, true);
+ }
+
+ if (!strcmp(argv[0], "dump")) {
+diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h
+index dd526067fed5..28ff40e19a14 100644
+--- a/tools/objtool/builtin.h
++++ b/tools/objtool/builtin.h
+@@ -17,6 +17,11 @@
+ #ifndef _BUILTIN_H
+ #define _BUILTIN_H
+
++#include <subcmd/parse-options.h>
++
++extern const struct option check_options[];
++extern bool no_fp, no_unreachable, retpoline, module;
++
+ extern int cmd_check(int argc, const char **argv);
+ extern int cmd_orc(int argc, const char **argv);
+
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index c7fb5c2392ee..9d01d0b1084e 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -18,6 +18,7 @@
+ #include <string.h>
+ #include <stdlib.h>
+
++#include "builtin.h"
+ #include "check.h"
+ #include "elf.h"
+ #include "special.h"
+@@ -33,7 +34,6 @@ struct alternative {
+ };
+
+ const char *objname;
+-static bool no_fp;
+ struct cfi_state initial_func_cfi;
+
+ struct instruction *find_insn(struct objtool_file *file,
+@@ -496,6 +496,7 @@ static int add_jump_destinations(struct objtool_file *file)
+ * disguise, so convert them accordingly.
+ */
+ insn->type = INSN_JUMP_DYNAMIC;
++ insn->retpoline_safe = true;
+ continue;
+ } else {
+ /* sibling call */
+@@ -547,7 +548,8 @@ static int add_call_destinations(struct objtool_file *file)
+ if (!insn->call_dest && !insn->ignore) {
+ WARN_FUNC("unsupported intra-function call",
+ insn->sec, insn->offset);
+- WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
++ if (retpoline)
++ WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
+ return -1;
+ }
+
+@@ -922,7 +924,11 @@ static struct rela *find_switch_table(struct objtool_file *file,
+ if (find_symbol_containing(file->rodata, text_rela->addend))
+ continue;
+
+- return find_rela_by_dest(file->rodata, text_rela->addend);
++ rodata_rela = find_rela_by_dest(file->rodata, text_rela->addend);
++ if (!rodata_rela)
++ continue;
++
++ return rodata_rela;
+ }
+
+ return NULL;
+@@ -1107,6 +1113,41 @@ static int read_unwind_hints(struct objtool_file *file)
+ return 0;
+ }
+
++static int read_retpoline_hints(struct objtool_file *file)
++{
++ struct section *sec;
++ struct instruction *insn;
++ struct rela *rela;
++
++ sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
++ if (!sec)
++ return 0;
++
++ list_for_each_entry(rela, &sec->rela_list, list) {
++ if (rela->sym->type != STT_SECTION) {
++ WARN("unexpected relocation symbol type in %s", sec->name);
++ return -1;
++ }
++
++ insn = find_insn(file, rela->sym->sec, rela->addend);
++ if (!insn) {
++ WARN("bad .discard.retpoline_safe entry");
++ return -1;
++ }
++
++ if (insn->type != INSN_JUMP_DYNAMIC &&
++ insn->type != INSN_CALL_DYNAMIC) {
++ WARN_FUNC("retpoline_safe hint not an indirect jump/call",
++ insn->sec, insn->offset);
++ return -1;
++ }
++
++ insn->retpoline_safe = true;
++ }
++
++ return 0;
++}
++
+ static int decode_sections(struct objtool_file *file)
+ {
+ int ret;
+@@ -1145,6 +1186,10 @@ static int decode_sections(struct objtool_file *file)
+ if (ret)
+ return ret;
+
++ ret = read_retpoline_hints(file);
++ if (ret)
++ return ret;
++
+ return 0;
+ }
+
+@@ -1890,6 +1935,38 @@ static int validate_unwind_hints(struct objtool_file *file)
+ return warnings;
+ }
+
++static int validate_retpoline(struct objtool_file *file)
++{
++ struct instruction *insn;
++ int warnings = 0;
++
++ for_each_insn(file, insn) {
++ if (insn->type != INSN_JUMP_DYNAMIC &&
++ insn->type != INSN_CALL_DYNAMIC)
++ continue;
++
++ if (insn->retpoline_safe)
++ continue;
++
++ /*
++		 * .init.text code is run before userspace and thus doesn't
++		 * strictly need retpolines, except for modules: those are
++		 * loaded late, so they very much do need retpolines in
++		 * their .init.text.
++ */
++ if (!strcmp(insn->sec->name, ".init.text") && !module)
++ continue;
++
++ WARN_FUNC("indirect %s found in RETPOLINE build",
++ insn->sec, insn->offset,
++ insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
++
++ warnings++;
++ }
++
++ return warnings;
++}
++
+ static bool is_kasan_insn(struct instruction *insn)
+ {
+ return (insn->type == INSN_CALL &&
+@@ -2021,13 +2098,12 @@ static void cleanup(struct objtool_file *file)
+ elf_close(file->elf);
+ }
+
+-int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc)
++int check(const char *_objname, bool orc)
+ {
+ struct objtool_file file;
+ int ret, warnings = 0;
+
+ objname = _objname;
+- no_fp = _no_fp;
+
+ file.elf = elf_open(objname, orc ? O_RDWR : O_RDONLY);
+ if (!file.elf)
+@@ -2051,6 +2127,13 @@ int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc)
+ if (list_empty(&file.insn_list))
+ goto out;
+
++ if (retpoline) {
++ ret = validate_retpoline(&file);
++ if (ret < 0)
++ return ret;
++ warnings += ret;
++ }
++
+ ret = validate_functions(&file);
+ if (ret < 0)
+ goto out;
+diff --git a/tools/objtool/check.h b/tools/objtool/check.h
+index 23a1d065cae1..c6b68fcb926f 100644
+--- a/tools/objtool/check.h
++++ b/tools/objtool/check.h
+@@ -45,6 +45,7 @@ struct instruction {
+ unsigned char type;
+ unsigned long immediate;
+ bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
++ bool retpoline_safe;
+ struct symbol *call_dest;
+ struct instruction *jump_dest;
+ struct instruction *first_jump_src;
+@@ -63,7 +64,7 @@ struct objtool_file {
+ bool ignore_unreachables, c_file, hints;
+ };
+
+-int check(const char *objname, bool no_fp, bool no_unreachable, bool orc);
++int check(const char *objname, bool orc);
+
+ struct instruction *find_insn(struct objtool_file *file,
+ struct section *sec, unsigned long offset);
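
Note: read_retpoline_hints() above consumes addresses that kernel code drops into a ".discard.retpoline_safe" section via an annotation macro added elsewhere in this series. A minimal userspace sketch of the same section-based annotation mechanism (x86-64, GNU toolchain assumed; the macro and section names here are invented, and an identifier-safe, writable section is used so ld emits the __start_/__stop_ bounds and PIE relocations link cleanly):

    #include <stdio.h>

    /* record the current code address in a dedicated annotation section */
    #define ANNOTATE_SAFE() \
        __asm__("999:\n\t" \
                ".pushsection retpoline_safe, \"aw\"\n\t" \
                ".quad 999b\n\t" \
                ".popsection\n\t")

    extern const unsigned long __start_retpoline_safe[];
    extern const unsigned long __stop_retpoline_safe[];

    static void hello(void) { puts("hello"); }

    int main(void)
    {
        void (*fp)(void) = hello;

        ANNOTATE_SAFE();        /* vet the indirect call that follows */
        fp();

        for (const unsigned long *p = __start_retpoline_safe;
             p < __stop_retpoline_safe; p++)
            printf("annotated: %#lx\n", *p);
        return 0;
    }
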
+diff --git a/tools/perf/util/trigger.h b/tools/perf/util/trigger.h
+index 370138e7e35c..88223bc7c82b 100644
+--- a/tools/perf/util/trigger.h
++++ b/tools/perf/util/trigger.h
+@@ -12,7 +12,7 @@
+ * States and transits:
+ *
+ *
+- * OFF--(on)--> READY --(hit)--> HIT
++ * OFF--> ON --> READY --(hit)--> HIT
+ * ^ |
+ * | (ready)
+ * | |
+@@ -27,8 +27,9 @@ struct trigger {
+ volatile enum {
+ TRIGGER_ERROR = -2,
+ TRIGGER_OFF = -1,
+- TRIGGER_READY = 0,
+- TRIGGER_HIT = 1,
++ TRIGGER_ON = 0,
++ TRIGGER_READY = 1,
++ TRIGGER_HIT = 2,
+ } state;
+ const char *name;
+ };
+@@ -50,7 +51,7 @@ static inline bool trigger_is_error(struct trigger *t)
+ static inline void trigger_on(struct trigger *t)
+ {
+ TRIGGER_WARN_ONCE(t, TRIGGER_OFF);
+- t->state = TRIGGER_READY;
++ t->state = TRIGGER_ON;
+ }
+
+ static inline void trigger_ready(struct trigger *t)