-rw-r--r--  0000_README             |    4
-rw-r--r--  1008_linux-5.14.9.patch | 6265
2 files changed, 6269 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index dcc9f9aa..21444f8a 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch: 1007_linux-5.14.8.patch
From: http://www.kernel.org
Desc: Linux 5.14.8
+Patch: 1008_linux-5.14.9.patch
+From: http://www.kernel.org
+Desc: Linux 5.14.9
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1008_linux-5.14.9.patch b/1008_linux-5.14.9.patch
new file mode 100644
index 00000000..e5d16b95
--- /dev/null
+++ b/1008_linux-5.14.9.patch
@@ -0,0 +1,6265 @@
+diff --git a/Makefile b/Makefile
+index d6b4737194b88..50c17e63c54ef 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 14
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Opossums on Parade
+
+diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
+index 0fab5ac907758..c9cb554fbe54c 100644
+--- a/arch/alpha/include/asm/io.h
++++ b/arch/alpha/include/asm/io.h
+@@ -60,7 +60,7 @@ extern inline void set_hae(unsigned long new_hae)
+ * Change virtual addresses to physical addresses and vv.
+ */
+ #ifdef USE_48_BIT_KSEG
+-static inline unsigned long virt_to_phys(void *address)
++static inline unsigned long virt_to_phys(volatile void *address)
+ {
+ return (unsigned long)address - IDENT_ADDR;
+ }
+@@ -70,7 +70,7 @@ static inline void * phys_to_virt(unsigned long address)
+ return (void *) (address + IDENT_ADDR);
+ }
+ #else
+-static inline unsigned long virt_to_phys(void *address)
++static inline unsigned long virt_to_phys(volatile void *address)
+ {
+ unsigned long phys = (unsigned long)address;
+
+@@ -106,7 +106,7 @@ static inline void * phys_to_virt(unsigned long address)
+ extern unsigned long __direct_map_base;
+ extern unsigned long __direct_map_size;
+
+-static inline unsigned long __deprecated virt_to_bus(void *address)
++static inline unsigned long __deprecated virt_to_bus(volatile void *address)
+ {
+ unsigned long phys = virt_to_phys(address);
+ unsigned long bus = phys + __direct_map_base;
+diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
+index 89faca0e740d0..bfa58409a4d4d 100644
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -525,6 +525,11 @@ alternative_endif
+ #define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
+ #endif
+
++#ifdef CONFIG_KASAN_HW_TAGS
++#define EXPORT_SYMBOL_NOHWKASAN(name)
++#else
++#define EXPORT_SYMBOL_NOHWKASAN(name) EXPORT_SYMBOL_NOKASAN(name)
++#endif
+ /*
+ * Emit a 64-bit absolute little endian symbol reference in a way that
+ * ensures that it will be resolved at build time, even when building a
+diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
+index 58c7f80f55961..c724a288a412d 100644
+--- a/arch/arm64/include/asm/mte.h
++++ b/arch/arm64/include/asm/mte.h
+@@ -105,11 +105,17 @@ void mte_check_tfsr_el1(void);
+
+ static inline void mte_check_tfsr_entry(void)
+ {
++ if (!system_supports_mte())
++ return;
++
+ mte_check_tfsr_el1();
+ }
+
+ static inline void mte_check_tfsr_exit(void)
+ {
++ if (!system_supports_mte())
++ return;
++
+ /*
+ * The asynchronous faults are sync'ed automatically with
+ * TFSR_EL1 on kernel entry but for exit an explicit dsb()
+diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
+index 3a3264ff47b97..95f7686b728d7 100644
+--- a/arch/arm64/include/asm/string.h
++++ b/arch/arm64/include/asm/string.h
+@@ -12,11 +12,13 @@ extern char *strrchr(const char *, int c);
+ #define __HAVE_ARCH_STRCHR
+ extern char *strchr(const char *, int c);
+
++#ifndef CONFIG_KASAN_HW_TAGS
+ #define __HAVE_ARCH_STRCMP
+ extern int strcmp(const char *, const char *);
+
+ #define __HAVE_ARCH_STRNCMP
+ extern int strncmp(const char *, const char *, __kernel_size_t);
++#endif
+
+ #define __HAVE_ARCH_STRLEN
+ extern __kernel_size_t strlen(const char *);
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 0ead8bfedf201..92c99472d2c90 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1500,9 +1500,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ /*
+ * For reasons that aren't entirely clear, enabling KPTI on Cavium
+ * ThunderX leads to apparent I-cache corruption of kernel text, which
+- * ends as well as you might imagine. Don't even try.
++ * ends as well as you might imagine. Don't even try. We cannot rely
++ * on the cpus_have_*cap() helpers here to detect the CPU erratum
++ * because cpucap detection order may change. However, since we know
++ * affected CPUs are always in a homogeneous configuration, it is
++ * safe to rely on this_cpu_has_cap() here.
+ */
+- if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
++ if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+ str = "ARM64_WORKAROUND_CAVIUM_27456";
+ __kpti_forced = -1;
+ }
+diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
+index 36f51b0e438a6..d223df11fc00b 100644
+--- a/arch/arm64/kernel/mte.c
++++ b/arch/arm64/kernel/mte.c
+@@ -173,12 +173,7 @@ bool mte_report_once(void)
+ #ifdef CONFIG_KASAN_HW_TAGS
+ void mte_check_tfsr_el1(void)
+ {
+- u64 tfsr_el1;
+-
+- if (!system_supports_mte())
+- return;
+-
+- tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
++ u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
+
+ if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
+ /*
+@@ -221,6 +216,9 @@ void mte_thread_init_user(void)
+
+ void mte_thread_switch(struct task_struct *next)
+ {
++ if (!system_supports_mte())
++ return;
++
+ /*
+ * Check if an async tag exception occurred at EL1.
+ *
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index c8989b999250d..c858b857c1ecf 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -60,7 +60,7 @@
+
+ #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
+ #include <linux/stackprotector.h>
+-unsigned long __stack_chk_guard __read_mostly;
++unsigned long __stack_chk_guard __ro_after_init;
+ EXPORT_SYMBOL(__stack_chk_guard);
+ #endif
+
+diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
+index d7bee210a798a..83bcad72ec972 100644
+--- a/arch/arm64/lib/strcmp.S
++++ b/arch/arm64/lib/strcmp.S
+@@ -173,4 +173,4 @@ L(done):
+ ret
+
+ SYM_FUNC_END_PI(strcmp)
+-EXPORT_SYMBOL_NOKASAN(strcmp)
++EXPORT_SYMBOL_NOHWKASAN(strcmp)
+diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
+index 48d44f7fddb13..e42bcfcd37e6f 100644
+--- a/arch/arm64/lib/strncmp.S
++++ b/arch/arm64/lib/strncmp.S
+@@ -258,4 +258,4 @@ L(ret0):
+ ret
+
+ SYM_FUNC_END_PI(strncmp)
+-EXPORT_SYMBOL_NOKASAN(strncmp)
++EXPORT_SYMBOL_NOHWKASAN(strncmp)
+diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h
+index 911826ea83ce1..80eb2396d01eb 100644
+--- a/arch/m68k/include/asm/raw_io.h
++++ b/arch/m68k/include/asm/raw_io.h
+@@ -17,21 +17,21 @@
+ * two accesses to memory, which may be undesirable for some devices.
+ */
+ #define in_8(addr) \
+- ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
++ ({ u8 __v = (*(__force volatile u8 *) (unsigned long)(addr)); __v; })
+ #define in_be16(addr) \
+- ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
++ ({ u16 __v = (*(__force volatile u16 *) (unsigned long)(addr)); __v; })
+ #define in_be32(addr) \
+- ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
++ ({ u32 __v = (*(__force volatile u32 *) (unsigned long)(addr)); __v; })
+ #define in_le16(addr) \
+- ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
++ ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (unsigned long)(addr)); __v; })
+ #define in_le32(addr) \
+- ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })
++ ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (unsigned long)(addr)); __v; })
+
+-#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
+-#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
+-#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
+-#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
+-#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))
++#define out_8(addr,b) (void)((*(__force volatile u8 *) (unsigned long)(addr)) = (b))
++#define out_be16(addr,w) (void)((*(__force volatile u16 *) (unsigned long)(addr)) = (w))
++#define out_be32(addr,l) (void)((*(__force volatile u32 *) (unsigned long)(addr)) = (l))
++#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (unsigned long)(addr)) = cpu_to_le16(w))
++#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (unsigned long)(addr)) = cpu_to_le32(l))
+
+ #define raw_inb in_8
+ #define raw_inw in_be16
+diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
+index d00313d1274e8..0561568f7b489 100644
+--- a/arch/parisc/include/asm/page.h
++++ b/arch/parisc/include/asm/page.h
+@@ -184,7 +184,7 @@ extern int npmem_ranges;
+ #include <asm-generic/getorder.h>
+ #include <asm/pdc.h>
+
+-#define PAGE0 ((struct zeropage *)__PAGE_OFFSET)
++#define PAGE0 ((struct zeropage *)absolute_pointer(__PAGE_OFFSET))
+
+ /* DEFINITION OF THE ZERO-PAGE (PAG0) */
+ /* based on work by Jason Eckhardt (jason@equator.com) */
+diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
+index 8e1d72a167594..7ceae24b0ca99 100644
+--- a/arch/sparc/kernel/ioport.c
++++ b/arch/sparc/kernel/ioport.c
+@@ -356,7 +356,9 @@ err_nomem:
+ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs)
+ {
+- if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
++ size = PAGE_ALIGN(size);
++
++ if (!sparc_dma_free_resource(cpu_addr, size))
+ return;
+
+ dma_make_coherent(dma_addr, size);
+diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
+index 8e645ddac58e2..30f171b7b00c2 100644
+--- a/arch/sparc/kernel/mdesc.c
++++ b/arch/sparc/kernel/mdesc.c
+@@ -39,6 +39,7 @@ struct mdesc_hdr {
+ u32 node_sz; /* node block size */
+ u32 name_sz; /* name block size */
+ u32 data_sz; /* data block size */
++ char data[];
+ } __attribute__((aligned(16)));
+
+ struct mdesc_elem {
+@@ -612,7 +613,7 @@ EXPORT_SYMBOL(mdesc_get_node_info);
+
+ static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
+ {
+- return (struct mdesc_elem *) (mdesc + 1);
++ return (struct mdesc_elem *) mdesc->data;
+ }
+
+ static void *name_block(struct mdesc_hdr *mdesc)
+diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
+index 5c7bcaa796232..1d5f14aff5f6f 100644
+--- a/arch/x86/include/asm/pkeys.h
++++ b/arch/x86/include/asm/pkeys.h
+@@ -2,8 +2,6 @@
+ #ifndef _ASM_X86_PKEYS_H
+ #define _ASM_X86_PKEYS_H
+
+-#define ARCH_DEFAULT_PKEY 0
+-
+ /*
+ * If more than 16 keys are ever supported, a thorough audit
+ * will be necessary to ensure that the types that store key
+diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
+index f3fbb84ff8a77..68c257a3de0d3 100644
+--- a/arch/x86/include/asm/special_insns.h
++++ b/arch/x86/include/asm/special_insns.h
+@@ -275,7 +275,7 @@ static inline int enqcmds(void __iomem *dst, const void *src)
+ {
+ const struct { char _[64]; } *__src = src;
+ struct { char _[64]; } __iomem *__dst = dst;
+- int zf;
++ bool zf;
+
+ /*
+ * ENQCMDS %(rdx), rax
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index bff3a784aec5b..d103e8489ec17 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -839,6 +839,20 @@ void __init setup_arch(char **cmdline_p)
+
+ x86_init.oem.arch_setup();
+
++ /*
++ * Do some memory reservations *before* memory is added to memblock, so
++ * memblock allocations won't overwrite it.
++ *
++ * After this point, everything still needed from the boot loader or
++ * firmware or kernel text should be early reserved or marked not RAM in
++ * e820. All other memory is free game.
++ *
++ * This call needs to happen before e820__memory_setup() which calls the
++ * xen_memory_setup() on Xen dom0 which relies on the fact that those
++ * early reservations have happened already.
++ */
++ early_reserve_memory();
++
+ iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
+ e820__memory_setup();
+ parse_setup_data();
+@@ -885,18 +899,6 @@ void __init setup_arch(char **cmdline_p)
+
+ parse_early_param();
+
+- /*
+- * Do some memory reservations *before* memory is added to
+- * memblock, so memblock allocations won't overwrite it.
+- * Do it after early param, so we could get (unlikely) panic from
+- * serial.
+- *
+- * After this point everything still needed from the boot loader or
+- * firmware or kernel text should be early reserved or marked not
+- * RAM in e820. All other memory is free game.
+- */
+- early_reserve_memory();
+-
+ #ifdef CONFIG_MEMORY_HOTPLUG
+ /*
+ * Memory used by the kernel cannot be hot-removed because Linux
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index b2eefdefc1083..84a2c8c4af735 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -710,7 +710,8 @@ oops:
+
+ static noinline void
+ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
+- unsigned long address, int signal, int si_code)
++ unsigned long address, int signal, int si_code,
++ u32 pkey)
+ {
+ WARN_ON_ONCE(user_mode(regs));
+
+@@ -735,8 +736,12 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
+
+ set_signal_archinfo(address, error_code);
+
+- /* XXX: hwpoison faults will set the wrong code. */
+- force_sig_fault(signal, si_code, (void __user *)address);
++ if (si_code == SEGV_PKUERR) {
++ force_sig_pkuerr((void __user *)address, pkey);
++ } else {
++ /* XXX: hwpoison faults will set the wrong code. */
++ force_sig_fault(signal, si_code, (void __user *)address);
++ }
+ }
+
+ /*
+@@ -798,7 +803,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ struct task_struct *tsk = current;
+
+ if (!user_mode(regs)) {
+- kernelmode_fixup_or_oops(regs, error_code, address, pkey, si_code);
++ kernelmode_fixup_or_oops(regs, error_code, address,
++ SIGSEGV, si_code, pkey);
+ return;
+ }
+
+@@ -930,7 +936,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+ {
+ /* Kernel mode? Handle exceptions or die: */
+ if (!user_mode(regs)) {
+- kernelmode_fixup_or_oops(regs, error_code, address, SIGBUS, BUS_ADRERR);
++ kernelmode_fixup_or_oops(regs, error_code, address,
++ SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
+ return;
+ }
+
+@@ -1396,7 +1403,8 @@ good_area:
+ */
+ if (!user_mode(regs))
+ kernelmode_fixup_or_oops(regs, error_code, address,
+- SIGBUS, BUS_ADRERR);
++ SIGBUS, BUS_ADRERR,
++ ARCH_DEFAULT_PKEY);
+ return;
+ }
+
+@@ -1416,7 +1424,8 @@ good_area:
+ return;
+
+ if (fatal_signal_pending(current) && !user_mode(regs)) {
+- kernelmode_fixup_or_oops(regs, error_code, address, 0, 0);
++ kernelmode_fixup_or_oops(regs, error_code, address,
++ 0, 0, ARCH_DEFAULT_PKEY);
+ return;
+ }
+
+@@ -1424,7 +1433,8 @@ good_area:
+ /* Kernel mode? Handle exceptions or die: */
+ if (!user_mode(regs)) {
+ kernelmode_fixup_or_oops(regs, error_code, address,
+- SIGSEGV, SEGV_MAPERR);
++ SIGSEGV, SEGV_MAPERR,
++ ARCH_DEFAULT_PKEY);
+ return;
+ }
+
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 475d9c71b1713..d8aaccc9a246d 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -756,8 +756,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
+ preempt_enable();
+ }
+
+-static void xen_convert_trap_info(const struct desc_ptr *desc,
+- struct trap_info *traps)
++static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
++ struct trap_info *traps, bool full)
+ {
+ unsigned in, out, count;
+
+@@ -767,17 +767,18 @@ static void xen_convert_trap_info(const struct desc_ptr *desc,
+ for (in = out = 0; in < count; in++) {
+ gate_desc *entry = (gate_desc *)(desc->address) + in;
+
+- if (cvt_gate_to_trap(in, entry, &traps[out]))
++ if (cvt_gate_to_trap(in, entry, &traps[out]) || full)
+ out++;
+ }
+- traps[out].address = 0;
++
++ return out;
+ }
+
+ void xen_copy_trap_info(struct trap_info *traps)
+ {
+ const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
+
+- xen_convert_trap_info(desc, traps);
++ xen_convert_trap_info(desc, traps, true);
+ }
+
+ /* Load a new IDT into Xen. In principle this can be per-CPU, so we
+@@ -787,6 +788,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
+ {
+ static DEFINE_SPINLOCK(lock);
+ static struct trap_info traps[257];
++ unsigned out;
+
+ trace_xen_cpu_load_idt(desc);
+
+@@ -794,7 +796,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
+
+ memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
+
+- xen_convert_trap_info(desc, traps);
++ out = xen_convert_trap_info(desc, traps, false);
++ memset(&traps[out], 0, sizeof(traps[0]));
+
+ xen_mc_flush();
+ if (HYPERVISOR_set_trap_table(traps))
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 26446f97deee4..28e11decbac58 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1385,10 +1385,14 @@ enomem:
+ /* alloc failed, nothing's initialized yet, free everything */
+ spin_lock_irq(&q->queue_lock);
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
++ struct blkcg *blkcg = blkg->blkcg;
++
++ spin_lock(&blkcg->lock);
+ if (blkg->pd[pol->plid]) {
+ pol->pd_free_fn(blkg->pd[pol->plid]);
+ blkg->pd[pol->plid] = NULL;
+ }
++ spin_unlock(&blkcg->lock);
+ }
+ spin_unlock_irq(&q->queue_lock);
+ ret = -ENOMEM;
+@@ -1420,12 +1424,16 @@ void blkcg_deactivate_policy(struct request_queue *q,
+ __clear_bit(pol->plid, q->blkcg_pols);
+
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
++ struct blkcg *blkcg = blkg->blkcg;
++
++ spin_lock(&blkcg->lock);
+ if (blkg->pd[pol->plid]) {
+ if (pol->pd_offline_fn)
+ pol->pd_offline_fn(blkg->pd[pol->plid]);
+ pol->pd_free_fn(blkg->pd[pol->plid]);
+ blkg->pd[pol->plid] = NULL;
+ }
++ spin_unlock(&blkcg->lock);
+ }
+
+ spin_unlock_irq(&q->queue_lock);
+diff --git a/block/blk-integrity.c b/block/blk-integrity.c
+index 410da060d1f5a..9e83159f5a527 100644
+--- a/block/blk-integrity.c
++++ b/block/blk-integrity.c
+@@ -426,8 +426,15 @@ EXPORT_SYMBOL(blk_integrity_register);
+ */
+ void blk_integrity_unregister(struct gendisk *disk)
+ {
++ struct blk_integrity *bi = &disk->queue->integrity;
++
++ if (!bi->profile)
++ return;
++
++ /* ensure all bios are off the integrity workqueue */
++ blk_flush_integrity();
+ blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
+- memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
++ memset(bi, 0, sizeof(*bi));
+ }
+ EXPORT_SYMBOL(blk_integrity_unregister);
+
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index 86f87346232a6..ff5caeb825429 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -208,7 +208,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
+
+ spin_lock_irqsave(&tags->lock, flags);
+ rq = tags->rqs[bitnr];
+- if (!rq || !refcount_inc_not_zero(&rq->ref))
++ if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
+ rq = NULL;
+ spin_unlock_irqrestore(&tags->lock, flags);
+ return rq;
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index bcec598b89f23..9edb776249efd 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -1852,6 +1852,7 @@ static void binder_deferred_fd_close(int fd)
+ }
+
+ static void binder_transaction_buffer_release(struct binder_proc *proc,
++ struct binder_thread *thread,
+ struct binder_buffer *buffer,
+ binder_size_t failed_at,
+ bool is_failure)
+@@ -2011,8 +2012,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
+ &proc->alloc, &fd, buffer,
+ offset, sizeof(fd));
+ WARN_ON(err);
+- if (!err)
++ if (!err) {
+ binder_deferred_fd_close(fd);
++ /*
++ * Need to make sure the thread goes
++ * back to userspace to complete the
++ * deferred close
++ */
++ if (thread)
++ thread->looper_need_return = true;
++ }
+ }
+ } break;
+ default:
+@@ -3038,9 +3047,8 @@ static void binder_transaction(struct binder_proc *proc,
+ if (reply) {
+ binder_enqueue_thread_work(thread, tcomplete);
+ binder_inner_proc_lock(target_proc);
+- if (target_thread->is_dead || target_proc->is_frozen) {
+- return_error = target_thread->is_dead ?
+- BR_DEAD_REPLY : BR_FROZEN_REPLY;
++ if (target_thread->is_dead) {
++ return_error = BR_DEAD_REPLY;
+ binder_inner_proc_unlock(target_proc);
+ goto err_dead_proc_or_thread;
+ }
+@@ -3105,7 +3113,7 @@ err_bad_parent:
+ err_copy_data_failed:
+ binder_free_txn_fixups(t);
+ trace_binder_transaction_failed_buffer_release(t->buffer);
+- binder_transaction_buffer_release(target_proc, t->buffer,
++ binder_transaction_buffer_release(target_proc, NULL, t->buffer,
+ buffer_offset, true);
+ if (target_node)
+ binder_dec_node_tmpref(target_node);
+@@ -3184,7 +3192,9 @@ err_invalid_target_handle:
+ * Cleanup buffer and free it.
+ */
+ static void
+-binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
++binder_free_buf(struct binder_proc *proc,
++ struct binder_thread *thread,
++ struct binder_buffer *buffer)
+ {
+ binder_inner_proc_lock(proc);
+ if (buffer->transaction) {
+@@ -3212,7 +3222,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
+ binder_node_inner_unlock(buf_node);
+ }
+ trace_binder_transaction_buffer_release(buffer);
+- binder_transaction_buffer_release(proc, buffer, 0, false);
++ binder_transaction_buffer_release(proc, thread, buffer, 0, false);
+ binder_alloc_free_buf(&proc->alloc, buffer);
+ }
+
+@@ -3414,7 +3424,7 @@ static int binder_thread_write(struct binder_proc *proc,
+ proc->pid, thread->pid, (u64)data_ptr,
+ buffer->debug_id,
+ buffer->transaction ? "active" : "finished");
+- binder_free_buf(proc, buffer);
++ binder_free_buf(proc, thread, buffer);
+ break;
+ }
+
+@@ -4107,7 +4117,7 @@ retry:
+ buffer->transaction = NULL;
+ binder_cleanup_transaction(t, "fd fixups failed",
+ BR_FAILED_REPLY);
+- binder_free_buf(proc, buffer);
++ binder_free_buf(proc, thread, buffer);
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
+ proc->pid, thread->pid,
+@@ -4648,6 +4658,22 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
+ return 0;
+ }
+
++static bool binder_txns_pending_ilocked(struct binder_proc *proc)
++{
++ struct rb_node *n;
++ struct binder_thread *thread;
++
++ if (proc->outstanding_txns > 0)
++ return true;
++
++ for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
++ thread = rb_entry(n, struct binder_thread, rb_node);
++ if (thread->transaction_stack)
++ return true;
++ }
++ return false;
++}
++
+ static int binder_ioctl_freeze(struct binder_freeze_info *info,
+ struct binder_proc *target_proc)
+ {
+@@ -4679,8 +4705,13 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
+ (!target_proc->outstanding_txns),
+ msecs_to_jiffies(info->timeout_ms));
+
+- if (!ret && target_proc->outstanding_txns)
+- ret = -EAGAIN;
++ /* Check pending transactions that wait for reply */
++ if (ret >= 0) {
++ binder_inner_proc_lock(target_proc);
++ if (binder_txns_pending_ilocked(target_proc))
++ ret = -EAGAIN;
++ binder_inner_proc_unlock(target_proc);
++ }
+
+ if (ret < 0) {
+ binder_inner_proc_lock(target_proc);
+@@ -4696,6 +4727,7 @@ static int binder_ioctl_get_freezer_info(
+ {
+ struct binder_proc *target_proc;
+ bool found = false;
++ __u32 txns_pending;
+
+ info->sync_recv = 0;
+ info->async_recv = 0;
+@@ -4705,7 +4737,9 @@ static int binder_ioctl_get_freezer_info(
+ if (target_proc->pid == info->pid) {
+ found = true;
+ binder_inner_proc_lock(target_proc);
+- info->sync_recv |= target_proc->sync_recv;
++ txns_pending = binder_txns_pending_ilocked(target_proc);
++ info->sync_recv |= target_proc->sync_recv |
++ (txns_pending << 1);
+ info->async_recv |= target_proc->async_recv;
+ binder_inner_proc_unlock(target_proc);
+ }
+diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
+index 810c0b84d3f81..402c4d4362a83 100644
+--- a/drivers/android/binder_internal.h
++++ b/drivers/android/binder_internal.h
+@@ -378,6 +378,8 @@ struct binder_ref {
+ * binder transactions
+ * (protected by @inner_lock)
+ * @sync_recv: process received sync transactions since last frozen
++ * bit 0: received sync transaction after being frozen
++ * bit 1: new pending sync transaction during freezing
+ * (protected by @inner_lock)
+ * @async_recv: process received async transactions since last frozen
+ * (protected by @inner_lock)
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index d1f1a82401207..bdb50a06c82ae 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -1113,6 +1113,9 @@ int device_create_managed_software_node(struct device *dev,
+ to_swnode(fwnode)->managed = true;
+ set_secondary_fwnode(dev, fwnode);
+
++ if (device_is_registered(dev))
++ software_node_notify(dev, KOBJ_ADD);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(device_create_managed_software_node);
+diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
+index df77b6bf5c641..763cea8418f8e 100644
+--- a/drivers/comedi/comedi_fops.c
++++ b/drivers/comedi/comedi_fops.c
+@@ -3090,6 +3090,7 @@ static int compat_insnlist(struct file *file, unsigned long arg)
+ mutex_lock(&dev->mutex);
+ rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file);
+ mutex_unlock(&dev->mutex);
++ kfree(insns);
+ return rc;
+ }
+
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index bb4549959b113..e7cd3882bda4d 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -3251,11 +3251,15 @@ static int __init intel_pstate_init(void)
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return -ENODEV;
+
+- if (no_load)
+- return -ENODEV;
+-
+ id = x86_match_cpu(hwp_support_ids);
+ if (id) {
++ bool hwp_forced = intel_pstate_hwp_is_enabled();
++
++ if (hwp_forced)
++ pr_info("HWP enabled by BIOS\n");
++ else if (no_load)
++ return -ENODEV;
++
+ copy_cpu_funcs(&core_funcs);
+ /*
+ * Avoid enabling HWP for processors without EPP support,
+@@ -3265,8 +3269,7 @@ static int __init intel_pstate_init(void)
+ * If HWP is enabled already, though, there is no choice but to
+ * deal with it.
+ */
+- if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
+- intel_pstate_hwp_is_enabled()) {
++ if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
+ hwp_active++;
+ hwp_mode_bdw = id->driver_data;
+ intel_pstate.attr = hwp_cpufreq_attrs;
+@@ -3278,7 +3281,11 @@ static int __init intel_pstate_init(void)
+
+ goto hwp_cpu_matched;
+ }
++ pr_info("HWP not enabled\n");
+ } else {
++ if (no_load)
++ return -ENODEV;
++
+ id = x86_match_cpu(intel_pstate_cpu_ids);
+ if (!id) {
+ pr_info("CPU model not supported\n");
+@@ -3357,10 +3364,9 @@ static int __init intel_pstate_setup(char *str)
+ else if (!strcmp(str, "passive"))
+ default_driver = &intel_cpufreq;
+
+- if (!strcmp(str, "no_hwp")) {
+- pr_info("HWP disabled\n");
++ if (!strcmp(str, "no_hwp"))
+ no_hwp = 1;
+- }
++
+ if (!strcmp(str, "force"))
+ force_load = 1;
+ if (!strcmp(str, "hwp_only"))
+diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c
+index fc1153ab1ebbc..b8a7d9594afd4 100644
+--- a/drivers/edac/dmc520_edac.c
++++ b/drivers/edac/dmc520_edac.c
+@@ -464,7 +464,7 @@ static void dmc520_init_csrow(struct mem_ctl_info *mci)
+ dimm->grain = pvt->mem_width_in_bytes;
+ dimm->dtype = dt;
+ dimm->mtype = mt;
+- dimm->edac_mode = EDAC_FLAG_SECDED;
++ dimm->edac_mode = EDAC_SECDED;
+ dimm->nr_pages = pages_per_rank / csi->nr_channels;
+ }
+ }
+diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
+index 7e7146b22c160..7d08627e738b3 100644
+--- a/drivers/edac/synopsys_edac.c
++++ b/drivers/edac/synopsys_edac.c
+@@ -782,7 +782,7 @@ static void init_csrows(struct mem_ctl_info *mci)
+
+ for (j = 0; j < csi->nr_channels; j++) {
+ dimm = csi->channels[j]->dimm;
+- dimm->edac_mode = EDAC_FLAG_SECDED;
++ dimm->edac_mode = EDAC_SECDED;
+ dimm->mtype = p_data->get_mtype(priv->baseaddr);
+ dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
+ dimm->grain = SYNPS_EDAC_ERR_GRAIN;
+diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
+index 1afb41aa20d71..ea2ec3c6815cb 100644
+--- a/drivers/fpga/machxo2-spi.c
++++ b/drivers/fpga/machxo2-spi.c
+@@ -225,8 +225,10 @@ static int machxo2_write_init(struct fpga_manager *mgr,
+ goto fail;
+
+ get_status(spi, &status);
+- if (test_bit(FAIL, &status))
++ if (test_bit(FAIL, &status)) {
++ ret = -EINVAL;
+ goto fail;
++ }
+ dump_status_reg(&status);
+
+ spi_message_init(&msg);
+@@ -313,6 +315,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
+ dump_status_reg(&status);
+ if (!test_bit(DONE, &status)) {
+ machxo2_cleanup(mgr);
++ ret = -EINVAL;
+ goto fail;
+ }
+
+@@ -335,6 +338,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
+ break;
+ if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) {
+ machxo2_cleanup(mgr);
++ ret = -EINVAL;
+ goto fail;
+ }
+ } while (1);
+diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
+index f99f3c10bed03..39dca147d587a 100644
+--- a/drivers/gpio/gpio-uniphier.c
++++ b/drivers/gpio/gpio-uniphier.c
+@@ -184,7 +184,7 @@ static void uniphier_gpio_irq_mask(struct irq_data *data)
+
+ uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, 0);
+
+- return irq_chip_mask_parent(data);
++ irq_chip_mask_parent(data);
+ }
+
+ static void uniphier_gpio_irq_unmask(struct irq_data *data)
+@@ -194,7 +194,7 @@ static void uniphier_gpio_irq_unmask(struct irq_data *data)
+
+ uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, mask);
+
+- return irq_chip_unmask_parent(data);
++ irq_chip_unmask_parent(data);
+ }
+
+ static int uniphier_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 411525ac4cc45..47712b6903b51 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -313,9 +313,11 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
+
+ ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
+ if (ret)
+- gpiochip_free_own_desc(desc);
++ dev_warn(chip->parent,
++ "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
++ pin, ret);
+
+- return ret ? ERR_PTR(ret) : desc;
++ return desc;
+ }
+
+ static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 9e52948d49920..5a872adcfdb98 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -447,6 +447,7 @@ static const struct kfd_device_info navi10_device_info = {
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 145,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+@@ -465,6 +466,7 @@ static const struct kfd_device_info navi12_device_info = {
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 145,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+@@ -483,6 +485,7 @@ static const struct kfd_device_info navi14_device_info = {
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 145,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+@@ -501,6 +504,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = {
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 92,
+ .num_sdma_engines = 4,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+@@ -519,6 +523,7 @@ static const struct kfd_device_info navy_flounder_device_info = {
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 92,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+@@ -536,7 +541,8 @@ static const struct kfd_device_info vangogh_device_info = {
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+- .needs_pci_atomics = false,
++ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 92,
+ .num_sdma_engines = 1,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 2,
+@@ -555,6 +561,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = {
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 92,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+@@ -573,6 +580,7 @@ static const struct kfd_device_info beige_goby_device_info = {
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 92,
+ .num_sdma_engines = 1,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+@@ -590,7 +598,8 @@ static const struct kfd_device_info yellow_carp_device_info = {
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+- .needs_pci_atomics = false,
++ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 92,
+ .num_sdma_engines = 1,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 2,
+@@ -659,20 +668,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ if (!kfd)
+ return NULL;
+
+- /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+- * 32 and 64-bit requests are possible and must be
+- * supported.
+- */
+- kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
+- if (device_info->needs_pci_atomics &&
+- !kfd->pci_atomic_requested) {
+- dev_info(kfd_device,
+- "skipped device %x:%x, PCI rejects atomics\n",
+- pdev->vendor, pdev->device);
+- kfree(kfd);
+- return NULL;
+- }
+-
+ kfd->kgd = kgd;
+ kfd->device_info = device_info;
+ kfd->pdev = pdev;
+@@ -772,6 +767,23 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
+ - kfd->vm_info.first_vmid_kfd + 1;
+
++ /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
++ * 32 and 64-bit requests are possible and must be
++ * supported.
++ */
++ kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd);
++ if (!kfd->pci_atomic_requested &&
++ kfd->device_info->needs_pci_atomics &&
++ (!kfd->device_info->no_atomic_fw_version ||
++ kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) {
++ dev_info(kfd_device,
++ "skipped device %x:%x, PCI rejects atomics %d<%d\n",
++ kfd->pdev->vendor, kfd->pdev->device,
++ kfd->mec_fw_version,
++ kfd->device_info->no_atomic_fw_version);
++ return false;
++ }
++
+ /* Verify module parameters regarding mapped process number*/
+ if ((hws_max_conc_proc < 0)
+ || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 3426743ed228b..b38a84a274387 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -206,6 +206,7 @@ struct kfd_device_info {
+ bool supports_cwsr;
+ bool needs_iommu_device;
+ bool needs_pci_atomics;
++ uint32_t no_atomic_fw_version;
+ unsigned int num_sdma_engines;
+ unsigned int num_xgmi_sdma_engines;
+ unsigned int num_sdma_queues_per_engine;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 0f7f1e5621ea4..e85035fd1ccb4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -118,8 +118,16 @@ static void svm_range_remove_notifier(struct svm_range *prange)
+ mmu_interval_notifier_remove(&prange->notifier);
+ }
+
++static bool
++svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
++{
++ return dma_addr && !dma_mapping_error(dev, dma_addr) &&
++ !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
++}
++
+ static int
+ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
++ unsigned long offset, unsigned long npages,
+ unsigned long *hmm_pfns, uint32_t gpuidx)
+ {
+ enum dma_data_direction dir = DMA_BIDIRECTIONAL;
+@@ -136,9 +144,9 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
+ prange->dma_addr[gpuidx] = addr;
+ }
+
+- for (i = 0; i < prange->npages; i++) {
+- if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]),
+- "leaking dma mapping\n"))
++ addr += offset;
++ for (i = 0; i < npages; i++) {
++ if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
+ dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
+
+ page = hmm_pfn_to_page(hmm_pfns[i]);
+@@ -167,6 +175,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
+
+ static int
+ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
++ unsigned long offset, unsigned long npages,
+ unsigned long *hmm_pfns)
+ {
+ struct kfd_process *p;
+@@ -187,7 +196,8 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
+ }
+ adev = (struct amdgpu_device *)pdd->dev->kgd;
+
+- r = svm_range_dma_map_dev(adev, prange, hmm_pfns, gpuidx);
++ r = svm_range_dma_map_dev(adev, prange, offset, npages,
++ hmm_pfns, gpuidx);
+ if (r)
+ break;
+ }
+@@ -205,7 +215,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
+ return;
+
+ for (i = offset; i < offset + npages; i++) {
+- if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
++ if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
+ continue;
+ pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
+ dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
+@@ -1088,11 +1098,6 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
+ pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
+
+ pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
+-
+- pr_debug("svms 0x%p [0x%lx 0x%lx] vram %d PTE 0x%llx mapping 0x%x\n",
+- prange->svms, prange->start, prange->last,
+- (domain == SVM_RANGE_VRAM_DOMAIN) ? 1:0, pte_flags, mapping_flags);
+-
+ return pte_flags;
+ }
+
+@@ -1156,7 +1161,8 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
+
+ static int
+ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+- struct svm_range *prange, dma_addr_t *dma_addr,
++ struct svm_range *prange, unsigned long offset,
++ unsigned long npages, bool readonly, dma_addr_t *dma_addr,
+ struct amdgpu_device *bo_adev, struct dma_fence **fence)
+ {
+ struct amdgpu_bo_va bo_va;
+@@ -1165,16 +1171,17 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned long last_start;
+ int last_domain;
+ int r = 0;
+- int64_t i;
++ int64_t i, j;
+
+- pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
+- prange->last);
++ last_start = prange->start + offset;
++
++ pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
++ last_start, last_start + npages - 1, readonly);
+
+ if (prange->svm_bo && prange->ttm_res)
+ bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
+
+- last_start = prange->start;
+- for (i = 0; i < prange->npages; i++) {
++ for (i = offset; i < offset + npages; i++) {
+ last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
+ dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
+ if ((prange->start + i) < prange->last &&
+@@ -1183,15 +1190,27 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+
+ pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
+ last_start, prange->start + i, last_domain ? "GPU" : "CPU");
++
+ pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
+- r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, NULL,
+- last_start,
++ if (readonly)
++ pte_flags &= ~AMDGPU_PTE_WRITEABLE;
++
++ pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
++ prange->svms, last_start, prange->start + i,
++ (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
++ pte_flags);
++
++ r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
++ NULL, last_start,
+ prange->start + i, pte_flags,
+ last_start - prange->start,
+- NULL,
+- dma_addr,
++ NULL, dma_addr,
+ &vm->last_update,
+ &table_freed);
++
++ for (j = last_start - prange->start; j <= i; j++)
++ dma_addr[j] |= last_domain;
++
+ if (r) {
+ pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
+ goto out;
+@@ -1220,8 +1239,10 @@ out:
+ return r;
+ }
+
+-static int svm_range_map_to_gpus(struct svm_range *prange,
+- unsigned long *bitmap, bool wait)
++static int
++svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
++ unsigned long npages, bool readonly,
++ unsigned long *bitmap, bool wait)
+ {
+ struct kfd_process_device *pdd;
+ struct amdgpu_device *bo_adev;
+@@ -1257,7 +1278,8 @@ static int svm_range_map_to_gpus(struct svm_range *prange,
+ }
+
+ r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
+- prange, prange->dma_addr[gpuidx],
++ prange, offset, npages, readonly,
++ prange->dma_addr[gpuidx],
+ bo_adev, wait ? &fence : NULL);
+ if (r)
+ break;
+@@ -1390,7 +1412,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
+ int32_t gpuidx, bool intr, bool wait)
+ {
+ struct svm_validate_context ctx;
+- struct hmm_range *hmm_range;
++ unsigned long start, end, addr;
+ struct kfd_process *p;
+ void *owner;
+ int32_t idx;
+@@ -1448,40 +1470,66 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
+ break;
+ }
+ }
+- r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
+- prange->start << PAGE_SHIFT,
+- prange->npages, &hmm_range,
+- false, true, owner);
+- if (r) {
+- pr_debug("failed %d to get svm range pages\n", r);
+- goto unreserve_out;
+- }
+
+- r = svm_range_dma_map(prange, ctx.bitmap,
+- hmm_range->hmm_pfns);
+- if (r) {
+- pr_debug("failed %d to dma map range\n", r);
+- goto unreserve_out;
+- }
++ start = prange->start << PAGE_SHIFT;
++ end = (prange->last + 1) << PAGE_SHIFT;
++ for (addr = start; addr < end && !r; ) {
++ struct hmm_range *hmm_range;
++ struct vm_area_struct *vma;
++ unsigned long next;
++ unsigned long offset;
++ unsigned long npages;
++ bool readonly;
+
+- prange->validated_once = true;
++ vma = find_vma(mm, addr);
++ if (!vma || addr < vma->vm_start) {
++ r = -EFAULT;
++ goto unreserve_out;
++ }
++ readonly = !(vma->vm_flags & VM_WRITE);
+
+- svm_range_lock(prange);
+- if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
+- pr_debug("hmm update the range, need validate again\n");
+- r = -EAGAIN;
+- goto unlock_out;
+- }
+- if (!list_empty(&prange->child_list)) {
+- pr_debug("range split by unmap in parallel, validate again\n");
+- r = -EAGAIN;
+- goto unlock_out;
+- }
++ next = min(vma->vm_end, end);
++ npages = (next - addr) >> PAGE_SHIFT;
++ r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
++ addr, npages, &hmm_range,
++ readonly, true, owner);
++ if (r) {
++ pr_debug("failed %d to get svm range pages\n", r);
++ goto unreserve_out;
++ }
++
++ offset = (addr - start) >> PAGE_SHIFT;
++ r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
++ hmm_range->hmm_pfns);
++ if (r) {
++ pr_debug("failed %d to dma map range\n", r);
++ goto unreserve_out;
++ }
++
++ svm_range_lock(prange);
++ if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
++ pr_debug("hmm update the range, need validate again\n");
++ r = -EAGAIN;
++ goto unlock_out;
++ }
++ if (!list_empty(&prange->child_list)) {
++ pr_debug("range split by unmap in parallel, validate again\n");
++ r = -EAGAIN;
++ goto unlock_out;
++ }
+
+- r = svm_range_map_to_gpus(prange, ctx.bitmap, wait);
++ r = svm_range_map_to_gpus(prange, offset, npages, readonly,
++ ctx.bitmap, wait);
+
+ unlock_out:
+- svm_range_unlock(prange);
++ svm_range_unlock(prange);
++
++ addr = next;
++ }
++
++ if (addr == end)
++ prange->validated_once = true;
++
+ unreserve_out:
+ svm_range_unreserve_bos(&ctx);
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 6a4c6c47dcfaf..3bb567ea2cef9 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -7514,6 +7514,32 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
+ }
+ }
+
++static void amdgpu_set_panel_orientation(struct drm_connector *connector)
++{
++ struct drm_encoder *encoder;
++ struct amdgpu_encoder *amdgpu_encoder;
++ const struct drm_display_mode *native_mode;
++
++ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
++ connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
++ return;
++
++ encoder = amdgpu_dm_connector_to_encoder(connector);
++ if (!encoder)
++ return;
++
++ amdgpu_encoder = to_amdgpu_encoder(encoder);
++
++ native_mode = &amdgpu_encoder->native_mode;
++ if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
++ return;
++
++ drm_connector_set_panel_orientation_with_quirk(connector,
++ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
++ native_mode->hdisplay,
++ native_mode->vdisplay);
++}
++
+ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
+ struct edid *edid)
+ {
+@@ -7542,6 +7568,8 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
+ * restored here.
+ */
+ amdgpu_dm_update_freesync_caps(connector, edid);
++
++ amdgpu_set_panel_orientation(connector);
+ } else {
+ amdgpu_dm_connector->num_modes = 0;
+ }
+@@ -8051,8 +8079,26 @@ static bool is_content_protection_different(struct drm_connector_state *state,
+ state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
+- /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
+- * hot-plug, headless s3, dpms
++ /* Stream removed and re-enabled
++ *
++ * Can sometimes overlap with the HPD case,
++ * thus set update_hdcp to false to avoid
++ * setting HDCP multiple times.
++ *
++ * Handles: DESIRED -> DESIRED (Special case)
++ */
++ if (!(old_state->crtc && old_state->crtc->enabled) &&
++ state->crtc && state->crtc->enabled &&
++ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
++ dm_con_state->update_hdcp = false;
++ return true;
++ }
++
++ /* Hot-plug, headless s3, dpms
++ *
++ * Only start HDCP if the display is connected/enabled.
++ * update_hdcp flag will be set to false until the next
++ * HPD comes in.
+ *
+ * Handles: DESIRED -> DESIRED (Special case)
+ */
+@@ -10469,7 +10515,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ goto fail;
+ status = dc_validate_global_state(dc, dm_state->context, false);
+ if (status != DC_OK) {
+- DC_LOG_WARNING("DC global validation failure: %s (%d)",
++ drm_dbg_atomic(dev,
++ "DC global validation failure: %s (%d)",
+ dc_status_to_str(status), status);
+ ret = -EINVAL;
+ goto fail;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index a6d0fd24fd02d..83ef72a3ebf41 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1849,9 +1849,13 @@ bool perform_link_training_with_retries(
+ dp_disable_link_phy(link, signal);
+
+ /* Abort link training if failure due to sink being unplugged. */
+- if (status == LINK_TRAINING_ABORT)
+- break;
+- else if (do_fallback) {
++ if (status == LINK_TRAINING_ABORT) {
++ enum dc_connection_type type = dc_connection_none;
++
++ dc_link_detect_sink(link, &type);
++ if (type == dc_connection_none)
++ break;
++ } else if (do_fallback) {
+ decide_fallback_link_setting(*link_setting, &current_setting, status);
+ /* Fail link training if reduced link bandwidth no longer meets
+ * stream requirements.
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
+index 15c0b8af376f8..6e8fe1242752d 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
+@@ -6870,6 +6870,8 @@ static int si_dpm_enable(struct amdgpu_device *adev)
+ si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+ si_thermal_start_thermal_controller(adev);
+
++ ni_update_current_ps(adev, boot_ps);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
+index cb38b1a17b098..82cbb29a05aa3 100644
+--- a/drivers/gpu/drm/ttm/ttm_pool.c
++++ b/drivers/gpu/drm/ttm/ttm_pool.c
+@@ -383,7 +383,8 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ else
+ gfp_flags |= GFP_HIGHUSER;
+
+- for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
++ for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
++ num_pages;
+ order = min_t(unsigned int, order, __fls(num_pages))) {
+ bool apply_caching = false;
+ struct ttm_pool_type *pt;
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index f91d37beb1133..3b391dee30445 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -166,8 +166,6 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
+ bool connected = false;
+
+- WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
+-
+ if (vc4_hdmi->hpd_gpio &&
+ gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
+ connected = true;
+@@ -188,12 +186,10 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
+ }
+ }
+
+- pm_runtime_put(&vc4_hdmi->pdev->dev);
+ return connector_status_connected;
+ }
+
+ cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+- pm_runtime_put(&vc4_hdmi->pdev->dev);
+ return connector_status_disconnected;
+ }
+
+@@ -635,6 +631,7 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
+ vc4_hdmi->variant->phy_disable(vc4_hdmi);
+
+ clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
++ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ clk_disable_unprepare(vc4_hdmi->pixel_clock);
+
+ ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
+@@ -945,6 +942,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
+ return;
+ }
+
++ ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
++ if (ret) {
++ DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
++ clk_disable_unprepare(vc4_hdmi->pixel_clock);
++ return;
++ }
++
+ vc4_hdmi_cec_update_clk_div(vc4_hdmi);
+
+ if (pixel_rate > 297000000)
+@@ -957,6 +961,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
+ ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
+ if (ret) {
+ DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
++ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ clk_disable_unprepare(vc4_hdmi->pixel_clock);
+ return;
+ }
+@@ -964,6 +969,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
+ ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
+ if (ret) {
+ DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
++ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ clk_disable_unprepare(vc4_hdmi->pixel_clock);
+ return;
+ }
+@@ -2110,29 +2116,6 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
+ return 0;
+ }
+
+-#ifdef CONFIG_PM
+-static int vc4_hdmi_runtime_suspend(struct device *dev)
+-{
+- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+-
+- clk_disable_unprepare(vc4_hdmi->hsm_clock);
+-
+- return 0;
+-}
+-
+-static int vc4_hdmi_runtime_resume(struct device *dev)
+-{
+- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+- int ret;
+-
+- ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+- if (ret)
+- return ret;
+-
+- return 0;
+-}
+-#endif
+-
+ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
+ {
+ const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
+@@ -2380,18 +2363,11 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
+ {}
+ };
+
+-static const struct dev_pm_ops vc4_hdmi_pm_ops = {
+- SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
+- vc4_hdmi_runtime_resume,
+- NULL)
+-};
+-
+ struct platform_driver vc4_hdmi_driver = {
+ .probe = vc4_hdmi_dev_probe,
+ .remove = vc4_hdmi_dev_remove,
+ .driver = {
+ .name = "vc4_hdmi",
+ .of_match_table = vc4_hdmi_dt_match,
+- .pm = &vc4_hdmi_pm_ops,
+ },
+ };
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index 4d5924e9f7666..aca7b595c4c78 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -409,6 +409,7 @@ config MESON_IRQ_GPIO
+ config GOLDFISH_PIC
+ bool "Goldfish programmable interrupt controller"
+ depends on MIPS && (GOLDFISH || COMPILE_TEST)
++ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ help
+ Say yes here to enable Goldfish interrupt controller driver used
+diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
+index 7557ab5512953..53e0fb0562c11 100644
+--- a/drivers/irqchip/irq-armada-370-xp.c
++++ b/drivers/irqchip/irq-armada-370-xp.c
+@@ -359,16 +359,16 @@ static void armada_370_xp_ipi_send_mask(struct irq_data *d,
+ ARMADA_370_XP_SW_TRIG_INT_OFFS);
+ }
+
+-static void armada_370_xp_ipi_eoi(struct irq_data *d)
++static void armada_370_xp_ipi_ack(struct irq_data *d)
+ {
+ writel(~BIT(d->hwirq), per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+ }
+
+ static struct irq_chip ipi_irqchip = {
+ .name = "IPI",
++ .irq_ack = armada_370_xp_ipi_ack,
+ .irq_mask = armada_370_xp_ipi_mask,
+ .irq_unmask = armada_370_xp_ipi_unmask,
+- .irq_eoi = armada_370_xp_ipi_eoi,
+ .ipi_send_mask = armada_370_xp_ipi_send_mask,
+ };
+
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index ba39668c3e085..51584f4cccf46 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -4501,7 +4501,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
+
+ if (err) {
+ if (i > 0)
+- its_vpe_irq_domain_free(domain, virq, i - 1);
++ its_vpe_irq_domain_free(domain, virq, i);
+
+ its_lpi_free(bitmap, base, nr_ids);
+ its_free_prop_table(vprop_page);
+diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
+index 38fbb3b598731..38cc8340e817d 100644
+--- a/drivers/mcb/mcb-core.c
++++ b/drivers/mcb/mcb-core.c
+@@ -277,8 +277,8 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
+
+ bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
+ if (bus_nr < 0) {
+- rc = bus_nr;
+- goto err_free;
++ kfree(bus);
++ return ERR_PTR(bus_nr);
+ }
+
+ bus->bus_nr = bus_nr;
+@@ -293,12 +293,12 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
+ dev_set_name(&bus->dev, "mcb:%d", bus_nr);
+ rc = device_add(&bus->dev);
+ if (rc)
+- goto err_free;
++ goto err_put;
+
+ return bus;
+-err_free:
+- put_device(carrier);
+- kfree(bus);
++
++err_put:
++ put_device(&bus->dev);
+ return ERR_PTR(rc);
+ }
+ EXPORT_SYMBOL_NS_GPL(mcb_alloc_bus, MCB);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index ae8fe54ea3581..6c0c3d0d905aa 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -5700,10 +5700,6 @@ static int md_alloc(dev_t dev, char *name)
+ disk->flags |= GENHD_FL_EXT_DEVT;
+ disk->events |= DISK_EVENT_MEDIA_CHANGE;
+ mddev->gendisk = disk;
+- /* As soon as we call add_disk(), another thread could get
+- * through to md_open, so make sure it doesn't get too far
+- */
+- mutex_lock(&mddev->open_mutex);
+ add_disk(disk);
+
+ error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
+@@ -5718,7 +5714,6 @@ static int md_alloc(dev_t dev, char *name)
+ if (mddev->kobj.sd &&
+ sysfs_create_group(&mddev->kobj, &md_bitmap_group))
+ pr_debug("pointless warning\n");
+- mutex_unlock(&mddev->open_mutex);
+ abort:
+ mutex_unlock(&disks_mutex);
+ if (!error && mddev->kobj.sd) {
+diff --git a/drivers/misc/bcm-vk/bcm_vk_tty.c b/drivers/misc/bcm-vk/bcm_vk_tty.c
+index dae9eeed84a2b..89edc936b544b 100644
+--- a/drivers/misc/bcm-vk/bcm_vk_tty.c
++++ b/drivers/misc/bcm-vk/bcm_vk_tty.c
+@@ -267,13 +267,13 @@ int bcm_vk_tty_init(struct bcm_vk *vk, char *name)
+ struct device *tty_dev;
+
+ tty_port_init(&vk->tty[i].port);
+- tty_dev = tty_port_register_device(&vk->tty[i].port, tty_drv,
+- i, dev);
++ tty_dev = tty_port_register_device_attr(&vk->tty[i].port,
++ tty_drv, i, dev, vk,
++ NULL);
+ if (IS_ERR(tty_dev)) {
+ err = PTR_ERR(tty_dev);
+ goto unwind;
+ }
+- dev_set_drvdata(tty_dev, vk);
+ vk->tty[i].is_opened = false;
+ }
+
+diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
+index 2e1befbd1ad99..693981891870c 100644
+--- a/drivers/misc/genwqe/card_base.c
++++ b/drivers/misc/genwqe/card_base.c
+@@ -1090,7 +1090,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
+
+ /* check for 64-bit DMA address supported (DAC) */
+ /* check for 32-bit DMA address supported (SAC) */
+- if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) ||
++ if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
+ dev_err(&pci_dev->dev,
+ "err: neither DMA32 nor DMA64 supported\n");
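
dma_set_mask_and_coherent() returns 0 on success, so the original || in genwqe_pci_setup() was wrong twice over: when the 64-bit mask succeeded, the 32-bit call still ran and downgraded the mask it had just set, and when the 64-bit mask failed, the condition was already true, so the 32-bit fallback was never tried. With && the second call runs only if the first fails, and probe errors out only when both do. A runnable demonstration of the short-circuit logic with stubbed return codes (stand-in functions, not the kernel API):

    #include <stdio.h>

    /* stand-ins for dma_set_mask_and_coherent(): 0 on success, -5 (-EIO) on failure */
    static int set_mask_64(int hw_has_64bit) { return hw_has_64bit ? 0 : -5; }
    static int set_mask_32(void) { return 0; }

    static int probe(int hw_has_64bit)
    {
            /* '&&' short-circuits: the 32-bit fallback runs only if 64-bit failed */
            if (set_mask_64(hw_has_64bit) && set_mask_32())
                    return -5;      /* neither DMA32 nor DMA64 supported */
            return 0;
    }

    int main(void)
    {
            printf("64-bit hw: %d\n", probe(1));  /* 0, and the mask stays 64-bit */
            printf("32-bit hw: %d\n", probe(0));  /* 0 via the 32-bit fallback */
            return 0;
    }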
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 111a6d5985da6..1c122a1f2f97d 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3012,7 +3012,7 @@ static void mv88e6xxx_teardown(struct dsa_switch *ds)
+ {
+ mv88e6xxx_teardown_devlink_params(ds);
+ dsa_devlink_resources_unregister(ds);
+- mv88e6xxx_teardown_devlink_regions(ds);
++ mv88e6xxx_teardown_devlink_regions_global(ds);
+ }
+
+ static int mv88e6xxx_setup(struct dsa_switch *ds)
+@@ -3147,7 +3147,7 @@ unlock:
+ if (err)
+ goto out_resources;
+
+- err = mv88e6xxx_setup_devlink_regions(ds);
++ err = mv88e6xxx_setup_devlink_regions_global(ds);
+ if (err)
+ goto out_params;
+
+@@ -3161,6 +3161,16 @@ out_resources:
+ return err;
+ }
+
++static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
++{
++ return mv88e6xxx_setup_devlink_regions_port(ds, port);
++}
++
++static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
++{
++ mv88e6xxx_teardown_devlink_regions_port(ds, port);
++}
++
+ /* prod_id for switch families which do not have a PHY model number */
+ static const u16 family_prod_id_table[] = {
+ [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
+@@ -6055,6 +6065,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
+ .change_tag_protocol = mv88e6xxx_change_tag_protocol,
+ .setup = mv88e6xxx_setup,
+ .teardown = mv88e6xxx_teardown,
++ .port_setup = mv88e6xxx_port_setup,
++ .port_teardown = mv88e6xxx_port_teardown,
+ .phylink_validate = mv88e6xxx_validate,
+ .phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
+ .phylink_mac_config = mv88e6xxx_mac_config,
+diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
+index 0c0f5ea6680c3..381068395c63b 100644
+--- a/drivers/net/dsa/mv88e6xxx/devlink.c
++++ b/drivers/net/dsa/mv88e6xxx/devlink.c
+@@ -647,26 +647,25 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
+ },
+ };
+
+-static void
+-mv88e6xxx_teardown_devlink_regions_global(struct mv88e6xxx_chip *chip)
++void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds)
+ {
++ struct mv88e6xxx_chip *chip = ds->priv;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
+ dsa_devlink_region_destroy(chip->regions[i]);
+ }
+
+-static void
+-mv88e6xxx_teardown_devlink_regions_port(struct mv88e6xxx_chip *chip,
+- int port)
++void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port)
+ {
++ struct mv88e6xxx_chip *chip = ds->priv;
++
+ dsa_devlink_region_destroy(chip->ports[port].region);
+ }
+
+-static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
+- struct mv88e6xxx_chip *chip,
+- int port)
++int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port)
+ {
++ struct mv88e6xxx_chip *chip = ds->priv;
+ struct devlink_region *region;
+
+ region = dsa_devlink_port_region_create(ds,
+@@ -681,40 +680,10 @@ static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
+ return 0;
+ }
+
+-static void
+-mv88e6xxx_teardown_devlink_regions_ports(struct mv88e6xxx_chip *chip)
+-{
+- int port;
+-
+- for (port = 0; port < mv88e6xxx_num_ports(chip); port++)
+- mv88e6xxx_teardown_devlink_regions_port(chip, port);
+-}
+-
+-static int mv88e6xxx_setup_devlink_regions_ports(struct dsa_switch *ds,
+- struct mv88e6xxx_chip *chip)
+-{
+- int port;
+- int err;
+-
+- for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+- err = mv88e6xxx_setup_devlink_regions_port(ds, chip, port);
+- if (err)
+- goto out;
+- }
+-
+- return 0;
+-
+-out:
+- while (port-- > 0)
+- mv88e6xxx_teardown_devlink_regions_port(chip, port);
+-
+- return err;
+-}
+-
+-static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
+- struct mv88e6xxx_chip *chip)
++int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
+ {
+ bool (*cond)(struct mv88e6xxx_chip *chip);
++ struct mv88e6xxx_chip *chip = ds->priv;
+ struct devlink_region_ops *ops;
+ struct devlink_region *region;
+ u64 size;
+@@ -753,30 +722,6 @@ out:
+ return PTR_ERR(region);
+ }
+
+-int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds)
+-{
+- struct mv88e6xxx_chip *chip = ds->priv;
+- int err;
+-
+- err = mv88e6xxx_setup_devlink_regions_global(ds, chip);
+- if (err)
+- return err;
+-
+- err = mv88e6xxx_setup_devlink_regions_ports(ds, chip);
+- if (err)
+- mv88e6xxx_teardown_devlink_regions_global(chip);
+-
+- return err;
+-}
+-
+-void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds)
+-{
+- struct mv88e6xxx_chip *chip = ds->priv;
+-
+- mv88e6xxx_teardown_devlink_regions_ports(chip);
+- mv88e6xxx_teardown_devlink_regions_global(chip);
+-}
+-
+ int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+diff --git a/drivers/net/dsa/mv88e6xxx/devlink.h b/drivers/net/dsa/mv88e6xxx/devlink.h
+index 3d72db3dcf950..65ce6a6858b9f 100644
+--- a/drivers/net/dsa/mv88e6xxx/devlink.h
++++ b/drivers/net/dsa/mv88e6xxx/devlink.h
+@@ -12,8 +12,10 @@ int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+ int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+-int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds);
+-void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds);
++int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds);
++void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds);
++int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port);
++void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port);
+
+ int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
+index 8e49d4f85d48c..6bf46d76c0281 100644
+--- a/drivers/net/dsa/realtek-smi-core.c
++++ b/drivers/net/dsa/realtek-smi-core.c
+@@ -368,7 +368,7 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
+ smi->slave_mii_bus->parent = smi->dev;
+ smi->ds->slave_mii_bus = smi->slave_mii_bus;
+
+- ret = of_mdiobus_register(smi->slave_mii_bus, mdio_np);
++ ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np);
+ if (ret) {
+ dev_err(smi->dev, "unable to register MDIO bus %s\n",
+ smi->slave_mii_bus->id);
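
The realtek-smi change switches to the device-managed registration helper: devm_of_mdiobus_register() ties the MDIO bus registration to smi->dev, so the bus is unregistered automatically when the driver unbinds; the plain of_mdiobus_register() call had no matching mdiobus_unregister() in the remove path. The shape of the conversion, sketched:

    ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np);
    if (ret)
            dev_err(smi->dev, "unable to register MDIO bus %s\n",
                    smi->slave_mii_bus->id);
    /* no mdiobus_unregister() in remove(): devres handles it on unbind */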
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+index f26d037356191..5b996330f228b 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+@@ -419,13 +419,13 @@ static int atl_resume_common(struct device *dev, bool deep)
+ if (deep) {
+ /* Reinitialize Nic/Vecs objects */
+ aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
++ }
+
++ if (netif_running(nic->ndev)) {
+ ret = aq_nic_init(nic);
+ if (ret)
+ goto err_exit;
+- }
+
+- if (netif_running(nic->ndev)) {
+ ret = aq_nic_start(nic);
+ if (ret)
+ goto err_exit;
+diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
+index 85fa0ab7201c7..9513cfb5ba58c 100644
+--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
++++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
+@@ -129,6 +129,8 @@ static int bgmac_probe(struct bcma_device *core)
+ bcma_set_drvdata(core, bgmac);
+
+ err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr);
++ if (err == -EPROBE_DEFER)
++ return err;
+
+ /* If no MAC address assigned via device tree, check SPROM */
+ if (err) {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index fdbf47446a997..f20b57b8cd70e 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -385,7 +385,7 @@ static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
+ * netif_tx_queue_stopped().
+ */
+ smp_mb();
+- if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
++ if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
+ netif_tx_wake_queue(txq);
+ return false;
+ }
+@@ -758,7 +758,7 @@ next_tx_int:
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(txq)) &&
+- bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
++ bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
+ READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
+ netif_tx_wake_queue(txq);
+ }
+@@ -2375,7 +2375,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
+ tx_pkts++;
+ /* return full budget so NAPI will complete. */
+- if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
++ if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
+ rx_pkts = budget;
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ if (budget)
+@@ -3531,7 +3531,7 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
+ u16 i;
+
+ bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
+- MAX_SKB_FRAGS + 1);
++ BNXT_MIN_TX_DESC_CNT);
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index ba4e0fc38520c..d4dca4508d268 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -615,6 +615,11 @@ struct nqe_cn {
+ #define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
+ #define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+
++/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
++ * BD because the first TX BD is always a long BD.
++ */
++#define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
++
+ #define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+ #define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 786ca51e669bc..3a8c284635922 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -784,7 +784,7 @@ static int bnxt_set_ringparam(struct net_device *dev,
+
+ if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
+ (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
+- (ering->tx_pending <= MAX_SKB_FRAGS))
++ (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
+ return -EINVAL;
+
+ if (netif_running(dev))
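
The three bnxt hunks above close a TX hang with the smallest ring size. A maximally fragmented packet needs MAX_SKB_FRAGS + 2 BDs (the frags, plus the head, plus one extra because the first BD is always a long BD), hence BNXT_MIN_TX_DESC_CNT. The old ethtool check allowed a ring of MAX_SKB_FRAGS + 1 = 18 entries, for which tx_wake_thresh also became 18; since bnxt_tx_avail() can report at most the ring size, the strict 'avail > thresh' wake condition could never fire once the queue stopped. A runnable check of the arithmetic, with MAX_SKB_FRAGS fixed at its common value of 17:

    #include <stdio.h>

    #define MAX_SKB_FRAGS        17                    /* typical kernel value */
    #define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)   /* frags + head + long BD */

    static int max_int(int a, int b) { return a > b ? a : b; }

    int main(void)
    {
            /* old minimum ring: 18 entries; avail is at most the ring size */
            int ring = MAX_SKB_FRAGS + 1, avail = ring;
            int thresh = max_int(ring / 2, MAX_SKB_FRAGS + 1);

            printf("old: wake=%d (never true -> TX timeout)\n", avail > thresh);

            /* new minimum ring: 19 entries, and a '>=' comparison */
            ring = BNXT_MIN_TX_DESC_CNT;
            avail = ring;
            thresh = max_int(ring / 2, BNXT_MIN_TX_DESC_CNT);
            printf("new: wake=%d\n", avail >= thresh);
            return 0;
    }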
+diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
+index 8b7b59908a1ab..f66d22de5168d 100644
+--- a/drivers/net/ethernet/cadence/macb_pci.c
++++ b/drivers/net/ethernet/cadence/macb_pci.c
+@@ -111,9 +111,9 @@ static void macb_remove(struct pci_dev *pdev)
+ struct platform_device *plat_dev = pci_get_drvdata(pdev);
+ struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
+
+- platform_device_unregister(plat_dev);
+ clk_unregister(plat_data->pclk);
+ clk_unregister(plat_data->hclk);
++ platform_device_unregister(plat_dev);
+ }
+
+ static const struct pci_device_id dev_id_table[] = {
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 3ca93adb96628..042327b9981fa 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -419,7 +419,7 @@ static void enetc_rx_dim_work(struct work_struct *w)
+
+ static void enetc_rx_net_dim(struct enetc_int_vector *v)
+ {
+- struct dim_sample dim_sample;
++ struct dim_sample dim_sample = {};
+
+ v->comp_cnt++;
+
+@@ -1879,7 +1879,6 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
+ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
+ {
+ struct pci_dev *pdev = priv->si->pdev;
+- cpumask_t cpu_mask;
+ int i, j, err;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+@@ -1908,9 +1907,7 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
+
+ enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
+ }
+- cpumask_clear(&cpu_mask);
+- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+- irq_set_affinity_hint(irq, &cpu_mask);
++ irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
+ }
+
+ return 0;
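
The enetc hunk fixes a use-after-scope: irq_set_affinity_hint() stores the cpumask pointer it is given rather than copying the mask, so handing it the address of an on-stack cpumask_t leaves the IRQ core dereferencing dead stack memory once enetc_setup_irqs() returns. get_cpu_mask(cpu) instead returns a pointer to a static, kernel-lifetime single-CPU mask. The rule, sketched:

    /* WRONG: &cpu_mask points into this stack frame, which dies on return
     *   cpumask_t cpu_mask;
     *   cpumask_clear(&cpu_mask);
     *   cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
     *   irq_set_affinity_hint(irq, &cpu_mask);
     */

    /* RIGHT: static per-CPU mask, valid for the lifetime of the kernel */
    irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));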
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+index ec9a7f8bc3fed..2eeafd61a07ee 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+@@ -1878,12 +1878,12 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
+ return;
+ }
+
+- dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n",
++ dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vport(%u), queue_id(%u)\n",
+ vf_id, q_id);
+
+ if (vf_id) {
+ if (vf_id >= hdev->num_alloc_vport) {
+- dev_err(dev, "invalid vf id(%u)\n", vf_id);
++ dev_err(dev, "invalid vport(%u)\n", vf_id);
+ return;
+ }
+
+@@ -1896,8 +1896,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
+
+ ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
+ if (ret)
+- dev_err(dev, "inform reset to vf(%u) failed %d!\n",
+- hdev->vport->vport_id, ret);
++ dev_err(dev, "inform reset to vport(%u) failed %d!\n",
++ vf_id, ret);
+ } else {
+ set_bit(HNAE3_FUNC_RESET, reset_requests);
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 72d55c028ac4b..90a72c79fec99 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -3660,7 +3660,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set vf(%u) rst failed %d!\n",
+- vport->vport_id, ret);
++ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
++ ret);
+ return ret;
+ }
+
+@@ -3675,7 +3676,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "inform reset to vf(%u) failed %d!\n",
+- vport->vport_id, ret);
++ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
++ ret);
+ }
+
+ return 0;
+@@ -4734,6 +4736,24 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
+ return 0;
+ }
+
++static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
++ u8 *hash_algo)
++{
++ switch (hfunc) {
++ case ETH_RSS_HASH_TOP:
++ *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
++ return 0;
++ case ETH_RSS_HASH_XOR:
++ *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
++ return 0;
++ case ETH_RSS_HASH_NO_CHANGE:
++ *hash_algo = vport->rss_algo;
++ return 0;
++ default:
++ return -EINVAL;
++ }
++}
++
+ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+ {
+@@ -4743,30 +4763,27 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
+ u8 hash_algo;
+ int ret, i;
+
++ ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
++ if (ret) {
++ dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
++ return ret;
++ }
++
+ 	/* Set the RSS Hash Key if specified by the user */
+ if (key) {
+- switch (hfunc) {
+- case ETH_RSS_HASH_TOP:
+- hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
+- break;
+- case ETH_RSS_HASH_XOR:
+- hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
+- break;
+- case ETH_RSS_HASH_NO_CHANGE:
+- hash_algo = vport->rss_algo;
+- break;
+- default:
+- return -EINVAL;
+- }
+-
+ ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
+ if (ret)
+ return ret;
+
+ 		/* Update the shadow RSS key with the user-specified key */
+ memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
+- vport->rss_algo = hash_algo;
++ } else {
++ ret = hclge_set_rss_algo_key(hdev, hash_algo,
++ vport->rss_hash_key);
++ if (ret)
++ return ret;
+ }
++ vport->rss_algo = hash_algo;
+
+ /* Update the shadow RSS table with user specified qids */
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
+@@ -6620,10 +6637,13 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
+ u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
+ u16 tqps;
+
++	/* To stay consistent with the user's configuration, subtract 1 when
++	 * printing 'vf': the vf id from ethtool is offset by 1.
++ */
+ if (vf > hdev->num_req_vfs) {
+ dev_err(&hdev->pdev->dev,
+- "Error: vf id (%u) > max vf num (%u)\n",
+- vf, hdev->num_req_vfs);
++ "Error: vf id (%u) should be less than %u\n",
++ vf - 1, hdev->num_req_vfs);
+ return -EINVAL;
+ }
+
+@@ -9790,6 +9810,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
+ if (is_kill && !vlan_id)
+ return 0;
+
++ if (vlan_id >= VLAN_N_VID)
++ return -EINVAL;
++
+ ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+@@ -10696,7 +10719,8 @@ static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
+ return 0;
+ }
+
+-static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
++static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
++ u8 *reset_status)
+ {
+ struct hclge_reset_tqp_queue_cmd *req;
+ struct hclge_desc desc;
+@@ -10714,7 +10738,9 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
+ return ret;
+ }
+
+- return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
++ *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
++
++ return 0;
+ }
+
+ u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
+@@ -10733,7 +10759,7 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u16 reset_try_times = 0;
+- int reset_status;
++ u8 reset_status;
+ u16 queue_gid;
+ int ret;
+ u16 i;
+@@ -10749,7 +10775,11 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
+ }
+
+ while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
+- reset_status = hclge_get_reset_status(hdev, queue_gid);
++ ret = hclge_get_reset_status(hdev, queue_gid,
++ &reset_status);
++ if (ret)
++ return ret;
++
+ if (reset_status)
+ break;
+
+@@ -11442,11 +11472,11 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
+ struct hclge_vport *vport = &hdev->vport[i];
+ int ret;
+
+- /* Send cmd to clear VF's FUNC_RST_ING */
++ /* Send cmd to clear vport's FUNC_RST_ING */
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+- "clear vf(%u) rst failed %d!\n",
++ "clear vport(%u) rst failed %d!\n",
+ vport->vport_id, ret);
+ }
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+index 0dbed35645eda..c1a4b79a70504 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+@@ -564,7 +564,7 @@ static int hclge_reset_vf(struct hclge_vport *vport)
+ struct hclge_dev *hdev = vport->back;
+
+ dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
+- vport->vport_id);
++ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
+
+ return hclge_func_reset_cmd(hdev, vport->vport_id);
+ }
+@@ -588,9 +588,17 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
+ {
++ struct hnae3_handle *handle = &vport->nic;
++ struct hclge_dev *hdev = vport->back;
+ u16 queue_id, qid_in_pf;
+
+ memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
++ if (queue_id >= handle->kinfo.num_tqps) {
++ dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
++ queue_id, mbx_req->mbx_src_vfid);
++ return;
++ }
++
+ qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
+ memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
+ resp_msg->len = sizeof(qid_in_pf);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+index 78d5bf1ea5610..44618cc4cca10 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+@@ -581,7 +581,7 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+- "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
++ "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
+ vport->vport_id, shap_cfg_cmd->qs_id,
+ max_tx_rate, ret);
+ return ret;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index be3ea7023ed8c..22cf66004dfa2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -814,40 +814,56 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
+ return 0;
+ }
+
++static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
++ u8 *hash_algo)
++{
++ switch (hfunc) {
++ case ETH_RSS_HASH_TOP:
++ *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
++ return 0;
++ case ETH_RSS_HASH_XOR:
++ *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
++ return 0;
++ case ETH_RSS_HASH_NO_CHANGE:
++ *hash_algo = hdev->rss_cfg.hash_algo;
++ return 0;
++ default:
++ return -EINVAL;
++ }
++}
++
+ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+ {
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
++ u8 hash_algo;
+ int ret, i;
+
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
++ ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
++ if (ret)
++ return ret;
++
+ /* Set the RSS Hash Key if specififed by the user */
+ if (key) {
+- switch (hfunc) {
+- case ETH_RSS_HASH_TOP:
+- rss_cfg->hash_algo =
+- HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+- break;
+- case ETH_RSS_HASH_XOR:
+- rss_cfg->hash_algo =
+- HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+- break;
+- case ETH_RSS_HASH_NO_CHANGE:
+- break;
+- default:
+- return -EINVAL;
+- }
+-
+- ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
+- key);
+- if (ret)
++ ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
++ if (ret) {
++ dev_err(&hdev->pdev->dev,
++ "invalid hfunc type %u\n", hfunc);
+ return ret;
++ }
+
+ 			/* Update the shadow RSS key with the user-specified key */
+ memcpy(rss_cfg->rss_hash_key, key,
+ HCLGEVF_RSS_KEY_SIZE);
++ } else {
++ ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
++ rss_cfg->rss_hash_key);
++ if (ret)
++ return ret;
+ }
++ rss_cfg->hash_algo = hash_algo;
+ }
+
+ /* update the shadow RSS table with user specified qids */
+diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
+index fc8c7cd674712..8b12a5ab3818c 100644
+--- a/drivers/net/ethernet/i825xx/82596.c
++++ b/drivers/net/ethernet/i825xx/82596.c
+@@ -1155,7 +1155,7 @@ struct net_device * __init i82596_probe(int unit)
+ err = -ENODEV;
+ goto out;
+ }
+- memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! Get addr from NOVRAM */
++ memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN); /* YUCK! Get addr from NOVRAM */
+ dev->base_addr = MVME_I596_BASE;
+ dev->irq = (unsigned) MVME16x_IRQ_I596;
+ goto found;
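
absolute_pointer() exists to turn a hard-coded machine address (here the m68k VME NOVRAM at 0xfffc1f2c) into a pointer the optimizer cannot trace back to any object, silencing compiler out-of-bounds diagnostics on the memcpy(); in the kernel it is defined in terms of RELOC_HIDE(). A simplified sketch of the idea, not the kernel's exact definition:

    #define absolute_pointer(val) ((void *)(unsigned long)(val))

    memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN);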
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+index b5f68f66d42a8..7bb1f20002b58 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -186,6 +186,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
+ int hash;
+ int i;
+
++ if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
++ return -EEXIST;
++
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
+ struct flow_match_meta match;
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index 5d0c9c62382dc..1e672bc36c4dc 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -372,6 +372,9 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ int nhoff = skb_network_offset(skb);
+ int ret = 0;
+
++ if (skb->encapsulation)
++ return -EPROTONOSUPPORT;
++
+ if (skb->protocol != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index 2948d731a1c1c..512dff9551669 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -1260,14 +1260,19 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
+ return mask;
+ }
+
+-static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot,
++static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port,
+ struct net_device *bridge)
+ {
++ struct ocelot_port *ocelot_port = ocelot->ports[src_port];
+ u32 mask = 0;
+ int port;
+
++ if (!ocelot_port || ocelot_port->bridge != bridge ||
++ ocelot_port->stp_state != BR_STATE_FORWARDING)
++ return 0;
++
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+- struct ocelot_port *ocelot_port = ocelot->ports[port];
++ ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port)
+ continue;
+@@ -1333,7 +1338,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
+ struct net_device *bridge = ocelot_port->bridge;
+ struct net_device *bond = ocelot_port->bond;
+
+- mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
++ mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge);
+ mask |= cpu_fwd_mask;
+ mask &= ~BIT(port);
+ if (bond) {
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+index a99861124630a..68fbe536a1f32 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+@@ -1297,6 +1297,14 @@ qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
+ prev_weight = weight;
+
+ while (weight) {
++		/* If the HW device is in recovery, all resources are
++ * immediately reset without receiving a per-cid indication
++ * from HW. In this case we don't expect the cid_map to be
++ * cleared.
++ */
++ if (p_hwfn->cdev->recov_in_prog)
++ return 0;
++
+ msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
+
+ weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
+index f16a157bb95a0..cf5baa5e59bcc 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
+@@ -77,6 +77,14 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
+ * Beyond the added delay we clear the bitmap anyway.
+ */
+ while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
++		/* If the HW device is in recovery, all resources are
++ * immediately reset without receiving a per-cid indication
++ * from HW. In this case we don't expect the cid bitmap to be
++ * cleared.
++ */
++ if (p_hwfn->cdev->recov_in_prog)
++ return;
++
+ msleep(100);
+ if (wait_count++ > 20) {
+ DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 0dbd189c2721d..2218bc3a624b4 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -309,7 +309,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
+ priv->clk_csr = STMMAC_CSR_100_150M;
+ else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
+ priv->clk_csr = STMMAC_CSR_150_250M;
+- else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
++ else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
+ priv->clk_csr = STMMAC_CSR_250_300M;
+ }
+
+diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
+index 8fe8887d506a3..6192244b304ab 100644
+--- a/drivers/net/hamradio/6pack.c
++++ b/drivers/net/hamradio/6pack.c
+@@ -68,9 +68,9 @@
+ #define SIXP_DAMA_OFF 0
+
+ /* default level 2 parameters */
+-#define SIXP_TXDELAY (HZ/4) /* in 1 s */
++#define SIXP_TXDELAY 25 /* 250 ms */
+ #define SIXP_PERSIST 50 /* in 256ths */
+-#define SIXP_SLOTTIME (HZ/10) /* in 1 s */
++#define SIXP_SLOTTIME 10 /* 100 ms */
+ #define SIXP_INIT_RESYNC_TIMEOUT (3*HZ/2) /* in 1 s */
+ #define SIXP_RESYNC_TIMEOUT 5*HZ /* in 1 s */
+
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 42e5a681183f3..0d3d9c3ee83c8 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1604,6 +1604,32 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
+ if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
+ return -EINVAL;
+
++ /* If this link is with an SFP, ensure that changes to advertised modes
++ * also cause the associated interface to be selected such that the
++ * link can be configured correctly.
++ */
++ if (pl->sfp_port && pl->sfp_bus) {
++ config.interface = sfp_select_interface(pl->sfp_bus,
++ config.advertising);
++ if (config.interface == PHY_INTERFACE_MODE_NA) {
++ phylink_err(pl,
++ "selection of interface failed, advertisement %*pb\n",
++ __ETHTOOL_LINK_MODE_MASK_NBITS,
++ config.advertising);
++ return -EINVAL;
++ }
++
++ /* Revalidate with the selected interface */
++ linkmode_copy(support, pl->supported);
++ if (phylink_validate(pl, support, &config)) {
++ phylink_err(pl, "validation of %s/%s with support %*pb failed\n",
++ phylink_an_mode_str(pl->cur_link_an_mode),
++ phy_modes(config.interface),
++ __ETHTOOL_LINK_MODE_MASK_NBITS, support);
++ return -EINVAL;
++ }
++ }
++
+ mutex_lock(&pl->state_mutex);
+ pl->link_config.speed = config.speed;
+ pl->link_config.duplex = config.duplex;
+@@ -2183,7 +2209,9 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
+ if (phy_interface_mode_is_8023z(iface) && pl->phydev)
+ return -EINVAL;
+
+- changed = !linkmode_equal(pl->supported, support);
++ changed = !linkmode_equal(pl->supported, support) ||
++ !linkmode_equal(pl->link_config.advertising,
++ config.advertising);
+ if (changed) {
+ linkmode_copy(pl->supported, support);
+ linkmode_copy(pl->link_config.advertising, config.advertising);
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index 18e0ca85f6537..3c7120ec70798 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -2720,14 +2720,14 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
+
+ serial = kzalloc(sizeof(*serial), GFP_KERNEL);
+ if (!serial)
+- goto exit;
++ goto err_free_dev;
+
+ hso_dev->port_data.dev_serial = serial;
+ serial->parent = hso_dev;
+
+ if (hso_serial_common_create
+ (serial, 1, CTRL_URB_RX_SIZE, CTRL_URB_TX_SIZE))
+- goto exit;
++ goto err_free_serial;
+
+ serial->tx_data_length--;
+ serial->write_data = hso_mux_serial_write_data;
+@@ -2743,11 +2743,9 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
+ /* done, return it */
+ return hso_dev;
+
+-exit:
+- if (serial) {
+- tty_unregister_device(tty_drv, serial->minor);
+- kfree(serial);
+- }
++err_free_serial:
++ kfree(serial);
++err_free_dev:
+ kfree(hso_dev);
+ return NULL;
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index eee493685aad5..fb96658bb91ff 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -435,6 +435,10 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+
+ skb_reserve(skb, p - buf);
+ skb_put(skb, len);
++
++ page = (struct page *)page->private;
++ if (page)
++ give_pages(rq, page);
+ goto ok;
+ }
+
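
In virtio_net's big-packets mode, receive buffers are chains of pages linked through page->private. When the whole payload fits in the skb head, page_to_skb() used to take the 'goto ok' shortcut without touching the rest of the chain, leaking every tail page; the two added lines hand the remainder back to the receive queue's pool. The chain walk they rely on, annotated (sketch):

    /* the first page was consumed into the skb head; any tail pages are
     * still chained through page->private and must be returned */
    page = (struct page *)page->private;   /* next page in the chain, or NULL */
    if (page)
            give_pages(rq, page);          /* splice the tail back onto rq->pages */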
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 5a8df5a195cb5..141635a35c28a 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -4756,12 +4756,12 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+ LIST_HEAD(list);
+ unsigned int h;
+
+- rtnl_lock();
+ list_for_each_entry(net, net_list, exit_list) {
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+ unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
+ }
++ rtnl_lock();
+ list_for_each_entry(net, net_list, exit_list)
+ vxlan_destroy_tunnels(net, &list);
+
+diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
+index 250d56f204c3e..e62b1a0916d89 100644
+--- a/drivers/nfc/st-nci/spi.c
++++ b/drivers/nfc/st-nci/spi.c
+@@ -278,6 +278,7 @@ static int st_nci_spi_remove(struct spi_device *dev)
+
+ static struct spi_device_id st_nci_spi_id_table[] = {
+ {ST_NCI_SPI_DRIVER_NAME, 0},
++ {"st21nfcb-spi", 0},
+ {}
+ };
+ MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 84e7cb9f19681..e2374319df61a 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -13,7 +13,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/backing-dev.h>
+-#include <linux/list_sort.h>
+ #include <linux/slab.h>
+ #include <linux/types.h>
+ #include <linux/pr.h>
+@@ -3688,15 +3687,6 @@ out_unlock:
+ return ret;
+ }
+
+-static int ns_cmp(void *priv, const struct list_head *a,
+- const struct list_head *b)
+-{
+- struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
+- struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
+-
+- return nsa->head->ns_id - nsb->head->ns_id;
+-}
+-
+ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+ {
+ struct nvme_ns *ns, *ret = NULL;
+@@ -3717,6 +3707,22 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+ }
+ EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
+
++/*
++ * Add the namespace to the controller list while keeping the list ordered.
++ */
++static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
++{
++ struct nvme_ns *tmp;
++
++ list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
++ if (tmp->head->ns_id < ns->head->ns_id) {
++ list_add(&ns->list, &tmp->list);
++ return;
++ }
++ }
++ list_add(&ns->list, &ns->ctrl->namespaces);
++}
++
+ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ struct nvme_ns_ids *ids)
+ {
+@@ -3778,9 +3784,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ }
+
+ down_write(&ctrl->namespaces_rwsem);
+- list_add_tail(&ns->list, &ctrl->namespaces);
++ nvme_ns_add_to_ctrl_list(ns);
+ up_write(&ctrl->namespaces_rwsem);
+-
+ nvme_get_ctrl(ctrl);
+
+ device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
+@@ -4059,10 +4064,6 @@ static void nvme_scan_work(struct work_struct *work)
+ if (nvme_scan_ns_list(ctrl) != 0)
+ nvme_scan_ns_sequential(ctrl);
+ mutex_unlock(&ctrl->scan_lock);
+-
+- down_write(&ctrl->namespaces_rwsem);
+- list_sort(NULL, &ctrl->namespaces, ns_cmp);
+- up_write(&ctrl->namespaces_rwsem);
+ }
+
+ /*
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 3f32c5e86bfcb..abc9bdfd48bde 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -583,14 +583,17 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
+
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+- unsigned nsid = le32_to_cpu(desc->nsids[n]);
+-
++ unsigned nsid;
++again:
++ nsid = le32_to_cpu(desc->nsids[n]);
+ if (ns->head->ns_id < nsid)
+ continue;
+ if (ns->head->ns_id == nsid)
+ nvme_update_ns_ana_state(desc, ns);
+ if (++n == nr_nsids)
+ break;
++ if (ns->head->ns_id > nsid)
++ goto again;
+ }
+ up_read(&ctrl->namespaces_rwsem);
+ return 0;
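
nvme_update_ana_state() is a lockstep walk over two sorted sequences: the controller's namespace list (kept sorted at insert time by the core.c hunk above, which replaced the post-scan list_sort()) and the nsids array in the ANA log descriptor. The descriptor can contain nsids the host has no namespace for; the old loop consumed exactly one descriptor entry per namespace and fell out of sync at the first such gap, leaving later namespaces un-updated. The 'again' label lets the walk skip descriptor entries until it catches up. A runnable skeleton of the merge walk, with hypothetical ids:

    #include <stdio.h>

    int main(void)
    {
            int list[] = {1, 4, 7};        /* sorted ns ids present on the ctrl */
            int desc[] = {1, 2, 4, 7};     /* sorted nsids in the ANA descriptor */
            int n = 0, nr = 4;

            for (int i = 0; i < 3; i++) {
                    int nsid;
    again:
                    nsid = desc[n];
                    if (list[i] < nsid)
                            continue;       /* this ns is not in the descriptor */
                    if (list[i] == nsid)
                            printf("update ns %d\n", list[i]);
                    if (++n == nr)
                            break;
                    if (list[i] > nsid)
                            goto again;     /* skip nsid 2: no such ns, re-check */
            }
            /* updates ns 1, 4 and 7; a one-entry-per-ns walk would miss 4 and 7 */
            return 0;
    }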
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index a68704e39084e..042c594bc57e2 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -656,8 +656,8 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
+ if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
+ return;
+
+- nvme_rdma_destroy_queue_ib(queue);
+ rdma_destroy_id(queue->cm_id);
++ nvme_rdma_destroy_queue_ib(queue);
+ mutex_destroy(&queue->queue_lock);
+ }
+
+@@ -1815,14 +1815,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
+ for (i = 0; i < queue->queue_size; i++) {
+ ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
+ if (ret)
+- goto out_destroy_queue_ib;
++ return ret;
+ }
+
+ return 0;
+-
+-out_destroy_queue_ib:
+- nvme_rdma_destroy_queue_ib(queue);
+- return ret;
+ }
+
+ static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
+@@ -1916,14 +1912,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
+ if (ret) {
+ dev_err(ctrl->ctrl.device,
+ "rdma_connect_locked failed (%d).\n", ret);
+- goto out_destroy_queue_ib;
++ return ret;
+ }
+
+ return 0;
+-
+-out_destroy_queue_ib:
+- nvme_rdma_destroy_queue_ib(queue);
+- return ret;
+ }
+
+ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
+@@ -1954,8 +1946,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+- nvme_rdma_destroy_queue_ib(queue);
+- fallthrough;
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ dev_dbg(queue->ctrl->ctrl.device,
+ "CM error event %d\n", ev->event);
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 19a711395cdc3..fd28a23d45ed6 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -614,7 +614,7 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
+ cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
+ data->ttag = pdu->ttag;
+ data->command_id = nvme_cid(rq);
+- data->data_offset = cpu_to_le32(req->data_sent);
++ data->data_offset = pdu->r2t_offset;
+ data->data_length = cpu_to_le32(req->pdu_len);
+ return 0;
+ }
+@@ -940,7 +940,15 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
+ nvme_tcp_ddgst_update(queue->snd_hash, page,
+ offset, ret);
+
+- /* fully successful last write*/
++ /*
++	 * Update the request iterator, except for the last payload send
++	 * in the request, where we must not modify it: we may race with
++	 * the RX path completing the request.
++ */
++ if (req->data_sent + ret < req->data_len)
++ nvme_tcp_advance_req(req, ret);
++
++ /* fully successful last send in current PDU */
+ if (last && ret == len) {
+ if (queue->data_digest) {
+ nvme_tcp_ddgst_final(queue->snd_hash,
+@@ -952,7 +960,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
+ }
+ return 1;
+ }
+- nvme_tcp_advance_req(req, ret);
+ }
+ return -EAGAIN;
+ }
+diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
+index fa88bf9cba4d0..3e5053c5ec836 100644
+--- a/drivers/nvme/target/configfs.c
++++ b/drivers/nvme/target/configfs.c
+@@ -1067,7 +1067,7 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
+ {
+ struct nvmet_subsys *subsys = to_subsys(item);
+
+- return snprintf(page, PAGE_SIZE, "%*s\n",
++ return snprintf(page, PAGE_SIZE, "%.*s\n",
+ NVMET_SN_MAX_SIZE, subsys->serial);
+ }
+
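
The nvmet fix is one character but changes the semantics entirely: in printf-style formats, '*' before 's' is a field width (a minimum: it pads but never truncates, and reads the argument up to its NUL), while '.*' is a precision (a maximum number of bytes to read). subsys->serial is a fixed-size buffer with no NUL-termination guarantee, so only the precision form bounds the read. A runnable illustration:

    #include <stdio.h>

    int main(void)
    {
            char serial[] = "0123456789ABCDEFGHIJXXX";  /* longer than the field */

            printf("[%*s]\n", 20, serial);   /* width: prints all 23 bytes */
            printf("[%.*s]\n", 20, serial);  /* precision: at most 20 bytes */
            return 0;
    }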
+diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
+index 3481479a2942f..d6a7c896ac866 100644
+--- a/drivers/platform/x86/amd-pmc.c
++++ b/drivers/platform/x86/amd-pmc.c
+@@ -71,7 +71,7 @@
+ #define AMD_CPU_ID_YC 0x14B5
+
+ #define PMC_MSG_DELAY_MIN_US 100
+-#define RESPONSE_REGISTER_LOOP_MAX 200
++#define RESPONSE_REGISTER_LOOP_MAX 20000
+
+ #define SOC_SUBSYSTEM_IP_MAX 12
+ #define DELAY_MIN_US 2000
+diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
+index 9e7314d90bea8..1e3da9700005e 100644
+--- a/drivers/platform/x86/dell/Kconfig
++++ b/drivers/platform/x86/dell/Kconfig
+@@ -166,8 +166,7 @@ config DELL_WMI
+
+ config DELL_WMI_PRIVACY
+ bool "Dell WMI Hardware Privacy Support"
+- depends on DELL_WMI
+- depends on LEDS_TRIGGER_AUDIO
++ depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
+ help
+ This option adds integration with the "Dell Hardware Privacy"
+ feature of Dell laptops to the dell-wmi driver.
+diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
+index f58b8543f6ac5..66bb39fd0ef90 100644
+--- a/drivers/platform/x86/intel_punit_ipc.c
++++ b/drivers/platform/x86/intel_punit_ipc.c
+@@ -8,7 +8,6 @@
+ * which provide mailbox interface for power management usage.
+ */
+
+-#include <linux/acpi.h>
+ #include <linux/bitops.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+@@ -319,7 +318,7 @@ static struct platform_driver intel_punit_ipc_driver = {
+ .remove = intel_punit_ipc_remove,
+ .driver = {
+ .name = "intel_punit_ipc",
+- .acpi_match_table = ACPI_PTR(punit_ipc_acpi_ids),
++ .acpi_match_table = punit_ipc_acpi_ids,
+ },
+ };
+
+diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
+index 1d78b455cc48c..e34face736f48 100644
+--- a/drivers/regulator/max14577-regulator.c
++++ b/drivers/regulator/max14577-regulator.c
+@@ -269,5 +269,3 @@ module_exit(max14577_regulator_exit);
+ MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
+ MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("platform:max14577-regulator");
+-MODULE_ALIAS("platform:max77836-regulator");
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index 6cca910a76ded..7f458d510483f 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -991,7 +991,7 @@ static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
+- RPMH_VREG("ldo7", "ldo%s6", &pmic5_pldo_lv, "vdd-l7"),
++ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7"),
+ {}
+ };
+
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 62f88ccbd03f8..51f7f4e680c34 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -207,6 +207,9 @@ static void qeth_clear_working_pool_list(struct qeth_card *card)
+ &card->qdio.in_buf_pool.entry_list, list)
+ list_del(&pool_entry->list);
+
++ if (!queue)
++ return;
++
+ for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
+ queue->bufs[i].pool_entry = NULL;
+ }
+diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
+index eb88aaaf36eb3..c34a7f7446013 100644
+--- a/drivers/scsi/lpfc/lpfc_attr.c
++++ b/drivers/scsi/lpfc/lpfc_attr.c
+@@ -6022,7 +6022,8 @@ lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
+ len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
+ phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
+
+- len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d SCSI: %d NVME: %d\n",
++ len += scnprintf(buf + len, PAGE_SIZE - len,
++ "Cfg: %d SCSI: %d NVME: %d\n",
+ phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
+ phba->cfg_nvme_seg_cnt);
+ return len;
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index f8f471157109e..70b507d177f14 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -7014,7 +7014,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
+ return 0;
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+- if (!qla_dual_mode_enabled(vha))
++ if (!qla_dual_mode_enabled(vha) &&
++ !qla_ini_mode_enabled(vha))
+ return 0;
+ break;
+ case QLA2XXX_INI_MODE_ENABLED:
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index d8b05d8b54708..922e4c7bd88e4 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -441,9 +441,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
+ struct iscsi_transport *t = iface->transport;
+ int param = -1;
+
+- if (attr == &dev_attr_iface_enabled.attr)
+- param = ISCSI_NET_PARAM_IFACE_ENABLE;
+- else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
++ if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
+ param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
+ else if (attr == &dev_attr_iface_header_digest.attr)
+ param = ISCSI_IFACE_PARAM_HDRDGST_EN;
+@@ -483,7 +481,9 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
+ if (param != -1)
+ return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
+
+- if (attr == &dev_attr_iface_vlan_id.attr)
++ if (attr == &dev_attr_iface_enabled.attr)
++ param = ISCSI_NET_PARAM_IFACE_ENABLE;
++ else if (attr == &dev_attr_iface_vlan_id.attr)
+ param = ISCSI_NET_PARAM_VLAN_ID;
+ else if (attr == &dev_attr_iface_vlan_priority.attr)
+ param = ISCSI_NET_PARAM_VLAN_PRIORITY;
+diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
+index 186b5ff52c3ab..06ee1f045e976 100644
+--- a/drivers/scsi/sd_zbc.c
++++ b/drivers/scsi/sd_zbc.c
+@@ -154,8 +154,8 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
+
+ /*
+ * Report zone buffer size should be at most 64B times the number of
+- * zones requested plus the 64B reply header, but should be at least
+- * SECTOR_SIZE for ATA devices.
++ * zones requested plus the 64B reply header, but should be aligned
++ * to SECTOR_SIZE for ATA devices.
+ * Make sure that this size does not exceed the hardware capabilities.
+ * Furthermore, since the report zone command cannot be split, make
+ * sure that the allocated buffer can always be mapped by limiting the
+@@ -174,7 +174,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
+ *buflen = bufsize;
+ return buf;
+ }
+- bufsize >>= 1;
++ bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
+ }
+
+ return NULL;
+@@ -280,7 +280,7 @@ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
+ {
+ struct scsi_disk *sdkp;
+ unsigned long flags;
+- unsigned int zno;
++ sector_t zno;
+ int ret;
+
+ sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
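
Two independent sd_zbc fixes sit in this file: the report-zones buffer size must stay aligned while being halved on allocation failure, since a plain 'bufsize >> 1' can yield a size that is no longer a multiple of SECTOR_SIZE (which ATA devices require), and the zone number is widened from unsigned int to sector_t to match the 64-bit zone arithmetic. The rounddown step, runnable:

    #include <stdio.h>

    #define SECTOR_SIZE 512u
    /* same result as the kernel's rounddown() for these operands */
    #define rounddown(x, y) ((x) - ((x) % (y)))

    int main(void)
    {
            unsigned int bufsize = 5 * SECTOR_SIZE;    /* 2560 */

            while (bufsize >= SECTOR_SIZE) {
                    printf("%u\n", bufsize);           /* 2560, 1024, 512 */
                    /* plain 'bufsize >> 1' would yield 1280 and 640: unaligned */
                    bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
            }
            return 0;
    }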
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 15ac5fa148058..3a204324151a8 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -2112,6 +2112,7 @@ static inline
+ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+ {
+ struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
++ unsigned long flags;
+
+ lrbp->issue_time_stamp = ktime_get();
+ lrbp->compl_time_stamp = ktime_set(0, 0);
+@@ -2120,19 +2121,10 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+ ufshcd_clk_scaling_start_busy(hba);
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
+ ufshcd_start_monitor(hba, lrbp);
+- if (ufshcd_has_utrlcnr(hba)) {
+- set_bit(task_tag, &hba->outstanding_reqs);
+- ufshcd_writel(hba, 1 << task_tag,
+- REG_UTP_TRANSFER_REQ_DOOR_BELL);
+- } else {
+- unsigned long flags;
+-
+- spin_lock_irqsave(hba->host->host_lock, flags);
+- set_bit(task_tag, &hba->outstanding_reqs);
+- ufshcd_writel(hba, 1 << task_tag,
+- REG_UTP_TRANSFER_REQ_DOOR_BELL);
+- spin_unlock_irqrestore(hba->host->host_lock, flags);
+- }
++ spin_lock_irqsave(hba->host->host_lock, flags);
++ set_bit(task_tag, &hba->outstanding_reqs);
++ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
++ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
+ }
+@@ -5237,10 +5229,12 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+ /**
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+- * @completed_reqs: requests to complete
++ * @completed_reqs: bitmask that indicates which requests to complete
++ * @retry_requests: whether to ask the SCSI core to retry completed requests
+ */
+ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+- unsigned long completed_reqs)
++ unsigned long completed_reqs,
++ bool retry_requests)
+ {
+ struct ufshcd_lrb *lrbp;
+ struct scsi_cmnd *cmd;
+@@ -5258,7 +5252,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
+ ufshcd_update_monitor(hba, lrbp);
+ ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
+- result = ufshcd_transfer_rsp_status(hba, lrbp);
++ result = retry_requests ? DID_BUS_BUSY << 16 :
++ ufshcd_transfer_rsp_status(hba, lrbp);
+ scsi_dma_unmap(cmd);
+ cmd->result = result;
+ /* Mark completed command as NULL in LRB */
+@@ -5282,17 +5277,19 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ }
+
+ /**
+- * ufshcd_trc_handler - handle transfer requests completion
++ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+- * @use_utrlcnr: get completed requests from UTRLCNR
++ * @retry_requests: whether or not to ask to retry requests
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
+ */
+-static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
++static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba,
++ bool retry_requests)
+ {
+- unsigned long completed_reqs = 0;
++ unsigned long completed_reqs, flags;
++ u32 tr_doorbell;
+
+ /* Resetting interrupt aggregation counters first and reading the
+ * DOOR_BELL afterward allows us to handle all the completed requests.
+@@ -5305,27 +5302,14 @@ static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
+ !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
+ ufshcd_reset_intr_aggr(hba);
+
+- if (use_utrlcnr) {
+- u32 utrlcnr;
+-
+- utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL);
+- if (utrlcnr) {
+- ufshcd_writel(hba, utrlcnr,
+- REG_UTP_TRANSFER_REQ_LIST_COMPL);
+- completed_reqs = utrlcnr;
+- }
+- } else {
+- unsigned long flags;
+- u32 tr_doorbell;
+-
+- spin_lock_irqsave(hba->host->host_lock, flags);
+- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+- completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+- spin_unlock_irqrestore(hba->host->host_lock, flags);
+- }
++ spin_lock_irqsave(hba->host->host_lock, flags);
++ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
++ completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
++ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (completed_reqs) {
+- __ufshcd_transfer_req_compl(hba, completed_reqs);
++ __ufshcd_transfer_req_compl(hba, completed_reqs,
++ retry_requests);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+@@ -5804,7 +5788,13 @@ out:
+ /* Complete requests that have door-bell cleared */
+ static void ufshcd_complete_requests(struct ufs_hba *hba)
+ {
+- ufshcd_trc_handler(hba, false);
++ ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
++ ufshcd_tmc_handler(hba);
++}
++
++static void ufshcd_retry_aborted_requests(struct ufs_hba *hba)
++{
++ ufshcd_transfer_req_compl(hba, /*retry_requests=*/true);
+ ufshcd_tmc_handler(hba);
+ }
+
+@@ -6146,8 +6136,7 @@ static void ufshcd_err_handler(struct work_struct *work)
+ }
+
+ lock_skip_pending_xfer_clear:
+- /* Complete the requests that are cleared by s/w */
+- ufshcd_complete_requests(hba);
++ ufshcd_retry_aborted_requests(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->silence_err_logs = false;
+@@ -6445,7 +6434,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+ retval |= ufshcd_tmc_handler(hba);
+
+ if (intr_status & UTP_TRANSFER_REQ_COMPL)
+- retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba));
++ retval |= ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
+
+ return retval;
+ }
+@@ -6869,7 +6858,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
+ err = ufshcd_clear_cmd(hba, pos);
+ if (err)
+ break;
+- __ufshcd_transfer_req_compl(hba, pos);
++ __ufshcd_transfer_req_compl(hba, 1U << pos, false);
+ }
+ }
+
+@@ -7040,7 +7029,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
+ dev_err(hba->dev,
+ "%s: cmd was completed, but without a notifying intr, tag = %d",
+ __func__, tag);
+- __ufshcd_transfer_req_compl(hba, 1UL << tag);
++ __ufshcd_transfer_req_compl(hba, 1UL << tag, /*retry_requests=*/false);
+ goto release;
+ }
+
+@@ -7105,7 +7094,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
+ */
+ ufshcd_hba_stop(hba);
+ hba->silence_err_logs = true;
+- ufshcd_complete_requests(hba);
++ ufshcd_retry_aborted_requests(hba);
+ hba->silence_err_logs = false;
+
+ /* scale up clocks to max frequency before full reinitialization */
+diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
+index 194755c9ddfeb..86d4765a17b83 100644
+--- a/drivers/scsi/ufs/ufshcd.h
++++ b/drivers/scsi/ufs/ufshcd.h
+@@ -1160,11 +1160,6 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
+ return ufshcd_readl(hba, REG_UFS_VERSION);
+ }
+
+-static inline bool ufshcd_has_utrlcnr(struct ufs_hba *hba)
+-{
+- return (hba->ufs_version >= ufshci_version(3, 0));
+-}
+-
+ static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
+ bool up, enum ufs_notify_change_status status)
+ {
+diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
+index 5affb1fce5ad0..de95be5d11d4e 100644
+--- a/drivers/scsi/ufs/ufshci.h
++++ b/drivers/scsi/ufs/ufshci.h
+@@ -39,7 +39,6 @@ enum {
+ REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
+ REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
+ REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
+- REG_UTP_TRANSFER_REQ_LIST_COMPL = 0x64,
+ REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
+ REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
+ REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
+diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
+index 6a726c95ac7a8..dc1a6899ba3b2 100644
+--- a/drivers/spi/spi-tegra20-slink.c
++++ b/drivers/spi/spi-tegra20-slink.c
+@@ -1206,7 +1206,7 @@ static int tegra_slink_resume(struct device *dev)
+ }
+ #endif
+
+-static int tegra_slink_runtime_suspend(struct device *dev)
++static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
+ {
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+@@ -1218,7 +1218,7 @@ static int tegra_slink_runtime_suspend(struct device *dev)
+ return 0;
+ }
+
+-static int tegra_slink_runtime_resume(struct device *dev)
++static int __maybe_unused tegra_slink_runtime_resume(struct device *dev)
+ {
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index e4dc593b1f32a..f95f7666cb5b7 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -58,10 +58,6 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
+ const struct spi_device *spi = to_spi_device(dev);
+ int len;
+
+- len = of_device_modalias(dev, buf, PAGE_SIZE);
+- if (len != -ENODEV)
+- return len;
+-
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
+@@ -367,10 +363,6 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
+ const struct spi_device *spi = to_spi_device(dev);
+ int rc;
+
+- rc = of_device_uevent_modalias(dev, env);
+- if (rc != -ENODEV)
+- return rc;
+-
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
+index 73f01ed1e5b72..a943fce322be8 100644
+--- a/drivers/staging/greybus/uart.c
++++ b/drivers/staging/greybus/uart.c
+@@ -761,6 +761,17 @@ out:
+ gbphy_runtime_put_autosuspend(gb_tty->gbphy_dev);
+ }
+
++static void gb_tty_port_destruct(struct tty_port *port)
++{
++ struct gb_tty *gb_tty = container_of(port, struct gb_tty, port);
++
++ if (gb_tty->minor != GB_NUM_MINORS)
++ release_minor(gb_tty);
++ kfifo_free(&gb_tty->write_fifo);
++ kfree(gb_tty->buffer);
++ kfree(gb_tty);
++}
++
+ static const struct tty_operations gb_ops = {
+ .install = gb_tty_install,
+ .open = gb_tty_open,
+@@ -786,6 +797,7 @@ static const struct tty_port_operations gb_port_ops = {
+ .dtr_rts = gb_tty_dtr_rts,
+ .activate = gb_tty_port_activate,
+ .shutdown = gb_tty_port_shutdown,
++ .destruct = gb_tty_port_destruct,
+ };
+
+ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
+@@ -798,17 +810,11 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
+ int retval;
+ int minor;
+
+- gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
+- if (!gb_tty)
+- return -ENOMEM;
+-
+ connection = gb_connection_create(gbphy_dev->bundle,
+ le16_to_cpu(gbphy_dev->cport_desc->id),
+ gb_uart_request_handler);
+- if (IS_ERR(connection)) {
+- retval = PTR_ERR(connection);
+- goto exit_tty_free;
+- }
++ if (IS_ERR(connection))
++ return PTR_ERR(connection);
+
+ max_payload = gb_operation_get_payload_size_max(connection);
+ if (max_payload < sizeof(struct gb_uart_send_data_request)) {
+@@ -816,13 +822,23 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
+ goto exit_connection_destroy;
+ }
+
++ gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
++ if (!gb_tty) {
++ retval = -ENOMEM;
++ goto exit_connection_destroy;
++ }
++
++ tty_port_init(&gb_tty->port);
++ gb_tty->port.ops = &gb_port_ops;
++ gb_tty->minor = GB_NUM_MINORS;
++
+ gb_tty->buffer_payload_max = max_payload -
+ sizeof(struct gb_uart_send_data_request);
+
+ gb_tty->buffer = kzalloc(gb_tty->buffer_payload_max, GFP_KERNEL);
+ if (!gb_tty->buffer) {
+ retval = -ENOMEM;
+- goto exit_connection_destroy;
++ goto exit_put_port;
+ }
+
+ INIT_WORK(&gb_tty->tx_work, gb_uart_tx_write_work);
+@@ -830,7 +846,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
+ retval = kfifo_alloc(&gb_tty->write_fifo, GB_UART_WRITE_FIFO_SIZE,
+ GFP_KERNEL);
+ if (retval)
+- goto exit_buf_free;
++ goto exit_put_port;
+
+ gb_tty->credits = GB_UART_FIRMWARE_CREDITS;
+ init_completion(&gb_tty->credits_complete);
+@@ -844,7 +860,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
+ } else {
+ retval = minor;
+ }
+- goto exit_kfifo_free;
++ goto exit_put_port;
+ }
+
+ gb_tty->minor = minor;
+@@ -853,9 +869,6 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
+ init_waitqueue_head(&gb_tty->wioctl);
+ mutex_init(&gb_tty->mutex);
+
+- tty_port_init(&gb_tty->port);
+- gb_tty->port.ops = &gb_port_ops;
+-
+ gb_tty->connection = connection;
+ gb_tty->gbphy_dev = gbphy_dev;
+ gb_connection_set_data(connection, gb_tty);
+@@ -863,7 +876,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
+
+ retval = gb_connection_enable_tx(connection);
+ if (retval)
+- goto exit_release_minor;
++ goto exit_put_port;
+
+ send_control(gb_tty, gb_tty->ctrlout);
+
+@@ -890,16 +903,10 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
+
+ exit_connection_disable:
+ gb_connection_disable(connection);
+-exit_release_minor:
+- release_minor(gb_tty);
+-exit_kfifo_free:
+- kfifo_free(&gb_tty->write_fifo);
+-exit_buf_free:
+- kfree(gb_tty->buffer);
++exit_put_port:
++ tty_port_put(&gb_tty->port);
+ exit_connection_destroy:
+ gb_connection_destroy(connection);
+-exit_tty_free:
+- kfree(gb_tty);
+
+ return retval;
+ }
+@@ -930,15 +937,10 @@ static void gb_uart_remove(struct gbphy_device *gbphy_dev)
+ gb_connection_disable_rx(connection);
+ tty_unregister_device(gb_tty_driver, gb_tty->minor);
+
+- /* FIXME - free transmit / receive buffers */
+-
+ gb_connection_disable(connection);
+- tty_port_destroy(&gb_tty->port);
+ gb_connection_destroy(connection);
+- release_minor(gb_tty);
+- kfifo_free(&gb_tty->write_fifo);
+- kfree(gb_tty->buffer);
+- kfree(gb_tty);
++
++ tty_port_put(&gb_tty->port);
+ }
+
+ static int gb_tty_init(void)
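
The greybus UART hunks above rework teardown around the refcounted tty_port: every free moves into a single .destruct callback that runs when the last reference is dropped, probe failures and remove both collapse to tty_port_put(), and a sentinel minor (GB_NUM_MINORS) marks "never allocated". A minimal userspace model of that idiom, with illustrative names rather than the kernel API:

#include <stdio.h>
#include <stdlib.h>

#define NUM_MINORS 8                    /* sentinel: "no minor allocated" */

struct port {
    int refcount;
    int minor;                          /* NUM_MINORS until allocation succeeds */
    char *buffer;                       /* may still be NULL on early failure */
};

static void port_destruct(struct port *p)
{
    if (p->minor != NUM_MINORS)         /* only release what was acquired */
        printf("releasing minor %d\n", p->minor);
    free(p->buffer);                    /* free(NULL) is a no-op, like kfree() */
    free(p);
}

static void port_put(struct port *p)
{
    if (--p->refcount == 0)             /* destructor runs on the last reference */
        port_destruct(p);
}

int main(void)
{
    struct port *p = calloc(1, sizeof(*p));

    if (!p)
        return 1;
    p->refcount = 1;
    p->minor = NUM_MINORS;              /* sentinel set before any failure point */
    p->buffer = malloc(64);
    p->minor = 3;                       /* pretend the minor was allocated */

    port_put(p);                        /* every exit path is just a "put" */
    return 0;
}

The point of the sentinel is that the destructor can run safely on a half-initialized object, so probe no longer has to track which allocation step failed.
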
+diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
+index 102ec644bc8a0..023bd4516a681 100644
+--- a/drivers/target/target_core_configfs.c
++++ b/drivers/target/target_core_configfs.c
+@@ -1110,20 +1110,24 @@ static ssize_t alua_support_store(struct config_item *item,
+ {
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+- bool flag;
++ bool flag, oldflag;
+ int ret;
+
++ ret = strtobool(page, &flag);
++ if (ret < 0)
++ return ret;
++
++ oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);
++ if (flag == oldflag)
++ return count;
++
+ if (!(dev->transport->transport_flags_changeable &
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
+ pr_err("dev[%p]: Unable to change SE Device alua_support:"
+ " alua_support has fixed value\n", dev);
+- return -EINVAL;
++ return -ENOSYS;
+ }
+
+- ret = strtobool(page, &flag);
+- if (ret < 0)
+- return ret;
+-
+ if (flag)
+ dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
+ else
+@@ -1145,20 +1149,24 @@ static ssize_t pgr_support_store(struct config_item *item,
+ {
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+- bool flag;
++ bool flag, oldflag;
+ int ret;
+
++ ret = strtobool(page, &flag);
++ if (ret < 0)
++ return ret;
++
++ oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
++ if (flag == oldflag)
++ return count;
++
+ if (!(dev->transport->transport_flags_changeable &
+ TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
+ pr_err("dev[%p]: Unable to change SE Device pgr_support:"
+ " pgr_support has fixed value\n", dev);
+- return -EINVAL;
++ return -ENOSYS;
+ }
+
+- ret = strtobool(page, &flag);
+- if (ret < 0)
+- return ret;
+-
+ if (flag)
+ dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
+ else
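
The reordering in both stores above follows one pattern: parse the input first, treat a write of the already-current value as a successful no-op, and only then reject a real change with -ENOSYS when the transport's flag is fixed. A compilable userspace sketch of that ordering, with strcmp() standing in for strtobool() and made-up names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool current_flag;                   /* stands in for the transport flag */
static const bool flag_changeable = false;  /* this backend cannot change it */

static int flag_store(const char *page, int count)
{
    bool flag;

    if (!strcmp(page, "1\n"))           /* strcmp stands in for strtobool() */
        flag = true;
    else if (!strcmp(page, "0\n"))
        flag = false;
    else
        return -EINVAL;

    if (flag == current_flag)           /* no-op writes always succeed */
        return count;

    if (!flag_changeable)               /* only real changes need support */
        return -ENOSYS;

    current_flag = flag;
    return count;
}

int main(void)
{
    printf("%d\n", flag_store("0\n", 2));   /* 2: value unchanged, success */
    printf("%d\n", flag_store("1\n", 2));   /* -ENOSYS (-38 on Linux) */
    return 0;
}
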
+diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+index 0f0038af2ad48..fb64acfd5e07d 100644
+--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
++++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+@@ -107,7 +107,7 @@ static int tcc_offset_update(unsigned int tcc)
+ return 0;
+ }
+
+-static unsigned int tcc_offset_save;
++static int tcc_offset_save = -1;
+
+ static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+@@ -352,7 +352,8 @@ int proc_thermal_resume(struct device *dev)
+ proc_dev = dev_get_drvdata(dev);
+ proc_thermal_read_ppcc(proc_dev);
+
+- tcc_offset_update(tcc_offset_save);
++ if (tcc_offset_save >= 0)
++ tcc_offset_update(tcc_offset_save);
+
+ return 0;
+ }
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 97ef9b040b84a..51374f4e1ccaf 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -222,15 +222,14 @@ int thermal_build_list_of_policies(char *buf)
+ {
+ struct thermal_governor *pos;
+ ssize_t count = 0;
+- ssize_t size = PAGE_SIZE;
+
+ mutex_lock(&thermal_governor_lock);
+
+ list_for_each_entry(pos, &thermal_governor_list, governor_list) {
+- size = PAGE_SIZE - count;
+- count += scnprintf(buf + count, size, "%s ", pos->name);
++ count += scnprintf(buf + count, PAGE_SIZE - count, "%s ",
++ pos->name);
+ }
+- count += scnprintf(buf + count, size, "\n");
++ count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
+
+ mutex_unlock(&thermal_governor_lock);
+
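
The fix above recomputes the remaining buffer space on every append instead of caching it before the loop tail, so the trailing newline can no longer be written with a stale size. A userspace sketch of the same accumulation pattern, with snprintf() standing in for scnprintf() (snprintf() returns the untruncated length, hence the extra bounds checks here):

#include <stdio.h>

#define BUF_SIZE 64

int main(void)
{
    const char *names[] = { "step_wise", "fair_share", "user_space" };
    char buf[BUF_SIZE];
    size_t count = 0;
    int i;

    for (i = 0; i < 3 && count < BUF_SIZE; i++)
        count += snprintf(buf + count, BUF_SIZE - count, "%s ", names[i]);
    if (count < BUF_SIZE)   /* space recomputed here, never the cached value */
        count += snprintf(buf + count, BUF_SIZE - count, "\n");

    fputs(buf, stdout);
    return 0;
}
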
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index b6c731a267d26..7223e22c4b886 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -106,7 +106,7 @@
+ #define UART_OMAP_EFR2_TIMEOUT_BEHAVE BIT(6)
+
+ /* RX FIFO occupancy indicator */
+-#define UART_OMAP_RX_LVL 0x64
++#define UART_OMAP_RX_LVL 0x19
+
+ struct omap8250_priv {
+ int line;
+diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
+index 231de29a64521..ab226da75f7ba 100644
+--- a/drivers/tty/serial/mvebu-uart.c
++++ b/drivers/tty/serial/mvebu-uart.c
+@@ -163,7 +163,7 @@ static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
+ st = readl(port->membase + UART_STAT);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+- return (st & STAT_TX_FIFO_EMP) ? TIOCSER_TEMT : 0;
++ return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
+ }
+
+ static unsigned int mvebu_uart_get_mctrl(struct uart_port *port)
+diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
+index 5bb928b7873e7..2f5fbd7db7cac 100644
+--- a/drivers/tty/synclink_gt.c
++++ b/drivers/tty/synclink_gt.c
+@@ -438,8 +438,8 @@ static void reset_tbufs(struct slgt_info *info);
+ static void tdma_reset(struct slgt_info *info);
+ static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
+
+-static void get_signals(struct slgt_info *info);
+-static void set_signals(struct slgt_info *info);
++static void get_gtsignals(struct slgt_info *info);
++static void set_gtsignals(struct slgt_info *info);
+ static void set_rate(struct slgt_info *info, u32 data_rate);
+
+ static void bh_transmit(struct slgt_info *info);
+@@ -720,7 +720,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+ if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
+ info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
+ spin_lock_irqsave(&info->lock,flags);
+- set_signals(info);
++ set_gtsignals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+
+@@ -730,7 +730,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+ if (!C_CRTSCTS(tty) || !tty_throttled(tty))
+ info->signals |= SerialSignal_RTS;
+ spin_lock_irqsave(&info->lock,flags);
+- set_signals(info);
++ set_gtsignals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+
+@@ -1181,7 +1181,7 @@ static inline void line_info(struct seq_file *m, struct slgt_info *info)
+
+ /* output current serial signal states */
+ spin_lock_irqsave(&info->lock,flags);
+- get_signals(info);
++ get_gtsignals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ stat_buf[0] = 0;
+@@ -1281,7 +1281,7 @@ static void throttle(struct tty_struct * tty)
+ if (C_CRTSCTS(tty)) {
+ spin_lock_irqsave(&info->lock,flags);
+ info->signals &= ~SerialSignal_RTS;
+- set_signals(info);
++ set_gtsignals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+ }
+@@ -1306,7 +1306,7 @@ static void unthrottle(struct tty_struct * tty)
+ if (C_CRTSCTS(tty)) {
+ spin_lock_irqsave(&info->lock,flags);
+ info->signals |= SerialSignal_RTS;
+- set_signals(info);
++ set_gtsignals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+ }
+@@ -1477,7 +1477,7 @@ static int hdlcdev_open(struct net_device *dev)
+
+ /* inform generic HDLC layer of current DCD status */
+ spin_lock_irqsave(&info->lock, flags);
+- get_signals(info);
++ get_gtsignals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
+ if (info->signals & SerialSignal_DCD)
+ netif_carrier_on(dev);
+@@ -2232,7 +2232,7 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
+ if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
+ info->signals &= ~SerialSignal_RTS;
+ info->drop_rts_on_tx_done = false;
+- set_signals(info);
++ set_gtsignals(info);
+ }
+
+ #if SYNCLINK_GENERIC_HDLC
+@@ -2397,7 +2397,7 @@ static void shutdown(struct slgt_info *info)
+
+ if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
+ info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
+- set_signals(info);
++ set_gtsignals(info);
+ }
+
+ flush_cond_wait(&info->gpio_wait_q);
+@@ -2425,7 +2425,7 @@ static void program_hw(struct slgt_info *info)
+ else
+ async_mode(info);
+
+- set_signals(info);
++ set_gtsignals(info);
+
+ info->dcd_chkcount = 0;
+ info->cts_chkcount = 0;
+@@ -2433,7 +2433,7 @@ static void program_hw(struct slgt_info *info)
+ info->dsr_chkcount = 0;
+
+ slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
+- get_signals(info);
++ get_gtsignals(info);
+
+ if (info->netcount ||
+ (info->port.tty && info->port.tty->termios.c_cflag & CREAD))
+@@ -2670,7 +2670,7 @@ static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr)
+ spin_lock_irqsave(&info->lock,flags);
+
+ /* return immediately if state matches requested events */
+- get_signals(info);
++ get_gtsignals(info);
+ s = info->signals;
+
+ events = mask &
+@@ -3088,7 +3088,7 @@ static int tiocmget(struct tty_struct *tty)
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+- get_signals(info);
++ get_gtsignals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
+@@ -3127,7 +3127,7 @@ static int tiocmset(struct tty_struct *tty,
+ info->signals &= ~SerialSignal_DTR;
+
+ spin_lock_irqsave(&info->lock,flags);
+- set_signals(info);
++ set_gtsignals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+ }
+@@ -3138,7 +3138,7 @@ static int carrier_raised(struct tty_port *port)
+ struct slgt_info *info = container_of(port, struct slgt_info, port);
+
+ spin_lock_irqsave(&info->lock,flags);
+- get_signals(info);
++ get_gtsignals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ return (info->signals & SerialSignal_DCD) ? 1 : 0;
+ }
+@@ -3153,7 +3153,7 @@ static void dtr_rts(struct tty_port *port, int on)
+ info->signals |= SerialSignal_RTS | SerialSignal_DTR;
+ else
+ info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
+- set_signals(info);
++ set_gtsignals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+
+@@ -3951,10 +3951,10 @@ static void tx_start(struct slgt_info *info)
+
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+ if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
+- get_signals(info);
++ get_gtsignals(info);
+ if (!(info->signals & SerialSignal_RTS)) {
+ info->signals |= SerialSignal_RTS;
+- set_signals(info);
++ set_gtsignals(info);
+ info->drop_rts_on_tx_done = true;
+ }
+ }
+@@ -4008,7 +4008,7 @@ static void reset_port(struct slgt_info *info)
+ rx_stop(info);
+
+ info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
+- set_signals(info);
++ set_gtsignals(info);
+
+ slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
+ }
+@@ -4430,7 +4430,7 @@ static void tx_set_idle(struct slgt_info *info)
+ /*
+ * get state of V24 status (input) signals
+ */
+-static void get_signals(struct slgt_info *info)
++static void get_gtsignals(struct slgt_info *info)
+ {
+ unsigned short status = rd_reg16(info, SSR);
+
+@@ -4492,7 +4492,7 @@ static void msc_set_vcr(struct slgt_info *info)
+ /*
+ * set state of V24 control (output) signals
+ */
+-static void set_signals(struct slgt_info *info)
++static void set_gtsignals(struct slgt_info *info)
+ {
+ unsigned char val = rd_reg8(info, VCR);
+ if (info->signals & SerialSignal_DTR)
+diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
+index 5d8c982019afc..1f3b4a1422126 100644
+--- a/drivers/usb/cdns3/cdns3-gadget.c
++++ b/drivers/usb/cdns3/cdns3-gadget.c
+@@ -1100,6 +1100,19 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
+ return 0;
+ }
+
++static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep)
++{
++ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
++
++ if (priv_dev->dev_ver < DEV_VER_V3)
++ return;
++
++ if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) {
++ writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts);
++ writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
++ }
++}
++
+ /**
+ * cdns3_ep_run_transfer - start transfer on no-default endpoint hardware
+ * @priv_ep: endpoint object
+@@ -1351,6 +1364,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ /* clearing TRBERR and EP_STS_DESCMIS before setting DRDY */
+ writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
+ writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
++ cdns3_rearm_drdy_if_needed(priv_ep);
+ trace_cdns3_doorbell_epx(priv_ep->name,
+ readl(&priv_dev->regs->ep_traddr));
+ }
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 4895325b16a46..5b90d0979c607 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -726,7 +726,8 @@ static void acm_port_destruct(struct tty_port *port)
+ {
+ struct acm *acm = container_of(port, struct acm, port);
+
+- acm_release_minor(acm);
++ if (acm->minor != ACM_MINOR_INVALID)
++ acm_release_minor(acm);
+ usb_put_intf(acm->control);
+ kfree(acm->country_codes);
+ kfree(acm);
+@@ -1323,8 +1324,10 @@ made_compressed_probe:
+ usb_get_intf(acm->control); /* undone in destruct() */
+
+ minor = acm_alloc_minor(acm);
+- if (minor < 0)
++ if (minor < 0) {
++ acm->minor = ACM_MINOR_INVALID;
+ goto err_put_port;
++ }
+
+ acm->minor = minor;
+ acm->dev = usb_dev;
+diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
+index 8aef5eb769a0d..3aa7f0a3ad71e 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -22,6 +22,8 @@
+ #define ACM_TTY_MAJOR 166
+ #define ACM_TTY_MINORS 256
+
++#define ACM_MINOR_INVALID ACM_TTY_MINORS
++
+ /*
+ * Requests.
+ */
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 0f8b7c93310ea..99ff2d23be05e 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -2775,6 +2775,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
+ {
+ int retval;
+ struct usb_device *rhdev;
++ struct usb_hcd *shared_hcd;
+
+ if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
+ hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
+@@ -2935,13 +2936,26 @@ int usb_add_hcd(struct usb_hcd *hcd,
+ goto err_hcd_driver_start;
+ }
+
++ /* starting here, usbcore will pay attention to the shared HCD roothub */
++ shared_hcd = hcd->shared_hcd;
++ if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
++ retval = register_root_hub(shared_hcd);
++ if (retval != 0)
++ goto err_register_root_hub;
++
++ if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
++ usb_hcd_poll_rh_status(shared_hcd);
++ }
++
+ /* starting here, usbcore will pay attention to this root hub */
+- retval = register_root_hub(hcd);
+- if (retval != 0)
+- goto err_register_root_hub;
++ if (!HCD_DEFER_RH_REGISTER(hcd)) {
++ retval = register_root_hub(hcd);
++ if (retval != 0)
++ goto err_register_root_hub;
+
+- if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
+- usb_hcd_poll_rh_status(hcd);
++ if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
++ usb_hcd_poll_rh_status(hcd);
++ }
+
+ return retval;
+
+@@ -2985,6 +2999,7 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
+ void usb_remove_hcd(struct usb_hcd *hcd)
+ {
+ struct usb_device *rhdev = hcd->self.root_hub;
++ bool rh_registered;
+
+ dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
+
+@@ -2995,6 +3010,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
+
+ dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
+ spin_lock_irq (&hcd_root_hub_lock);
++ rh_registered = hcd->rh_registered;
+ hcd->rh_registered = 0;
+ spin_unlock_irq (&hcd_root_hub_lock);
+
+@@ -3004,7 +3020,8 @@ void usb_remove_hcd(struct usb_hcd *hcd)
+ cancel_work_sync(&hcd->died_work);
+
+ mutex_lock(&usb_bus_idr_lock);
+- usb_disconnect(&rhdev); /* Sets rhdev to NULL */
++ if (rh_registered)
++ usb_disconnect(&rhdev); /* Sets rhdev to NULL */
+ mutex_unlock(&usb_bus_idr_lock);
+
+ /*
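
For orientation, a simplified model of the deferred root-hub flow this hunk introduces: a controller flagged with an HCD_DEFER_RH_REGISTER-style bit skips registering its root hub in its own add path, and the sibling's add path registers it later. Illustrative names only, not the usbcore API, and polling/error handling omitted:

#include <stdbool.h>
#include <stdio.h>

struct hcd {
    const char *name;
    bool primary;
    bool defer_rh;                      /* models HCD_DEFER_RH_REGISTER */
    struct hcd *shared;                 /* the sibling controller, if any */
};

static void register_root_hub(struct hcd *h)
{
    printf("root hub for %s registered\n", h->name);
}

static void add_hcd(struct hcd *h)
{
    /* A secondary add catches up the sibling's deferred root hub first. */
    if (!h->primary && h->shared && h->shared->defer_rh)
        register_root_hub(h->shared);

    if (!h->defer_rh)                   /* deferred hubs skip this step */
        register_root_hub(h);
}

int main(void)
{
    struct hcd first = { "hcd-a", true, true, NULL };
    struct hcd second = { "hcd-b", false, false, &first };

    add_hcd(&first);    /* nothing registered yet: registration deferred */
    add_hcd(&second);   /* registers hcd-a's root hub, then hcd-b's own */
    return 0;
}
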
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index 3146df6e6510d..8f7ee70f5bdcf 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -115,10 +115,16 @@ static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
+ */
+ static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
+ {
++ struct dwc2_hsotg *hsotg = hs_ep->parent;
++ u16 limit = DSTS_SOFFN_LIMIT;
++
++ if (hsotg->gadget.speed != USB_SPEED_HIGH)
++ limit >>= 3;
++
+ hs_ep->target_frame += hs_ep->interval;
+- if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
++ if (hs_ep->target_frame > limit) {
+ hs_ep->frame_overrun = true;
+- hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
++ hs_ep->target_frame &= limit;
+ } else {
+ hs_ep->frame_overrun = false;
+ }
+@@ -136,10 +142,16 @@ static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
+ */
+ static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
+ {
++ struct dwc2_hsotg *hsotg = hs_ep->parent;
++ u16 limit = DSTS_SOFFN_LIMIT;
++
++ if (hsotg->gadget.speed != USB_SPEED_HIGH)
++ limit >>= 3;
++
+ if (hs_ep->target_frame)
+ hs_ep->target_frame -= 1;
+ else
+- hs_ep->target_frame = DSTS_SOFFN_LIMIT;
++ hs_ep->target_frame = limit;
+ }
+
+ /**
+@@ -1018,6 +1030,12 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
+ dwc2_writel(hsotg, ctrl, depctl);
+ }
+
++static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep);
++static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
++ struct dwc2_hsotg_ep *hs_ep,
++ struct dwc2_hsotg_req *hs_req,
++ int result);
++
+ /**
+ * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
+ * @hsotg: The controller state.
+@@ -1170,14 +1188,19 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
+ }
+ }
+
+- if (hs_ep->isochronous && hs_ep->interval == 1) {
+- hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
+- dwc2_gadget_incr_frame_num(hs_ep);
+-
+- if (hs_ep->target_frame & 0x1)
+- ctrl |= DXEPCTL_SETODDFR;
+- else
+- ctrl |= DXEPCTL_SETEVENFR;
++ if (hs_ep->isochronous) {
++ if (!dwc2_gadget_target_frame_elapsed(hs_ep)) {
++ if (hs_ep->interval == 1) {
++ if (hs_ep->target_frame & 0x1)
++ ctrl |= DXEPCTL_SETODDFR;
++ else
++ ctrl |= DXEPCTL_SETEVENFR;
++ }
++ ctrl |= DXEPCTL_CNAK;
++ } else {
++ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
++ return;
++ }
+ }
+
+ ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
+@@ -1325,12 +1348,16 @@ static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
+ u32 target_frame = hs_ep->target_frame;
+ u32 current_frame = hsotg->frame_number;
+ bool frame_overrun = hs_ep->frame_overrun;
++ u16 limit = DSTS_SOFFN_LIMIT;
++
++ if (hsotg->gadget.speed != USB_SPEED_HIGH)
++ limit >>= 3;
+
+ if (!frame_overrun && current_frame >= target_frame)
+ return true;
+
+ if (frame_overrun && current_frame >= target_frame &&
+- ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
++ ((current_frame - target_frame) < limit / 2))
+ return true;
+
+ return false;
+@@ -1713,11 +1740,9 @@ static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
+ */
+ static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
+ {
+- u32 mask;
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ int dir_in = hs_ep->dir_in;
+ struct dwc2_hsotg_req *hs_req;
+- u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
+
+ if (!list_empty(&hs_ep->queue)) {
+ hs_req = get_ep_head(hs_ep);
+@@ -1733,9 +1758,6 @@ static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
+ } else {
+ dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
+ __func__);
+- mask = dwc2_readl(hsotg, epmsk_reg);
+- mask |= DOEPMSK_OUTTKNEPDISMSK;
+- dwc2_writel(hsotg, mask, epmsk_reg);
+ }
+ }
+
+@@ -2305,19 +2327,6 @@ static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
+ dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
+ }
+
+-static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
+- u32 epctl_reg)
+-{
+- u32 ctrl;
+-
+- ctrl = dwc2_readl(hsotg, epctl_reg);
+- if (ctrl & DXEPCTL_EOFRNUM)
+- ctrl |= DXEPCTL_SETEVENFR;
+- else
+- ctrl |= DXEPCTL_SETODDFR;
+- dwc2_writel(hsotg, ctrl, epctl_reg);
+-}
+-
+ /*
+ * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
+ * @hs_ep - The endpoint on which transfer went
+@@ -2438,20 +2447,11 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
+ dwc2_hsotg_ep0_zlp(hsotg, true);
+ }
+
+- /*
+- * Slave mode OUT transfers do not go through XferComplete so
+- * adjust the ISOC parity here.
+- */
+- if (!using_dma(hsotg)) {
+- if (hs_ep->isochronous && hs_ep->interval == 1)
+- dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum));
+- else if (hs_ep->isochronous && hs_ep->interval > 1)
+- dwc2_gadget_incr_frame_num(hs_ep);
+- }
+-
+ /* Set actual frame number for completed transfers */
+- if (!using_desc_dma(hsotg) && hs_ep->isochronous)
+- req->frame_number = hsotg->frame_number;
++ if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
++ req->frame_number = hs_ep->target_frame;
++ dwc2_gadget_incr_frame_num(hs_ep);
++ }
+
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
+ }
+@@ -2765,6 +2765,12 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
+ return;
+ }
+
++ /* Set actual frame number for completed transfers */
++ if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
++ hs_req->req.frame_number = hs_ep->target_frame;
++ dwc2_gadget_incr_frame_num(hs_ep);
++ }
++
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
+ }
+
+@@ -2825,23 +2831,18 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
+
+ dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
+
+- if (hs_ep->isochronous) {
+- dwc2_hsotg_complete_in(hsotg, hs_ep);
+- return;
+- }
+-
+ if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
+ int dctl = dwc2_readl(hsotg, DCTL);
+
+ dctl |= DCTL_CGNPINNAK;
+ dwc2_writel(hsotg, dctl, DCTL);
+ }
+- return;
+- }
++ } else {
+
+- if (dctl & DCTL_GOUTNAKSTS) {
+- dctl |= DCTL_CGOUTNAK;
+- dwc2_writel(hsotg, dctl, DCTL);
++ if (dctl & DCTL_GOUTNAKSTS) {
++ dctl |= DCTL_CGOUTNAK;
++ dwc2_writel(hsotg, dctl, DCTL);
++ }
+ }
+
+ if (!hs_ep->isochronous)
+@@ -2862,8 +2863,6 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
+ /* Update current frame number value. */
+ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
+ } while (dwc2_gadget_target_frame_elapsed(hs_ep));
+-
+- dwc2_gadget_start_next_request(hs_ep);
+ }
+
+ /**
+@@ -2880,8 +2879,8 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
+ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
+ {
+ struct dwc2_hsotg *hsotg = ep->parent;
++ struct dwc2_hsotg_req *hs_req;
+ int dir_in = ep->dir_in;
+- u32 doepmsk;
+
+ if (dir_in || !ep->isochronous)
+ return;
+@@ -2895,28 +2894,39 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
+ return;
+ }
+
+- if (ep->interval > 1 &&
+- ep->target_frame == TARGET_FRAME_INITIAL) {
++ if (ep->target_frame == TARGET_FRAME_INITIAL) {
+ u32 ctrl;
+
+ ep->target_frame = hsotg->frame_number;
+- dwc2_gadget_incr_frame_num(ep);
++ if (ep->interval > 1) {
++ ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
++ if (ep->target_frame & 0x1)
++ ctrl |= DXEPCTL_SETODDFR;
++ else
++ ctrl |= DXEPCTL_SETEVENFR;
+
+- ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
+- if (ep->target_frame & 0x1)
+- ctrl |= DXEPCTL_SETODDFR;
+- else
+- ctrl |= DXEPCTL_SETEVENFR;
++ dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
++ }
++ }
++
++ while (dwc2_gadget_target_frame_elapsed(ep)) {
++ hs_req = get_ep_head(ep);
++ if (hs_req)
++ dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
+
+- dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
++ dwc2_gadget_incr_frame_num(ep);
++ /* Update current frame number value. */
++ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
+ }
+
+- dwc2_gadget_start_next_request(ep);
+- doepmsk = dwc2_readl(hsotg, DOEPMSK);
+- doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
+- dwc2_writel(hsotg, doepmsk, DOEPMSK);
++ if (!ep->req)
++ dwc2_gadget_start_next_request(ep);
++
+ }
+
++static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
++ struct dwc2_hsotg_ep *hs_ep);
++
+ /**
+ * dwc2_gadget_handle_nak - handle NAK interrupt
+ * @hs_ep: The endpoint on which interrupt is asserted.
+@@ -2934,7 +2944,9 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
+ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
+ {
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
++ struct dwc2_hsotg_req *hs_req;
+ int dir_in = hs_ep->dir_in;
++ u32 ctrl;
+
+ if (!dir_in || !hs_ep->isochronous)
+ return;
+@@ -2976,13 +2988,29 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
+
+ dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
+ }
+-
+- dwc2_hsotg_complete_request(hsotg, hs_ep,
+- get_ep_head(hs_ep), 0);
+ }
+
+- if (!using_desc_dma(hsotg))
++ if (using_desc_dma(hsotg))
++ return;
++
++ ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
++ if (ctrl & DXEPCTL_EPENA)
++ dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
++ else
++ dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
++
++ while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
++ hs_req = get_ep_head(hs_ep);
++ if (hs_req)
++ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
++
+ dwc2_gadget_incr_frame_num(hs_ep);
++ /* Update current frame number value. */
++ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
++ }
++
++ if (!hs_ep->req)
++ dwc2_gadget_start_next_request(hs_ep);
+ }
+
+ /**
+@@ -3038,21 +3066,15 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
+
+ /* In DDMA handle isochronous requests separately */
+ if (using_desc_dma(hsotg) && hs_ep->isochronous) {
+- /* XferCompl set along with BNA */
+- if (!(ints & DXEPINT_BNAINTR))
+- dwc2_gadget_complete_isoc_request_ddma(hs_ep);
++ dwc2_gadget_complete_isoc_request_ddma(hs_ep);
+ } else if (dir_in) {
+ /*
+ * We get OutDone from the FIFO, so we only
+ * need to look at completing IN requests here
+ * if operating slave mode
+ */
+- if (hs_ep->isochronous && hs_ep->interval > 1)
+- dwc2_gadget_incr_frame_num(hs_ep);
+-
+- dwc2_hsotg_complete_in(hsotg, hs_ep);
+- if (ints & DXEPINT_NAKINTRPT)
+- ints &= ~DXEPINT_NAKINTRPT;
++ if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
++ dwc2_hsotg_complete_in(hsotg, hs_ep);
+
+ if (idx == 0 && !hs_ep->req)
+ dwc2_hsotg_enqueue_setup(hsotg);
+@@ -3061,10 +3083,8 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
+ * We're using DMA, we need to fire an OutDone here
+ * as we ignore the RXFIFO.
+ */
+- if (hs_ep->isochronous && hs_ep->interval > 1)
+- dwc2_gadget_incr_frame_num(hs_ep);
+-
+- dwc2_hsotg_handle_outdone(hsotg, idx);
++ if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
++ dwc2_hsotg_handle_outdone(hsotg, idx);
+ }
+ }
+
+@@ -4083,6 +4103,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
+ mask |= DIEPMSK_NAKMSK;
+ dwc2_writel(hsotg, mask, DIEPMSK);
+ } else {
++ epctrl |= DXEPCTL_SNAK;
+ mask = dwc2_readl(hsotg, DOEPMSK);
+ mask |= DOEPMSK_OUTTKNEPDISMSK;
+ dwc2_writel(hsotg, mask, DOEPMSK);
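
The recurring "limit >>= 3" in the dwc2 hunks above reflects that DSTS.SOFFN counts microframes (14 bits, 0x3FFF) at high speed but whole frames at full/low speed, so the usable counter range shrinks by a factor of 8. A worked, standalone model of the wrap logic:

#include <stdbool.h>
#include <stdio.h>

#define SOFFN_LIMIT 0x3FFF              /* mirrors DSTS_SOFFN_LIMIT's shape */

struct ep {
    unsigned int target_frame;
    unsigned int interval;
    bool high_speed;
    bool frame_overrun;
};

static void incr_frame_num(struct ep *ep)
{
    unsigned int limit = SOFFN_LIMIT;

    if (!ep->high_speed)
        limit >>= 3;                    /* full speed: frames, not microframes */

    ep->target_frame += ep->interval;
    if (ep->target_frame > limit) {
        ep->frame_overrun = true;
        ep->target_frame &= limit;      /* wrap into the counter's range */
    } else {
        ep->frame_overrun = false;
    }
}

int main(void)
{
    struct ep ep = { .target_frame = 0x7FE, .interval = 4, .high_speed = false };

    incr_frame_num(&ep);    /* 0x7FE + 4 wraps at the full-speed limit 0x7FF */
    printf("target=0x%x overrun=%d\n", ep.target_frame, ep.frame_overrun);
    return 0;
}
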
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index ba74ad7f6995e..2522d15c42447 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -264,19 +264,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
+ {
+ u32 reg;
+ int retries = 1000;
+- int ret;
+-
+- usb_phy_init(dwc->usb2_phy);
+- usb_phy_init(dwc->usb3_phy);
+- ret = phy_init(dwc->usb2_generic_phy);
+- if (ret < 0)
+- return ret;
+-
+- ret = phy_init(dwc->usb3_generic_phy);
+- if (ret < 0) {
+- phy_exit(dwc->usb2_generic_phy);
+- return ret;
+- }
+
+ /*
+ * We're resetting only the device side because, if we're in host mode,
+@@ -310,9 +297,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
+ udelay(1);
+ } while (--retries);
+
+- phy_exit(dwc->usb3_generic_phy);
+- phy_exit(dwc->usb2_generic_phy);
+-
+ return -ETIMEDOUT;
+
+ done:
+@@ -982,9 +966,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ dwc->phys_ready = true;
+ }
+
++ usb_phy_init(dwc->usb2_phy);
++ usb_phy_init(dwc->usb3_phy);
++ ret = phy_init(dwc->usb2_generic_phy);
++ if (ret < 0)
++ goto err0a;
++
++ ret = phy_init(dwc->usb3_generic_phy);
++ if (ret < 0) {
++ phy_exit(dwc->usb2_generic_phy);
++ goto err0a;
++ }
++
+ ret = dwc3_core_soft_reset(dwc);
+ if (ret)
+- goto err0a;
++ goto err1;
+
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
+ !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
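
The dwc3 change moves PHY bring-up out of the soft-reset helper into core init, with a conventional reverse-order unwind on later failures. A userspace sketch of that goto-unwind shape, using stub functions in place of the PHY and reset calls:

#include <stdio.h>

static int  phy2_init(void) { puts("phy2 up"); return 0; }
static void phy2_exit(void) { puts("phy2 down"); }
static int  phy3_init(void) { puts("phy3 up"); return 0; }
static void phy3_exit(void) { puts("phy3 down"); }
static int  soft_reset(void) { puts("soft reset"); return -1; /* say it fails */ }

static int core_init(void)
{
    int ret;

    ret = phy2_init();
    if (ret)
        return ret;
    ret = phy3_init();
    if (ret)
        goto err_phy2;
    ret = soft_reset();
    if (ret)
        goto err_phy3;          /* unwind in reverse order of setup */
    return 0;

err_phy3:
    phy3_exit();
err_phy2:
    phy2_exit();
    return ret;
}

int main(void)
{
    return core_init() ? 1 : 0;
}
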
+diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
+index ae29ff2b2b686..37c94031af1ed 100644
+--- a/drivers/usb/gadget/function/f_uac2.c
++++ b/drivers/usb/gadget/function/f_uac2.c
+@@ -348,6 +348,14 @@ static struct usb_endpoint_descriptor ss_epin_fback_desc = {
+ .bInterval = 4,
+ };
+
++static struct usb_ss_ep_comp_descriptor ss_epin_fback_desc_comp = {
++ .bLength = sizeof(ss_epin_fback_desc_comp),
++ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
++ .bMaxBurst = 0,
++ .bmAttributes = 0,
++ .wBytesPerInterval = cpu_to_le16(4),
++};
++
+
+ /* Audio Streaming IN Interface - Alt0 */
+ static struct usb_interface_descriptor std_as_in_if0_desc = {
+@@ -527,6 +535,7 @@ static struct usb_descriptor_header *ss_audio_desc[] = {
+ (struct usb_descriptor_header *)&ss_epout_desc_comp,
+ (struct usb_descriptor_header *)&as_iso_out_desc,
+ (struct usb_descriptor_header *)&ss_epin_fback_desc,
++ (struct usb_descriptor_header *)&ss_epin_fback_desc_comp,
+
+ (struct usb_descriptor_header *)&std_as_in_if0_desc,
+ (struct usb_descriptor_header *)&std_as_in_if1_desc,
+@@ -604,6 +613,7 @@ static void setup_headers(struct f_uac2_opts *opts,
+ {
+ struct usb_ss_ep_comp_descriptor *epout_desc_comp = NULL;
+ struct usb_ss_ep_comp_descriptor *epin_desc_comp = NULL;
++ struct usb_ss_ep_comp_descriptor *epin_fback_desc_comp = NULL;
+ struct usb_endpoint_descriptor *epout_desc;
+ struct usb_endpoint_descriptor *epin_desc;
+ struct usb_endpoint_descriptor *epin_fback_desc;
+@@ -626,6 +636,7 @@ static void setup_headers(struct f_uac2_opts *opts,
+ epout_desc_comp = &ss_epout_desc_comp;
+ epin_desc_comp = &ss_epin_desc_comp;
+ epin_fback_desc = &ss_epin_fback_desc;
++ epin_fback_desc_comp = &ss_epin_fback_desc_comp;
+ }
+
+ i = 0;
+@@ -654,8 +665,11 @@ static void setup_headers(struct f_uac2_opts *opts,
+
+ headers[i++] = USBDHDR(&as_iso_out_desc);
+
+- if (EPOUT_FBACK_IN_EN(opts))
++ if (EPOUT_FBACK_IN_EN(opts)) {
+ headers[i++] = USBDHDR(epin_fback_desc);
++ if (epin_fback_desc_comp)
++ headers[i++] = USBDHDR(epin_fback_desc_comp);
++ }
+ }
+ if (EPIN_EN(opts)) {
+ headers[i++] = USBDHDR(&std_as_in_if0_desc);
+@@ -937,6 +951,9 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
+ agdev->out_ep_maxpsize = max_t(u16, agdev->out_ep_maxpsize,
+ le16_to_cpu(ss_epout_desc.wMaxPacketSize));
+
++ ss_epin_desc_comp.wBytesPerInterval = ss_epin_desc.wMaxPacketSize;
++ ss_epout_desc_comp.wBytesPerInterval = ss_epout_desc.wMaxPacketSize;
++
+ hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
+ hs_epin_fback_desc.bEndpointAddress = fs_epin_fback_desc.bEndpointAddress;
+ hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
+diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
+index 9e5c950612d06..b1aef892bfa38 100644
+--- a/drivers/usb/gadget/function/u_audio.c
++++ b/drivers/usb/gadget/function/u_audio.c
+@@ -76,11 +76,13 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
+ };
+
+ static void u_audio_set_fback_frequency(enum usb_device_speed speed,
++ struct usb_ep *out_ep,
+ unsigned long long freq,
+ unsigned int pitch,
+ void *buf)
+ {
+ u32 ff = 0;
++ const struct usb_endpoint_descriptor *ep_desc;
+
+ /*
+ * Because the pitch base is 1000000, the final divider here
+@@ -108,8 +110,13 @@ static void u_audio_set_fback_frequency(enum usb_device_speed speed,
+ * byte format (that is Q16.16)
+ *
+ * ff = (freq << 16) / 8000
++ *
++ * Win10 and OSX UAC2 drivers require the number of samples per packet
++ * in order to honor the feedback value.
++ * Linux snd-usb-audio detects the applied bit-shift automatically.
+ */
+- freq <<= 4;
++ ep_desc = out_ep->desc;
++ freq <<= 4 + (ep_desc->bInterval - 1);
+ }
+
+ ff = DIV_ROUND_CLOSEST_ULL((freq * pitch), 1953125);
+@@ -247,7 +254,7 @@ static void u_audio_iso_fback_complete(struct usb_ep *ep,
+ pr_debug("%s: iso_complete status(%d) %d/%d\n",
+ __func__, status, req->actual, req->length);
+
+- u_audio_set_fback_frequency(audio_dev->gadget->speed,
++ u_audio_set_fback_frequency(audio_dev->gadget->speed, audio_dev->out_ep,
+ params->c_srate, prm->pitch,
+ req->buf);
+
+@@ -506,7 +513,7 @@ int u_audio_start_capture(struct g_audio *audio_dev)
+ * be measured at start of playback
+ */
+ prm->pitch = 1000000;
+- u_audio_set_fback_frequency(audio_dev->gadget->speed,
++ u_audio_set_fback_frequency(audio_dev->gadget->speed, ep,
+ params->c_srate, prm->pitch,
+ req_fback->buf);
+
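
A standalone check of the feedback arithmetic above: for a high-speed feedback endpoint the value is Q16.16 samples per packet, the divider 1953125 equals 8000 * 1000000 / 4096 (which is why a shift of 4 rather than 16 appears), and the extra (bInterval - 1) shift folds in the 2^(bInterval - 1) microframes per packet. Function names here are mine:

#include <stdint.h>
#include <stdio.h>

/* Round-to-nearest division, like the kernel's DIV_ROUND_CLOSEST_ULL(). */
static uint64_t div_round_closest(uint64_t n, uint64_t d)
{
    return (n + d / 2) / d;
}

static uint32_t fback_value(uint64_t freq_hz, unsigned int pitch_ppm,
                            unsigned int binterval)
{
    freq_hz <<= 4 + (binterval - 1);    /* high-speed case from the patch */
    return (uint32_t)div_round_closest(freq_hz * pitch_ppm, 1953125);
}

int main(void)
{
    /* 48 kHz, nominal pitch (1000000), bInterval 1: one packet per
     * microframe, so 48000 / 8000 = 6 samples -> 6.0 in Q16.16. */
    printf("0x%08x\n", fback_value(48000, 1000000, 1)); /* 0x00060000 */

    /* bInterval 4: a packet every 2^(4-1) = 8 microframes, so 48 samples
     * per packet -> 48.0 in Q16.16 once the extra shift is applied. */
    printf("0x%08x\n", fback_value(48000, 1000000, 4)); /* 0x00300000 */
    return 0;
}
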
+diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
+index 65cae48834545..38e4d6b505a05 100644
+--- a/drivers/usb/gadget/udc/r8a66597-udc.c
++++ b/drivers/usb/gadget/udc/r8a66597-udc.c
+@@ -1250,7 +1250,7 @@ static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
+ do {
+ tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
+ udelay(1);
+- } while (tmp != CS_IDST || timeout-- > 0);
++ } while (tmp != CS_IDST && timeout-- > 0);
+
+ if (tmp == CS_IDST)
+ r8a66597_bset(r8a66597,
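
The one-character fix above matters more than it looks: with "||" the loop keeps spinning until the timeout is exhausted even if the controller reaches CS_IDST immediately, and never exits at all if it doesn't. A tiny model of the corrected condition:

#include <stdio.h>

#define CS_IDST 5                       /* stand-in for the controller state */

int main(void)
{
    int timeout = 300, state = 0, tmp;

    do {
        tmp = (++state >= CS_IDST) ? CS_IDST : state;   /* fake state machine */
    } while (tmp != CS_IDST && timeout-- > 0);  /* was: tmp != CS_IDST || ... */

    printf("state=%d, timeout left=%d\n", tmp, timeout);
    return 0;
}
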
+diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
+index 337b425dd4b04..2df52f75f6b3c 100644
+--- a/drivers/usb/host/bcma-hcd.c
++++ b/drivers/usb/host/bcma-hcd.c
+@@ -406,12 +406,9 @@ static int bcma_hcd_probe(struct bcma_device *core)
+ return -ENOMEM;
+ usb_dev->core = core;
+
+- if (core->dev.of_node) {
++ if (core->dev.of_node)
+ usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
+ GPIOD_OUT_HIGH);
+- if (IS_ERR(usb_dev->gpio_desc))
+- return PTR_ERR(usb_dev->gpio_desc);
+- }
+
+ switch (core->id.id) {
+ case BCMA_CORE_USB20_HOST:
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 18a203c9011eb..4a1346e3de1b2 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -692,6 +692,7 @@ int xhci_run(struct usb_hcd *hcd)
+ if (ret)
+ xhci_free_command(xhci, command);
+ }
++ set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Finished xhci_run for USB2 roothub");
+
+diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
+index e517376c32917..cf13db3d1695d 100644
+--- a/drivers/usb/isp1760/isp1760-hcd.c
++++ b/drivers/usb/isp1760/isp1760-hcd.c
+@@ -251,7 +251,7 @@ static int isp1760_hcd_set_and_wait(struct usb_hcd *hcd, u32 field,
+ isp1760_hcd_set(hcd, field);
+
+ return regmap_field_read_poll_timeout(priv->fields[field], val,
+- val, 10, timeout_us);
++ val, 0, timeout_us);
+ }
+
+ static int isp1760_hcd_set_and_wait_swap(struct usb_hcd *hcd, u32 field,
+@@ -263,7 +263,7 @@ static int isp1760_hcd_set_and_wait_swap(struct usb_hcd *hcd, u32 field,
+ isp1760_hcd_set(hcd, field);
+
+ return regmap_field_read_poll_timeout(priv->fields[field], val,
+- !val, 10, timeout_us);
++ !val, 0, timeout_us);
+ }
+
+ static int isp1760_hcd_clear_and_wait(struct usb_hcd *hcd, u32 field,
+@@ -275,7 +275,7 @@ static int isp1760_hcd_clear_and_wait(struct usb_hcd *hcd, u32 field,
+ isp1760_hcd_clear(hcd, field);
+
+ return regmap_field_read_poll_timeout(priv->fields[field], val,
+- !val, 10, timeout_us);
++ !val, 0, timeout_us);
+ }
+
+ static bool isp1760_hcd_is_set(struct usb_hcd *hcd, u32 field)
+diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
+index c429376922079..c968ecda42aa8 100644
+--- a/drivers/usb/musb/tusb6010.c
++++ b/drivers/usb/musb/tusb6010.c
+@@ -190,6 +190,7 @@ tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
+ }
+ if (len > 0) {
+ /* Write the remaining 1 - 3 bytes to FIFO */
++ val = 0;
+ memcpy(&val, buf, len);
+ musb_writel(fifo, 0, val);
+ }
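
The added "val = 0" above closes a small garbage write: when len < 4, memcpy() fills only the low bytes of val and the remainder was whatever the stack held, which then went out on the FIFO. A standalone demonstration:

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const uint8_t buf[3] = { 0xAA, 0xBB, 0xCC };
    uint32_t val = 0;                   /* the added initialization */

    memcpy(&val, buf, sizeof(buf));     /* only 3 of val's 4 bytes written */
    printf("0x%08" PRIx32 "\n", val);   /* 0x00ccbbaa on little-endian */
    return 0;
}
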
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index d48bed5782a5c..3aaf52d9985bd 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -233,6 +233,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
+ { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
+ { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
++ { USB_DEVICE(0x2184, 0x0030) }, /* GW Instek GDM-834x Digital Multimeter */
+ { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
+ { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+@@ -258,6 +259,7 @@ struct cp210x_serial_private {
+ speed_t max_speed;
+ bool use_actual_rate;
+ bool no_flow_control;
++ bool no_event_mode;
+ };
+
+ enum cp210x_event_state {
+@@ -1112,12 +1114,16 @@ static void cp210x_change_speed(struct tty_struct *tty,
+
+ static void cp210x_enable_event_mode(struct usb_serial_port *port)
+ {
++ struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
+ struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
+ int ret;
+
+ if (port_priv->event_mode)
+ return;
+
++ if (priv->no_event_mode)
++ return;
++
+ port_priv->event_state = ES_DATA;
+ port_priv->event_mode = true;
+
+@@ -2097,6 +2103,33 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
+ priv->use_actual_rate = use_actual_rate;
+ }
+
++static void cp2102_determine_quirks(struct usb_serial *serial)
++{
++ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
++ u8 *buf;
++ int ret;
++
++ buf = kmalloc(2, GFP_KERNEL);
++ if (!buf)
++ return;
++ /*
++ * Some (possibly counterfeit) CP2102 do not support event-insertion
++ * mode and respond differently to malformed vendor requests.
++ * Specifically, they return one instead of two bytes when sent a
++ * two-byte part-number request.
++ */
++ ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
++ CP210X_VENDOR_SPECIFIC, REQTYPE_DEVICE_TO_HOST,
++ CP210X_GET_PARTNUM, 0, buf, 2, USB_CTRL_GET_TIMEOUT);
++ if (ret == 1) {
++ dev_dbg(&serial->interface->dev,
++ "device does not support event-insertion mode\n");
++ priv->no_event_mode = true;
++ }
++
++ kfree(buf);
++}
++
+ static int cp210x_get_fw_version(struct usb_serial *serial, u16 value)
+ {
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+@@ -2122,6 +2155,9 @@ static void cp210x_determine_quirks(struct usb_serial *serial)
+ int ret;
+
+ switch (priv->partnum) {
++ case CP210X_PARTNUM_CP2102:
++ cp2102_determine_quirks(serial);
++ break;
+ case CP210X_PARTNUM_CP2102N_QFN28:
+ case CP210X_PARTNUM_CP2102N_QFN24:
+ case CP210X_PARTNUM_CP2102N_QFN20:
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index d7fe33ca73e4c..925067a7978d4 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -107,7 +107,6 @@
+ #define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02
+ #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
+ #define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
+-#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
+
+ /* Interrupt Routine Defines */
+
+@@ -186,7 +185,6 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P) },
+- { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4) },
+ {} /* terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 29c765cc84957..6cfb5d33609fb 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1205,6 +1205,14 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */
+ .driver_info = NCTRL(2) | RSVD(3) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff), /* Telit LN920 (rmnet) */
++ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff), /* Telit LN920 (MBIM) */
++ .driver_info = NCTRL(0) | RSVD(1) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1062, 0xff), /* Telit LN920 (RNDIS) */
++ .driver_info = NCTRL(2) | RSVD(3) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */
++ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+@@ -1650,7 +1658,6 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(1) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
+@@ -2068,6 +2075,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
++ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */
++ .driver_info = RSVD(3) },
+ { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
+ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index efa972be2ee34..c6b3fcf901805 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -416,9 +416,16 @@ UNUSUAL_DEV( 0x04cb, 0x0100, 0x0000, 0x2210,
+ USB_SC_UFI, USB_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN),
+
+ /*
+- * Reported by Ondrej Zary <linux@rainbow-software.org>
++ * Reported by Ondrej Zary <linux@zary.sk>
+ * The device reports one sector more and breaks when that sector is accessed
+ * Firmware versions older than 2.6c (the latest one, and the only one that
+ * claims Linux support) also have broken tag handling
+ */
++UNUSUAL_DEV( 0x04ce, 0x0002, 0x0000, 0x026b,
++ "ScanLogic",
++ "SL11R-IDE",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_FIX_CAPACITY | US_FL_BULK_IGNORE_TAG),
+ UNUSUAL_DEV( 0x04ce, 0x0002, 0x026c, 0x026c,
+ "ScanLogic",
+ "SL11R-IDE",
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index c35a6db993f1b..4051c8cd0cd8a 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -50,7 +50,7 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
+ "LaCie",
+ "Rugged USB3-FW",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+- US_FL_IGNORE_UAS),
++ US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
+
+ /*
+ * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
+index 671c71245a7b2..43ebfe36ac276 100644
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -43,6 +43,8 @@
+ #include <linux/sched.h>
+ #include <linux/cred.h>
+ #include <linux/errno.h>
++#include <linux/freezer.h>
++#include <linux/kthread.h>
+ #include <linux/mm.h>
+ #include <linux/memblock.h>
+ #include <linux/pagemap.h>
+@@ -115,7 +117,7 @@ static struct ctl_table xen_root[] = {
+ #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
+
+ /*
+- * balloon_process() state:
++ * balloon_thread() state:
+ *
+ * BP_DONE: done or nothing to do,
+ * BP_WAIT: wait to be rescheduled,
+@@ -130,6 +132,8 @@ enum bp_state {
+ BP_ECANCELED
+ };
+
++/* Main waiting point for xen-balloon thread. */
++static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);
+
+ static DEFINE_MUTEX(balloon_mutex);
+
+@@ -144,10 +148,6 @@ static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
+ static LIST_HEAD(ballooned_pages);
+ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
+
+-/* Main work function, always executed in process context. */
+-static void balloon_process(struct work_struct *work);
+-static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
+-
+ /* When ballooning out (allocating memory to return to Xen) we don't really
+ want the kernel to try too hard since that can trigger the oom killer. */
+ #define GFP_BALLOON \
+@@ -366,7 +366,7 @@ static void xen_online_page(struct page *page, unsigned int order)
+ static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
+ {
+ if (val == MEM_ONLINE)
+- schedule_delayed_work(&balloon_worker, 0);
++ wake_up(&balloon_thread_wq);
+
+ return NOTIFY_OK;
+ }
+@@ -491,18 +491,43 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
+ }
+
+ /*
+- * As this is a work item it is guaranteed to run as a single instance only.
++ * Stop waiting if either state is not BP_EAGAIN and ballooning action is
++ * needed, or if the credit has changed while state is BP_EAGAIN.
++ */
++static bool balloon_thread_cond(enum bp_state state, long credit)
++{
++ if (state != BP_EAGAIN)
++ credit = 0;
++
++ return current_credit() != credit || kthread_should_stop();
++}
++
++/*
++ * As this is a kthread it is guaranteed to run as a single instance only.
+ * We may of course race updates of the target counts (which are protected
+ * by the balloon lock), or with changes to the Xen hard limit, but we will
+ * recover from these in time.
+ */
+-static void balloon_process(struct work_struct *work)
++static int balloon_thread(void *unused)
+ {
+ enum bp_state state = BP_DONE;
+ long credit;
++ unsigned long timeout;
++
++ set_freezable();
++ for (;;) {
++ if (state == BP_EAGAIN)
++ timeout = balloon_stats.schedule_delay * HZ;
++ else
++ timeout = 3600 * HZ;
++ credit = current_credit();
+
++ wait_event_freezable_timeout(balloon_thread_wq,
++ balloon_thread_cond(state, credit), timeout);
++
++ if (kthread_should_stop())
++ return 0;
+
+- do {
+ mutex_lock(&balloon_mutex);
+
+ credit = current_credit();
+@@ -529,12 +554,7 @@ static void balloon_process(struct work_struct *work)
+ mutex_unlock(&balloon_mutex);
+
+ cond_resched();
+-
+- } while (credit && state == BP_DONE);
+-
+- /* Schedule more work if there is some still to be done. */
+- if (state == BP_EAGAIN)
+- schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
++ }
+ }
+
+ /* Resets the Xen limit, sets new target, and kicks off processing. */
+@@ -542,7 +562,7 @@ void balloon_set_new_target(unsigned long target)
+ {
+ /* No need for lock. Not read-modify-write updates. */
+ balloon_stats.target_pages = target;
+- schedule_delayed_work(&balloon_worker, 0);
++ wake_up(&balloon_thread_wq);
+ }
+ EXPORT_SYMBOL_GPL(balloon_set_new_target);
+
+@@ -647,7 +667,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
+
+ /* The balloon may be too large now. Shrink it if needed. */
+ if (current_credit())
+- schedule_delayed_work(&balloon_worker, 0);
++ wake_up(&balloon_thread_wq);
+
+ mutex_unlock(&balloon_mutex);
+ }
+@@ -679,6 +699,8 @@ static void __init balloon_add_region(unsigned long start_pfn,
+
+ static int __init balloon_init(void)
+ {
++ struct task_struct *task;
++
+ if (!xen_domain())
+ return -ENODEV;
+
+@@ -722,6 +744,12 @@ static int __init balloon_init(void)
+ }
+ #endif
+
++ task = kthread_run(balloon_thread, NULL, "xen-balloon");
++ if (IS_ERR(task)) {
++ pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
++ return PTR_ERR(task);
++ }
++
+ /* Init the xen-balloon driver. */
+ xen_balloon_init();
+
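
The wake condition added above is worth restating: outside of BP_EAGAIN any nonzero credit means there is work, while during BP_EAGAIN backoff the thread should only wake early if the credit moved away from the value it went to sleep with. A pure-logic model of balloon_thread_cond() (kthread_should_stop() omitted):

#include <stdbool.h>
#include <stdio.h>

enum bp_state { BP_DONE, BP_EAGAIN };

static bool should_wake(enum bp_state state, long slept_credit, long credit_now)
{
    if (state != BP_EAGAIN)
        slept_credit = 0;               /* any pending credit counts as work */
    return credit_now != slept_credit;
}

int main(void)
{
    printf("%d\n", should_wake(BP_DONE, 0, 5));     /* 1: work arrived */
    printf("%d\n", should_wake(BP_EAGAIN, 5, 5));   /* 0: still backing off */
    printf("%d\n", should_wake(BP_EAGAIN, 5, 9));   /* 1: target changed */
    return 0;
}
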
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index ac829e63c5704..54ee54ae36bc8 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -1077,9 +1077,9 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
+ */
+ static int afs_d_revalidate_rcu(struct dentry *dentry)
+ {
+- struct afs_vnode *dvnode, *vnode;
++ struct afs_vnode *dvnode;
+ struct dentry *parent;
+- struct inode *dir, *inode;
++ struct inode *dir;
+ long dir_version, de_version;
+
+ _enter("%p", dentry);
+@@ -1109,18 +1109,6 @@ static int afs_d_revalidate_rcu(struct dentry *dentry)
+ return -ECHILD;
+ }
+
+- /* Check to see if the vnode referred to by the dentry still
+- * has a callback.
+- */
+- if (d_really_is_positive(dentry)) {
+- inode = d_inode_rcu(dentry);
+- if (inode) {
+- vnode = AFS_FS_I(inode);
+- if (!afs_check_validity(vnode))
+- return -ECHILD;
+- }
+- }
+-
+ return 1; /* Still valid */
+ }
+
+@@ -1156,17 +1144,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ if (IS_ERR(key))
+ key = NULL;
+
+- if (d_really_is_positive(dentry)) {
+- inode = d_inode(dentry);
+- if (inode) {
+- vnode = AFS_FS_I(inode);
+- afs_validate(vnode, key);
+- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+- goto out_bad;
+- }
+- }
+-
+- /* lock down the parent dentry so we can peer at it */
++ /* Hold the parent dentry so we can peer at it */
+ parent = dget_parent(dentry);
+ dir = AFS_FS_I(d_inode(parent));
+
+@@ -1175,7 +1153,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
+
+ if (test_bit(AFS_VNODE_DELETED, &dir->flags)) {
+ _debug("%pd: parent dir deleted", dentry);
+- goto out_bad_parent;
++ goto not_found;
+ }
+
+ /* We only need to invalidate a dentry if the server's copy changed
+@@ -1201,12 +1179,12 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ case 0:
+ /* the filename maps to something */
+ if (d_really_is_negative(dentry))
+- goto out_bad_parent;
++ goto not_found;
+ inode = d_inode(dentry);
+ if (is_bad_inode(inode)) {
+ printk("kAFS: afs_d_revalidate: %pd2 has bad inode\n",
+ dentry);
+- goto out_bad_parent;
++ goto not_found;
+ }
+
+ vnode = AFS_FS_I(inode);
+@@ -1228,9 +1206,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ dentry, fid.unique,
+ vnode->fid.unique,
+ vnode->vfs_inode.i_generation);
+- write_seqlock(&vnode->cb_lock);
+- set_bit(AFS_VNODE_DELETED, &vnode->flags);
+- write_sequnlock(&vnode->cb_lock);
+ goto not_found;
+ }
+ goto out_valid;
+@@ -1245,7 +1220,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ default:
+ _debug("failed to iterate dir %pd: %d",
+ parent, ret);
+- goto out_bad_parent;
++ goto not_found;
+ }
+
+ out_valid:
+@@ -1256,16 +1231,9 @@ out_valid_noupdate:
+ _leave(" = 1 [valid]");
+ return 1;
+
+- /* the dirent, if it exists, now points to a different vnode */
+ not_found:
+- spin_lock(&dentry->d_lock);
+- dentry->d_flags |= DCACHE_NFSFS_RENAMED;
+- spin_unlock(&dentry->d_lock);
+-
+-out_bad_parent:
+ _debug("dropping dentry %pd2", dentry);
+ dput(parent);
+-out_bad:
+ key_put(key);
+
+ _leave(" = 0 [bad]");
+diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
+index f4600c1353adf..540b9fc96824a 100644
+--- a/fs/afs/dir_edit.c
++++ b/fs/afs/dir_edit.c
+@@ -263,7 +263,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
+ if (b == nr_blocks) {
+ _debug("init %u", b);
+ afs_edit_init_block(meta, block, b);
+- i_size_write(&vnode->vfs_inode, (b + 1) * AFS_DIR_BLOCK_SIZE);
++ afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE);
+ }
+
+ /* Only lower dir pages have a counter in the header. */
+@@ -296,7 +296,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
+ new_directory:
+ afs_edit_init_block(meta, meta, 0);
+ i_size = AFS_DIR_BLOCK_SIZE;
+- i_size_write(&vnode->vfs_inode, i_size);
++ afs_set_i_size(vnode, i_size);
+ slot = AFS_DIR_RESV_BLOCKS0;
+ page = page0;
+ block = meta;
+diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
+index e7e98ad63a91a..c0031a3ab42f5 100644
+--- a/fs/afs/fs_probe.c
++++ b/fs/afs/fs_probe.c
+@@ -9,6 +9,7 @@
+ #include <linux/slab.h>
+ #include "afs_fs.h"
+ #include "internal.h"
++#include "protocol_afs.h"
+ #include "protocol_yfs.h"
+
+ static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ;
+@@ -102,7 +103,7 @@ void afs_fileserver_probe_result(struct afs_call *call)
+ struct afs_addr_list *alist = call->alist;
+ struct afs_server *server = call->server;
+ unsigned int index = call->addr_ix;
+- unsigned int rtt_us = 0;
++ unsigned int rtt_us = 0, cap0;
+ int ret = call->error;
+
+ _enter("%pU,%u", &server->uuid, index);
+@@ -159,6 +160,11 @@ responded:
+ clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
+ alist->addrs[index].srx_service = call->service_id;
+ }
++ cap0 = ntohl(call->tmp);
++ if (cap0 & AFS3_VICED_CAPABILITY_64BITFILES)
++ set_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
++ else
++ clear_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
+ }
+
+ if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
+diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
+index dd3f45d906d23..4943413d9c5f7 100644
+--- a/fs/afs/fsclient.c
++++ b/fs/afs/fsclient.c
+@@ -456,9 +456,7 @@ void afs_fs_fetch_data(struct afs_operation *op)
+ struct afs_read *req = op->fetch.req;
+ __be32 *bp;
+
+- if (upper_32_bits(req->pos) ||
+- upper_32_bits(req->len) ||
+- upper_32_bits(req->pos + req->len))
++ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
+ return afs_fs_fetch_data64(op);
+
+ _enter("");
+@@ -1113,9 +1111,7 @@ void afs_fs_store_data(struct afs_operation *op)
+ (unsigned long long)op->store.pos,
+ (unsigned long long)op->store.i_size);
+
+- if (upper_32_bits(op->store.pos) ||
+- upper_32_bits(op->store.size) ||
+- upper_32_bits(op->store.i_size))
++ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
+ return afs_fs_store_data64(op);
+
+ call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData,
+@@ -1229,7 +1225,7 @@ static void afs_fs_setattr_size(struct afs_operation *op)
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
+
+ ASSERT(attr->ia_valid & ATTR_SIZE);
+- if (upper_32_bits(attr->ia_size))
++ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
+ return afs_fs_setattr_size64(op);
+
+ call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData_as_Status,
+@@ -1657,20 +1653,33 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
+ return ret;
+
+ count = ntohl(call->tmp);
+-
+ call->count = count;
+ call->count2 = count;
+- afs_extract_discard(call, count * sizeof(__be32));
++ if (count == 0) {
++ call->unmarshall = 4;
++ call->tmp = 0;
++ break;
++ }
++
++ /* Extract the first word of the capabilities to call->tmp */
++ afs_extract_to_tmp(call);
+ call->unmarshall++;
+ fallthrough;
+
+- /* Extract capabilities words */
+ case 2:
+ ret = afs_extract_data(call, false);
+ if (ret < 0)
+ return ret;
+
+- /* TODO: Examine capabilities */
++ afs_extract_discard(call, (count - 1) * sizeof(__be32));
++ call->unmarshall++;
++ fallthrough;
++
++ /* Extract remaining capabilities words */
++ case 3:
++ ret = afs_extract_data(call, false);
++ if (ret < 0)
++ return ret;
+
+ call->unmarshall++;
+ break;
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index 80b6c8d967d5c..c18cbc69fa582 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -53,16 +53,6 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
+ dump_stack();
+ }
+
+-/*
+- * Set the file size and block count. Estimate the number of 512-byte blocks
+- * used, rounded up to the nearest 1K for consistency with other AFS clients.
+- */
+-static void afs_set_i_size(struct afs_vnode *vnode, u64 size)
+-{
+- i_size_write(&vnode->vfs_inode, size);
+- vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
+-}
+-
+ /*
+ * Initialise an inode from the vnode status.
+ */
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 5ed416f4ff335..345494881f655 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -516,6 +516,7 @@ struct afs_server {
+ #define AFS_SERVER_FL_IS_YFS 16 /* Server is YFS not AFS */
+ #define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */
+ #define AFS_SERVER_FL_NO_RM2 18 /* Fileserver doesn't support YFS.RemoveFile2 */
++#define AFS_SERVER_FL_HAS_FS64 19 /* Fileserver supports FS.{Fetch,Store}Data64 */
+ atomic_t ref; /* Object refcount */
+ atomic_t active; /* Active user count */
+ u32 addr_version; /* Address list version */
+@@ -1585,6 +1586,16 @@ static inline void afs_update_dentry_version(struct afs_operation *op,
+ (void *)(unsigned long)dir_vp->scb.status.data_version;
+ }
+
++/*
++ * Set the file size and block count. Estimate the number of 512-byte blocks
++ * used, rounded up to the nearest 1K for consistency with other AFS clients.
++ */
++static inline void afs_set_i_size(struct afs_vnode *vnode, u64 size)
++{
++ i_size_write(&vnode->vfs_inode, size);
++ vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
++}
++
+ /*
+ * Check for a conflicting operation on a directory that we just unlinked from.
+ * If someone managed to sneak a link or an unlink in on the file we just
+diff --git a/fs/afs/protocol_afs.h b/fs/afs/protocol_afs.h
+new file mode 100644
+index 0000000000000..0c39358c8b702
+--- /dev/null
++++ b/fs/afs/protocol_afs.h
+@@ -0,0 +1,15 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/* AFS protocol bits
++ *
++ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
++ * Written by David Howells (dhowells@redhat.com)
++ */
++
++
++#define AFSCAPABILITIESMAX 196 /* Maximum number of words in a capability set */
++
++/* AFS3 Fileserver capabilities word 0 */
++#define AFS3_VICED_CAPABILITY_ERRORTRANS 0x0001 /* Uses UAE errors */
++#define AFS3_VICED_CAPABILITY_64BITFILES 0x0002 /* FetchData64 & StoreData64 supported */
++#define AFS3_VICED_CAPABILITY_WRITELOCKACL 0x0004 /* Can lock a file even without lock perm */
++#define AFS3_VICED_CAPABILITY_SANEACLS 0x0008 /* ACLs reviewed for sanity - don't use */
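The new header above is consumed by the fs_probe.c hunk earlier in this patch: the probe reply's first capabilities word is byte-swapped and tested for AFS3_VICED_CAPABILITY_64BITFILES, which then drives the AFS_SERVER_FL_HAS_FS64 server flag. A minimal standalone sketch of that decode step (the helper name and user-space types are illustrative, not kernel code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <arpa/inet.h>          /* ntohl() */

    #define AFS3_VICED_CAPABILITY_64BITFILES 0x0002

    /* Hypothetical helper: given capability word 0 as received off the
     * wire (big-endian), decide whether FS.FetchData64/FS.StoreData64
     * may be used against this fileserver. */
    static bool server_supports_fs64(uint32_t cap0_wire)
    {
            return ntohl(cap0_wire) & AFS3_VICED_CAPABILITY_64BITFILES;
    }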
+diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
+index b5bd03b1d3c7f..e4cd89c44c465 100644
+--- a/fs/afs/protocol_yfs.h
++++ b/fs/afs/protocol_yfs.h
+@@ -168,3 +168,9 @@ enum yfs_lock_type {
+ yfs_LockMandatoryWrite = 0x101,
+ yfs_LockMandatoryExtend = 0x102,
+ };
++
++/* RXYFS Viced Capability Flags */
++#define YFS_VICED_CAPABILITY_ERRORTRANS 0x0001 /* Deprecated v0.195 */
++#define YFS_VICED_CAPABILITY_64BITFILES 0x0002 /* Deprecated v0.195 */
++#define YFS_VICED_CAPABILITY_WRITELOCKACL 0x0004 /* Can lock a file even without lock perm */
++#define YFS_VICED_CAPABILITY_SANEACLS 0x0008 /* Deprecated v0.195 */
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index c0534697268ef..e86f5a245514d 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -137,7 +137,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
+ write_seqlock(&vnode->cb_lock);
+ i_size = i_size_read(&vnode->vfs_inode);
+ if (maybe_i_size > i_size)
+- i_size_write(&vnode->vfs_inode, maybe_i_size);
++ afs_set_i_size(vnode, maybe_i_size);
+ write_sequnlock(&vnode->cb_lock);
+ }
+
+@@ -471,13 +471,18 @@ static void afs_extend_writeback(struct address_space *mapping,
+ }
+
+ /* Has the page moved or been split? */
+- if (unlikely(page != xas_reload(&xas)))
++ if (unlikely(page != xas_reload(&xas))) {
++ put_page(page);
+ break;
++ }
+
+- if (!trylock_page(page))
++ if (!trylock_page(page)) {
++ put_page(page);
+ break;
++ }
+ if (!PageDirty(page) || PageWriteback(page)) {
+ unlock_page(page);
++ put_page(page);
+ break;
+ }
+
+@@ -487,6 +492,7 @@ static void afs_extend_writeback(struct address_space *mapping,
+ t = afs_page_dirty_to(page, priv);
+ if (f != 0 && !new_content) {
+ unlock_page(page);
++ put_page(page);
+ break;
+ }
+
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index 46e8415fa2c55..0842efa6f7120 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -414,9 +414,10 @@ static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
+ {
+ lockdep_assert_held(&info->lock);
+
+- btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
++ /* The free space could be negative in case of overcommit */
++ btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull",
+ info->flags,
+- info->total_bytes - btrfs_space_info_used(info, true),
++ (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
+ info->full ? "" : "not ");
+ btrfs_info(fs_info,
+ "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index c6a9542ca281b..cf2141483b37f 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -1403,6 +1403,7 @@ struct cifsInodeInfo {
+ #define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */
+ #define CIFS_INO_LOCK (5) /* lock bit for synchronization */
+ #define CIFS_INO_MODIFIED_ATTR (6) /* Indicate change in mtime/ctime */
++#define CIFS_INO_CLOSE_ON_LOCK (7) /* Not to defer the close when lock is set */
+ unsigned long flags;
+ spinlock_t writers_lock;
+ unsigned int writers; /* Number of writers on this inode */
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 3781eee9360af..65d3cf80444bf 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2382,9 +2382,10 @@ cifs_match_super(struct super_block *sb, void *data)
+ spin_lock(&cifs_tcp_ses_lock);
+ cifs_sb = CIFS_SB(sb);
+ tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
+- if (IS_ERR(tlink)) {
++ if (tlink == NULL) {
++ /* cannot match the superblock if the tlink is null */
+ spin_unlock(&cifs_tcp_ses_lock);
+- return rc;
++ return 0;
+ }
+ tcon = tlink_tcon(tlink);
+ ses = tcon->ses;
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index bb98fbdd22a99..ab2734159c192 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -881,6 +881,7 @@ int cifs_close(struct inode *inode, struct file *file)
+ dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
+ if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
+ cinode->lease_granted &&
++ !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
+ dclose) {
+ if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags))
+ inode->i_ctime = inode->i_mtime = current_time(inode);
+@@ -1861,6 +1862,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
+ cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
+ tcon->ses->server);
+ cifs_sb = CIFS_FILE_SB(file);
++ set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
+
+ if (cap_unix(tcon->ses) &&
+ (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
+@@ -3108,7 +3110,7 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
+ struct cifs_tcon *tcon;
+ struct cifs_sb_info *cifs_sb;
+ struct dentry *dentry = ctx->cfile->dentry;
+- int rc;
++ ssize_t rc;
+
+ tcon = tlink_tcon(ctx->cfile->tlink);
+ cifs_sb = CIFS_SB(dentry->d_sb);
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 9469f1cf0b46a..57e695e3c969b 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -736,7 +736,7 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
+ if (cancel_delayed_work(&cfile->deferred)) {
+ tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ if (tmp_list == NULL)
+- continue;
++ break;
+ tmp_list->cfile = cfile;
+ list_add_tail(&tmp_list->list, &file_head);
+ }
+@@ -767,7 +767,7 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
+ if (cancel_delayed_work(&cfile->deferred)) {
+ tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ if (tmp_list == NULL)
+- continue;
++ break;
+ tmp_list->cfile = cfile;
+ list_add_tail(&tmp_list->list, &file_head);
+ }
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 754d59f734d84..699a08d724c24 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4043,7 +4043,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
+ int i, bid = pbuf->bid;
+
+ for (i = 0; i < pbuf->nbufs; i++) {
+- buf = kmalloc(sizeof(*buf), GFP_KERNEL);
++ buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
+ if (!buf)
+ break;
+
+@@ -4969,7 +4969,7 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
+ if (req->poll.events & EPOLLONESHOT)
+ flags = 0;
+ if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
+- req->poll.done = true;
++ req->poll.events |= EPOLLONESHOT;
+ flags = 0;
+ }
+ if (flags & IORING_CQE_F_MORE)
+@@ -4993,6 +4993,7 @@ static void io_poll_task_func(struct io_kiocb *req)
+ if (done) {
+ io_poll_remove_double(req);
+ hash_del(&req->hash_node);
++ req->poll.done = true;
+ } else {
+ req->result = 0;
+ add_wait_queue(req->poll.head, &req->poll.wait);
+@@ -5126,6 +5127,7 @@ static void io_async_task_func(struct io_kiocb *req)
+
+ hash_del(&req->hash_node);
+ io_poll_remove_double(req);
++ apoll->poll.done = true;
+ spin_unlock_irq(&ctx->completion_lock);
+
+ if (!READ_ONCE(apoll->poll.canceled))
+@@ -5917,19 +5919,16 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
+ struct io_uring_rsrc_update2 up;
+ int ret;
+
+- if (issue_flags & IO_URING_F_NONBLOCK)
+- return -EAGAIN;
+-
+ up.offset = req->rsrc_update.offset;
+ up.data = req->rsrc_update.arg;
+ up.nr = 0;
+ up.tags = 0;
+ up.resv = 0;
+
+- mutex_lock(&ctx->uring_lock);
++ io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+ ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
+ &up, req->rsrc_update.nr_args);
+- mutex_unlock(&ctx->uring_lock);
++ io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+
+ if (ret < 0)
+ req_set_fail(req);
+diff --git a/fs/lockd/svcxdr.h b/fs/lockd/svcxdr.h
+index c69a0bb76c940..4f1a451da5ba2 100644
+--- a/fs/lockd/svcxdr.h
++++ b/fs/lockd/svcxdr.h
+@@ -134,18 +134,9 @@ svcxdr_decode_owner(struct xdr_stream *xdr, struct xdr_netobj *obj)
+ static inline bool
+ svcxdr_encode_owner(struct xdr_stream *xdr, const struct xdr_netobj *obj)
+ {
+- unsigned int quadlen = XDR_QUADLEN(obj->len);
+- __be32 *p;
+-
+- if (xdr_stream_encode_u32(xdr, obj->len) < 0)
+- return false;
+- p = xdr_reserve_space(xdr, obj->len);
+- if (!p)
++ if (obj->len > XDR_MAX_NETOBJ)
+ return false;
+- p[quadlen - 1] = 0; /* XDR pad */
+- memcpy(p, obj->data, obj->len);
+-
+- return true;
++ return xdr_stream_encode_opaque(xdr, obj->data, obj->len) > 0;
+ }
+
+ #endif /* _LOCKD_SVCXDR_H_ */
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 48fd369c29a4b..a2a2ae37b859a 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -3939,7 +3939,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
+ oi = OCFS2_I(inode);
+ oi->ip_dir_lock_gen++;
+ mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
+- goto out;
++ goto out_forget;
+ }
+
+ if (!S_ISREG(inode->i_mode))
+@@ -3970,6 +3970,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
+ filemap_fdatawait(mapping);
+ }
+
++out_forget:
+ forget_all_cached_acls(inode);
+
+ out:
+diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
+index a6ee23aadd283..66645a5a35f30 100644
+--- a/fs/qnx4/dir.c
++++ b/fs/qnx4/dir.c
+@@ -15,13 +15,48 @@
+ #include <linux/buffer_head.h>
+ #include "qnx4.h"
+
++/*
++ * A qnx4 directory entry is an inode entry or link info
++ * depending on the status field in the last byte. The
++ * first byte is where the name starts either way, and a
++ * zero means it's empty.
++ *
++ * Also, due to a bug in gcc, we don't want to use the
++ * real (differently sized) name arrays in the inode and
++ * link entries, but always the 'de_name[]' one in the
++ * fake struct entry.
++ *
++ * See
++ *
++ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99578#c6
++ *
++ * for details, but basically gcc will take the size of the
++ * 'name' array from one of the used union entries randomly.
++ *
++ * This use of 'de_name[]' (48 bytes) avoids the false positive
++ * warnings that would happen if gcc decides to use 'inode.di_name'
++ * (16 bytes) even when the pointer and size were to come from
++ * 'link.dl_name' (48 bytes).
++ *
++ * In all cases the actual name pointer itself is the same, it's
++ * only the gcc internal 'what is the size of this field' logic
++ * that can get confused.
++ */
++union qnx4_directory_entry {
++ struct {
++ const char de_name[48];
++ u8 de_pad[15];
++ u8 de_status;
++ };
++ struct qnx4_inode_entry inode;
++ struct qnx4_link_info link;
++};
++
+ static int qnx4_readdir(struct file *file, struct dir_context *ctx)
+ {
+ struct inode *inode = file_inode(file);
+ unsigned int offset;
+ struct buffer_head *bh;
+- struct qnx4_inode_entry *de;
+- struct qnx4_link_info *le;
+ unsigned long blknum;
+ int ix, ino;
+ int size;
+@@ -38,27 +73,27 @@ static int qnx4_readdir(struct file *file, struct dir_context *ctx)
+ }
+ ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
+ for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) {
++ union qnx4_directory_entry *de;
++
+ offset = ix * QNX4_DIR_ENTRY_SIZE;
+- de = (struct qnx4_inode_entry *) (bh->b_data + offset);
+- if (!de->di_fname[0])
++ de = (union qnx4_directory_entry *) (bh->b_data + offset);
++
++ if (!de->de_name[0])
+ continue;
+- if (!(de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
++ if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
+ continue;
+- if (!(de->di_status & QNX4_FILE_LINK))
+- size = QNX4_SHORT_NAME_MAX;
+- else
+- size = QNX4_NAME_MAX;
+- size = strnlen(de->di_fname, size);
+- QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname));
+- if (!(de->di_status & QNX4_FILE_LINK))
++ if (!(de->de_status & QNX4_FILE_LINK)) {
++ size = sizeof(de->inode.di_fname);
+ ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
+- else {
+- le = (struct qnx4_link_info*)de;
+- ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) *
++ } else {
++ size = sizeof(de->link.dl_fname);
++ ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) *
+ QNX4_INODES_PER_BLOCK +
+- le->dl_inode_ndx;
++ de->link.dl_inode_ndx;
+ }
+- if (!dir_emit(ctx, de->di_fname, size, ino, DT_UNKNOWN)) {
++ size = strnlen(de->de_name, size);
++ QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, name));
++ if (!dir_emit(ctx, de->de_name, size, ino, DT_UNKNOWN)) {
+ brelse(bh);
+ return 0;
+ }
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index b67261a1e3e9c..3d5af56337bdb 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -188,6 +188,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ (typeof(ptr)) (__ptr + (off)); })
+ #endif
+
++#define absolute_pointer(val) RELOC_HIDE((void *)(val), 0)
++
+ #ifndef OPTIMIZER_HIDE_VAR
+ /* Make the optimizer believe the variable can be manipulated arbitrarily. */
+ #define OPTIMIZER_HIDE_VAR(var) \
+diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
+index 6beb26b7151d2..86be8bf27b41b 100644
+--- a/include/linux/pkeys.h
++++ b/include/linux/pkeys.h
+@@ -4,6 +4,8 @@
+
+ #include <linux/mm.h>
+
++#define ARCH_DEFAULT_PKEY 0
++
+ #ifdef CONFIG_ARCH_HAS_PKEYS
+ #include <asm/pkeys.h>
+ #else /* ! CONFIG_ARCH_HAS_PKEYS */
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index 548a028f2dabb..2c1fc9212cf28 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -124,6 +124,7 @@ struct usb_hcd {
+ #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
+ #define HCD_FLAG_DEAD 6 /* controller has died? */
+ #define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */
++#define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */
+
+ /* The flags can be tested using these macros; they are likely to
+ * be slightly faster than test_bit().
+@@ -134,6 +135,7 @@ struct usb_hcd {
+ #define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
+ #define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
+ #define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD))
++#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER))
+
+ /*
+ * Specifies if interfaces are authorized by default
+diff --git a/include/net/dsa.h b/include/net/dsa.h
+index d833f717e8022..004514a21e306 100644
+--- a/include/net/dsa.h
++++ b/include/net/dsa.h
+@@ -575,8 +575,16 @@ struct dsa_switch_ops {
+ int (*change_tag_protocol)(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol proto);
+
++ /* Optional switch-wide initialization and destruction methods */
+ int (*setup)(struct dsa_switch *ds);
+ void (*teardown)(struct dsa_switch *ds);
++
++ /* Per-port initialization and destruction methods. Mandatory if the
++ * driver registers devlink port regions, optional otherwise.
++ */
++ int (*port_setup)(struct dsa_switch *ds, int port);
++ void (*port_teardown)(struct dsa_switch *ds, int port);
++
+ u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
+
+ /*
+diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
+index bf9806fd13065..db4f2cec83606 100644
+--- a/include/trace/events/erofs.h
++++ b/include/trace/events/erofs.h
+@@ -35,20 +35,20 @@ TRACE_EVENT(erofs_lookup,
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(erofs_nid_t, nid )
+- __field(const char *, name )
++ __string(name, dentry->d_name.name )
+ __field(unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dir->i_sb->s_dev;
+ __entry->nid = EROFS_I(dir)->nid;
+- __entry->name = dentry->d_name.name;
++ __assign_str(name, dentry->d_name.name);
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x",
+ show_dev_nid(__entry),
+- __entry->name,
++ __get_str(name),
+ __entry->flags)
+ );
+
+diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
+index 20e435fe657a1..3246f2c746969 100644
+--- a/include/uapi/linux/android/binder.h
++++ b/include/uapi/linux/android/binder.h
+@@ -225,7 +225,14 @@ struct binder_freeze_info {
+
+ struct binder_frozen_status_info {
+ __u32 pid;
++
++ /* process received sync transactions since last frozen
++ * bit 0: received sync transaction after being frozen
++ * bit 1: new pending sync transaction during freezing
++ */
+ __u32 sync_recv;
++
++ /* process received async transactions since last frozen */
+ __u32 async_recv;
+ };
+
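The comments added to binder_frozen_status_info give sync_recv a two-bit layout while async_recv stays a plain flag. From user space, a BINDER_GET_FROZEN_INFO result would be decoded roughly as below (a hedged sketch; the ioctl plumbing is omitted and the function name is illustrative):

    #include <stdio.h>
    #include <linux/android/binder.h>

    /* Decode the sync_recv bit layout documented in the hunk above. */
    static void report_frozen_status(const struct binder_frozen_status_info *info)
    {
            printf("pid %u: sync txn after freeze: %s\n", info->pid,
                   (info->sync_recv & 1) ? "yes" : "no");
            printf("pid %u: sync txn pending during freeze: %s\n", info->pid,
                   (info->sync_recv & 2) ? "yes" : "no");
            printf("pid %u: async txn since freeze: %s\n", info->pid,
                   info->async_recv ? "yes" : "no");
    }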
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 9d94ac6ff50c4..592b9b68cbd93 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -9641,6 +9641,8 @@ static int check_btf_line(struct bpf_verifier_env *env,
+ nr_linfo = attr->line_info_cnt;
+ if (!nr_linfo)
+ return 0;
++ if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
++ return -EINVAL;
+
+ rec_size = attr->line_info_rec_size;
+ if (rec_size < MIN_BPF_LINEINFO_SIZE ||
+diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
+index f2faa13534e57..70519f67556f9 100644
+--- a/kernel/dma/debug.c
++++ b/kernel/dma/debug.c
+@@ -567,7 +567,8 @@ static void add_dma_entry(struct dma_debug_entry *entry)
+ pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
+ global_disable = true;
+ } else if (rc == -EEXIST) {
+- pr_err("cacheline tracking EEXIST, overlapping mappings aren't supported\n");
++ err_printk(entry->dev, entry,
++ "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
+ }
+ }
+
+diff --git a/kernel/entry/kvm.c b/kernel/entry/kvm.c
+index 49972ee99aff6..049fd06b4c3de 100644
+--- a/kernel/entry/kvm.c
++++ b/kernel/entry/kvm.c
+@@ -19,8 +19,10 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
+ if (ti_work & _TIF_NEED_RESCHED)
+ schedule();
+
+- if (ti_work & _TIF_NOTIFY_RESUME)
++ if (ti_work & _TIF_NOTIFY_RESUME) {
+ tracehook_notify_resume(NULL);
++ rseq_handle_notify_resume(NULL, NULL);
++ }
+
+ ret = arch_xfer_to_guest_mode_handle_work(vcpu, ti_work);
+ if (ret)
+diff --git a/kernel/rseq.c b/kernel/rseq.c
+index 35f7bd0fced0e..6d45ac3dae7fb 100644
+--- a/kernel/rseq.c
++++ b/kernel/rseq.c
+@@ -282,9 +282,17 @@ void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
+
+ if (unlikely(t->flags & PF_EXITING))
+ return;
+- ret = rseq_ip_fixup(regs);
+- if (unlikely(ret < 0))
+- goto error;
++
++ /*
++ * regs is NULL if and only if the caller is in a syscall path. Skip
++ * fixup and leave rseq_cs as is so that rseq_syscall() will detect and
++ * kill a misbehaving userspace on debug kernels.
++ */
++ if (regs) {
++ ret = rseq_ip_fixup(regs);
++ if (unlikely(ret < 0))
++ goto error;
++ }
+ if (unlikely(rseq_update_cpu_id(t)))
+ goto error;
+ return;
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index c221e4c3f625c..fa91f398f28b7 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -1605,6 +1605,14 @@ static int blk_trace_remove_queue(struct request_queue *q)
+ if (bt == NULL)
+ return -EINVAL;
+
++ if (bt->trace_state == Blktrace_running) {
++ bt->trace_state = Blktrace_stopped;
++ spin_lock_irq(&running_trace_lock);
++ list_del_init(&bt->running_list);
++ spin_unlock_irq(&running_trace_lock);
++ relay_flush(bt->rchan);
++ }
++
+ put_probe_ref();
+ synchronize_rcu();
+ blk_trace_free(bt);
+diff --git a/mm/debug.c b/mm/debug.c
+index e73fe0a8ec3d2..e61037cded980 100644
+--- a/mm/debug.c
++++ b/mm/debug.c
+@@ -24,7 +24,8 @@ const char *migrate_reason_names[MR_TYPES] = {
+ "syscall_or_cpuset",
+ "mempolicy_mbind",
+ "numa_misplaced",
+- "cma",
++ "contig_range",
++ "longterm_pin",
+ };
+
+ const struct trace_print_flags pageflag_names[] = {
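This hunk exists because the string table had drifted out of step with enum migrate_reason (MR_CONTIG_RANGE had replaced the old "cma" slot and MR_LONGTERM_PIN had been added without a matching string). The patch itself only corrects the strings; a compile-time guard in the style below would catch any future drift (a standalone C11 sketch with stand-in names, not the kernel's code):

    enum demo_reason {                      /* stand-in for enum migrate_reason */
            DEMO_COMPACTION,
            DEMO_CONTIG_RANGE,
            DEMO_LONGTERM_PIN,
            DEMO_NR_REASONS                 /* count sentinel, like MR_TYPES */
    };

    static const char *const demo_reason_names[] = {
            "compaction",
            "contig_range",
            "longterm_pin",
    };

    /* Fails to compile if the array and the enum fall out of sync again. */
    _Static_assert(sizeof(demo_reason_names) / sizeof(demo_reason_names[0])
                   == DEMO_NR_REASONS, "demo_reason_names out of sync");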
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 83811c976c0cb..7df9fde18004c 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1127,7 +1127,7 @@ static int page_action(struct page_state *ps, struct page *p,
+ */
+ static inline bool HWPoisonHandlable(struct page *page)
+ {
+- return PageLRU(page) || __PageMovable(page);
++ return PageLRU(page) || __PageMovable(page) || is_free_buddy_page(page);
+ }
+
+ static int __get_hwpoison_page(struct page *page)
+diff --git a/mm/util.c b/mm/util.c
+index 9043d03750a73..c18202b3e659d 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -768,7 +768,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
+ {
+ struct ctl_table t;
+- int new_policy;
++ int new_policy = -1;
+ int ret;
+
+ /*
+@@ -786,7 +786,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
+ t = *table;
+ t.data = &new_policy;
+ ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
+- if (ret)
++ if (ret || new_policy == -1)
+ return ret;
+
+ mm_compute_batch(new_policy);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 8f1a47ad6781a..693f15a056304 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6988,12 +6988,16 @@ EXPORT_SYMBOL(napi_disable);
+ */
+ void napi_enable(struct napi_struct *n)
+ {
+- BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
+- smp_mb__before_atomic();
+- clear_bit(NAPI_STATE_SCHED, &n->state);
+- clear_bit(NAPI_STATE_NPSVC, &n->state);
+- if (n->dev->threaded && n->thread)
+- set_bit(NAPI_STATE_THREADED, &n->state);
++ unsigned long val, new;
++
++ do {
++ val = READ_ONCE(n->state);
++ BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
++
++ new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
++ if (n->dev->threaded && n->thread)
++ new |= NAPIF_STATE_THREADED;
++ } while (cmpxchg(&n->state, val, new) != val);
+ }
+ EXPORT_SYMBOL(napi_enable);
+
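The napi_enable() rework above replaces a sequence of separate bit operations with a single compare-and-exchange retry loop, so the SCHED, NPSVC and THREADED bits all change in one atomic step and the BUG_ON check applies to the same snapshot that gets modified. The underlying idiom, reduced to portable user-space C with the GCC/Clang __atomic builtins (a sketch of the pattern, not the kernel implementation):

    #define F_SCHED    (1UL << 0)
    #define F_NPSVC    (1UL << 1)
    #define F_THREADED (1UL << 2)

    static void enable_flags(unsigned long *state, int threaded)
    {
            unsigned long val = __atomic_load_n(state, __ATOMIC_RELAXED);
            unsigned long new;

            do {
                    /* Compute the desired next value from the observed one. */
                    new = val & ~(F_SCHED | F_NPSVC);
                    if (threaded)
                            new |= F_THREADED;
                    /* On failure 'val' is refreshed with the current value,
                     * so the next iteration recomputes 'new' from scratch. */
            } while (!__atomic_compare_exchange_n(state, &val, new,
                                                  0 /* strong */,
                                                  __ATOMIC_ACQ_REL,
                                                  __ATOMIC_RELAXED));
    }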
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index 79267b00af68f..76ed5ef0e36a8 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -342,6 +342,7 @@ static int dsa_port_setup(struct dsa_port *dp)
+ {
+ struct devlink_port *dlp = &dp->devlink_port;
+ bool dsa_port_link_registered = false;
++ struct dsa_switch *ds = dp->ds;
+ bool dsa_port_enabled = false;
+ int err = 0;
+
+@@ -351,6 +352,12 @@ static int dsa_port_setup(struct dsa_port *dp)
+ INIT_LIST_HEAD(&dp->fdbs);
+ INIT_LIST_HEAD(&dp->mdbs);
+
++ if (ds->ops->port_setup) {
++ err = ds->ops->port_setup(ds, dp->index);
++ if (err)
++ return err;
++ }
++
+ switch (dp->type) {
+ case DSA_PORT_TYPE_UNUSED:
+ dsa_port_disable(dp);
+@@ -393,8 +400,11 @@ static int dsa_port_setup(struct dsa_port *dp)
+ dsa_port_disable(dp);
+ if (err && dsa_port_link_registered)
+ dsa_port_link_unregister_of(dp);
+- if (err)
++ if (err) {
++ if (ds->ops->port_teardown)
++ ds->ops->port_teardown(ds, dp->index);
+ return err;
++ }
+
+ dp->setup = true;
+
+@@ -446,11 +456,15 @@ static int dsa_port_devlink_setup(struct dsa_port *dp)
+ static void dsa_port_teardown(struct dsa_port *dp)
+ {
+ struct devlink_port *dlp = &dp->devlink_port;
++ struct dsa_switch *ds = dp->ds;
+ struct dsa_mac_addr *a, *tmp;
+
+ if (!dp->setup)
+ return;
+
++ if (ds->ops->port_teardown)
++ ds->ops->port_teardown(ds, dp->index);
++
+ devlink_port_type_clear(dlp);
+
+ switch (dp->type) {
+@@ -494,6 +508,36 @@ static void dsa_port_devlink_teardown(struct dsa_port *dp)
+ dp->devlink_port_setup = false;
+ }
+
++/* Destroy the current devlink port, and create a new one which has the UNUSED
++ * flavour. At this point, any call to ds->ops->port_setup has been already
++ * balanced out by a call to ds->ops->port_teardown, so we know that any
++ * devlink port regions the driver had are now unregistered. We then call its
++ * ds->ops->port_setup again, in order for the driver to re-create them on the
++ * new devlink port.
++ */
++static int dsa_port_reinit_as_unused(struct dsa_port *dp)
++{
++ struct dsa_switch *ds = dp->ds;
++ int err;
++
++ dsa_port_devlink_teardown(dp);
++ dp->type = DSA_PORT_TYPE_UNUSED;
++ err = dsa_port_devlink_setup(dp);
++ if (err)
++ return err;
++
++ if (ds->ops->port_setup) {
++ /* On error, leave the devlink port registered,
++ * dsa_switch_teardown will clean it up later.
++ */
++ err = ds->ops->port_setup(ds, dp->index);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
+ static int dsa_devlink_info_get(struct devlink *dl,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+@@ -748,7 +792,7 @@ static int dsa_switch_setup(struct dsa_switch *ds)
+ devlink_params_publish(ds->devlink);
+
+ if (!ds->slave_mii_bus && ds->ops->phy_read) {
+- ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
++ ds->slave_mii_bus = mdiobus_alloc();
+ if (!ds->slave_mii_bus) {
+ err = -ENOMEM;
+ goto teardown;
+@@ -758,13 +802,16 @@ static int dsa_switch_setup(struct dsa_switch *ds)
+
+ err = mdiobus_register(ds->slave_mii_bus);
+ if (err < 0)
+- goto teardown;
++ goto free_slave_mii_bus;
+ }
+
+ ds->setup = true;
+
+ return 0;
+
++free_slave_mii_bus:
++ if (ds->slave_mii_bus && ds->ops->phy_read)
++ mdiobus_free(ds->slave_mii_bus);
+ teardown:
+ if (ds->ops->teardown)
+ ds->ops->teardown(ds);
+@@ -789,8 +836,11 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
+ if (!ds->setup)
+ return;
+
+- if (ds->slave_mii_bus && ds->ops->phy_read)
++ if (ds->slave_mii_bus && ds->ops->phy_read) {
+ mdiobus_unregister(ds->slave_mii_bus);
++ mdiobus_free(ds->slave_mii_bus);
++ ds->slave_mii_bus = NULL;
++ }
+
+ dsa_switch_unregister_notifier(ds);
+
+@@ -850,12 +900,9 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
+ list_for_each_entry(dp, &dst->ports, list) {
+ err = dsa_port_setup(dp);
+ if (err) {
+- dsa_port_devlink_teardown(dp);
+- dp->type = DSA_PORT_TYPE_UNUSED;
+- err = dsa_port_devlink_setup(dp);
++ err = dsa_port_reinit_as_unused(dp);
+ if (err)
+ goto teardown;
+- continue;
+ }
+ }
+
+@@ -960,6 +1007,7 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
+ teardown_master:
+ dsa_tree_teardown_master(dst);
+ teardown_switches:
++ dsa_tree_teardown_ports(dst);
+ dsa_tree_teardown_switches(dst);
+ teardown_default_cpu:
+ dsa_tree_teardown_default_cpu(dst);
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
+index 75ca4b6e484f4..9e8100728d464 100644
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -1982,6 +1982,8 @@ static int replace_nexthop_grp(struct net *net, struct nexthop *old,
+ rcu_assign_pointer(old->nh_grp, newg);
+
+ if (newg->resilient) {
++ /* Make sure concurrent readers are not using 'oldg' anymore. */
++ synchronize_net();
+ rcu_assign_pointer(oldg->res_table, tmp_table);
+ rcu_assign_pointer(oldg->spare->res_table, tmp_table);
+ }
+@@ -3565,6 +3567,7 @@ static struct notifier_block nh_netdev_notifier = {
+ };
+
+ static int nexthops_dump(struct net *net, struct notifier_block *nb,
++ enum nexthop_event_type event_type,
+ struct netlink_ext_ack *extack)
+ {
+ struct rb_root *root = &net->nexthop.rb_root;
+@@ -3575,8 +3578,7 @@ static int nexthops_dump(struct net *net, struct notifier_block *nb,
+ struct nexthop *nh;
+
+ nh = rb_entry(node, struct nexthop, rb_node);
+- err = call_nexthop_notifier(nb, net, NEXTHOP_EVENT_REPLACE, nh,
+- extack);
++ err = call_nexthop_notifier(nb, net, event_type, nh, extack);
+ if (err)
+ break;
+ }
+@@ -3590,7 +3592,7 @@ int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
+ int err;
+
+ rtnl_lock();
+- err = nexthops_dump(net, nb, extack);
++ err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
+ if (err)
+ goto unlock;
+ err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
+@@ -3603,8 +3605,17 @@ EXPORT_SYMBOL(register_nexthop_notifier);
+
+ int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
+ {
+- return blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
+- nb);
++ int err;
++
++ rtnl_lock();
++ err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
++ nb);
++ if (err)
++ goto unlock;
++ nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
++unlock:
++ rtnl_unlock();
++ return err;
+ }
+ EXPORT_SYMBOL(unregister_nexthop_notifier);
+
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index ef75c9b05f17e..68e94e9f5089a 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1378,7 +1378,6 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
+ int err = -ENOMEM;
+ int allow_create = 1;
+ int replace_required = 0;
+- int sernum = fib6_new_sernum(info->nl_net);
+
+ if (info->nlh) {
+ if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
+@@ -1478,7 +1477,7 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
+ if (!err) {
+ if (rt->nh)
+ list_add(&rt->nh_list, &rt->nh->f6i_list);
+- __fib6_update_sernum_upto_root(rt, sernum);
++ __fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net));
+ fib6_start_gc(info->nl_net, rt);
+ }
+
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index acbead7cf50f0..4d2abdd3cd3b1 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1291,7 +1291,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ goto alloc_skb;
+ }
+
+- must_collapse = (info->size_goal - skb->len > 0) &&
++ must_collapse = (info->size_goal > skb->len) &&
+ (skb_shinfo(skb)->nr_frags < sysctl_max_skb_frags);
+ if (must_collapse) {
+ size_bias = skb->len;
+@@ -1300,7 +1300,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ }
+
+ alloc_skb:
+- if (!must_collapse && !ssk->sk_tx_skb_cache &&
++ if (!must_collapse &&
+ !mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held))
+ return 0;
+
+diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
+index e286dafd6e886..6ec1ebe878ae0 100644
+--- a/net/smc/smc_clc.c
++++ b/net/smc/smc_clc.c
+@@ -230,7 +230,8 @@ static int smc_clc_prfx_set(struct socket *clcsock,
+ goto out_rel;
+ }
+ /* get address to which the internal TCP socket is bound */
+- kernel_getsockname(clcsock, (struct sockaddr *)&addrs);
++ if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0)
++ goto out_rel;
+ /* analyze IP specific data of net_device belonging to TCP socket */
+ addr6 = (struct sockaddr_in6 *)&addrs;
+ rcu_read_lock();
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index c160ff50c053a..116cfd6fac1ff 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1474,7 +1474,9 @@ static void smc_conn_abort_work(struct work_struct *work)
+ abort_work);
+ struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+
++ lock_sock(&smc->sk);
+ smc_conn_kill(conn, true);
++ release_sock(&smc->sk);
+ sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
+ }
+
+diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
+index d8886720e83d8..8441e3e1aaac3 100644
+--- a/tools/lib/perf/evsel.c
++++ b/tools/lib/perf/evsel.c
+@@ -43,7 +43,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
+ free(evsel);
+ }
+
+-#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
++#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y))
+ #define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
+
+ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
+@@ -54,7 +54,10 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
+ int cpu, thread;
+ for (cpu = 0; cpu < ncpus; cpu++) {
+ for (thread = 0; thread < nthreads; thread++) {
+- FD(evsel, cpu, thread) = -1;
++ int *fd = FD(evsel, cpu, thread);
++
++ if (fd)
++ *fd = -1;
+ }
+ }
+ }
+@@ -80,7 +83,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
+ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
+ {
+ struct perf_evsel *leader = evsel->leader;
+- int fd;
++ int *fd;
+
+ if (evsel == leader) {
+ *group_fd = -1;
+@@ -95,10 +98,10 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *grou
+ return -ENOTCONN;
+
+ fd = FD(leader, cpu, thread);
+- if (fd == -1)
++ if (fd == NULL || *fd == -1)
+ return -EBADF;
+
+- *group_fd = fd;
++ *group_fd = *fd;
+
+ return 0;
+ }
+@@ -138,7 +141,11 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
+
+ for (cpu = 0; cpu < cpus->nr; cpu++) {
+ for (thread = 0; thread < threads->nr; thread++) {
+- int fd, group_fd;
++ int fd, group_fd, *evsel_fd;
++
++ evsel_fd = FD(evsel, cpu, thread);
++ if (evsel_fd == NULL)
++ return -EINVAL;
+
+ err = get_group_fd(evsel, cpu, thread, &group_fd);
+ if (err < 0)
+@@ -151,7 +158,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
+ if (fd < 0)
+ return -errno;
+
+- FD(evsel, cpu, thread) = fd;
++ *evsel_fd = fd;
+ }
+ }
+
+@@ -163,9 +170,12 @@ static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
+ int thread;
+
+ for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
+- if (FD(evsel, cpu, thread) >= 0)
+- close(FD(evsel, cpu, thread));
+- FD(evsel, cpu, thread) = -1;
++ int *fd = FD(evsel, cpu, thread);
++
++ if (fd && *fd >= 0) {
++ close(*fd);
++ *fd = -1;
++ }
+ }
+ }
+
+@@ -209,13 +219,12 @@ void perf_evsel__munmap(struct perf_evsel *evsel)
+
+ for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
+ for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
+- int fd = FD(evsel, cpu, thread);
+- struct perf_mmap *map = MMAP(evsel, cpu, thread);
++ int *fd = FD(evsel, cpu, thread);
+
+- if (fd < 0)
++ if (fd == NULL || *fd < 0)
+ continue;
+
+- perf_mmap__munmap(map);
++ perf_mmap__munmap(MMAP(evsel, cpu, thread));
+ }
+ }
+
+@@ -239,15 +248,16 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
+
+ for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
+ for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
+- int fd = FD(evsel, cpu, thread);
+- struct perf_mmap *map = MMAP(evsel, cpu, thread);
++ int *fd = FD(evsel, cpu, thread);
++ struct perf_mmap *map;
+
+- if (fd < 0)
++ if (fd == NULL || *fd < 0)
+ continue;
+
++ map = MMAP(evsel, cpu, thread);
+ perf_mmap__init(map, NULL, false, NULL);
+
+- ret = perf_mmap__mmap(map, &mp, fd, cpu);
++ ret = perf_mmap__mmap(map, &mp, *fd, cpu);
+ if (ret) {
+ perf_evsel__munmap(evsel);
+ return ret;
+@@ -260,7 +270,9 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
+
+ void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
+ {
+- if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
++ int *fd = FD(evsel, cpu, thread);
++
++ if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL)
+ return NULL;
+
+ return MMAP(evsel, cpu, thread)->base;
+@@ -295,17 +307,18 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
+ struct perf_counts_values *count)
+ {
+ size_t size = perf_evsel__read_size(evsel);
++ int *fd = FD(evsel, cpu, thread);
+
+ memset(count, 0, sizeof(*count));
+
+- if (FD(evsel, cpu, thread) < 0)
++ if (fd == NULL || *fd < 0)
+ return -EINVAL;
+
+ if (MMAP(evsel, cpu, thread) &&
+ !perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
+ return 0;
+
+- if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
++ if (readn(*fd, count->values, size) <= 0)
+ return -errno;
+
+ return 0;
+@@ -318,8 +331,13 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
+ int thread;
+
+ for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
+- int fd = FD(evsel, cpu, thread),
+- err = ioctl(fd, ioc, arg);
++ int err;
++ int *fd = FD(evsel, cpu, thread);
++
++ if (fd == NULL || *fd < 0)
++ return -1;
++
++ err = ioctl(*fd, ioc, arg);
+
+ if (err)
+ return err;
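The libperf changes above all follow from one transformation: FD() used to expand to a dereference of the xyarray slot, so a missing slot meant an immediate crash; it now yields the slot pointer, and every caller checks for NULL (and then for *fd < 0) before using it. The shape of that change in miniature (illustrative stand-in types, not the libperf API):

    #include <stddef.h>

    struct table { int *cells; int nr; };

    /* May return NULL when indexing fails - the situation the patch
     * teaches callers to survive. */
    static int *table_entry(struct table *t, int i)
    {
            return (i >= 0 && i < t->nr) ? &t->cells[i] : NULL;
    }

    /* Before: #define FD(t, i) (*(int *)table_entry(t, i))  - crashes on NULL.
     * After:  #define FD(t, i) ((int *)table_entry(t, i))   - caller checks. */
    #define FD(t, i) table_entry(t, i)

    static int read_fd(struct table *t, int i)
    {
            int *fd = FD(t, i);

            if (fd == NULL || *fd < 0)
                    return -1;      /* fail gracefully instead of crashing */
            return *fd;
    }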
+diff --git a/tools/testing/selftests/arm64/signal/test_signals.h b/tools/testing/selftests/arm64/signal/test_signals.h
+index f96baf1cef1a9..ebe8694dbef0f 100644
+--- a/tools/testing/selftests/arm64/signal/test_signals.h
++++ b/tools/testing/selftests/arm64/signal/test_signals.h
+@@ -33,10 +33,12 @@
+ */
+ enum {
+ FSSBS_BIT,
++ FSVE_BIT,
+ FMAX_END
+ };
+
+ #define FEAT_SSBS (1UL << FSSBS_BIT)
++#define FEAT_SVE (1UL << FSVE_BIT)
+
+ /*
+ * A descriptor used to describe and configure a test case.
+diff --git a/tools/testing/selftests/arm64/signal/test_signals_utils.c b/tools/testing/selftests/arm64/signal/test_signals_utils.c
+index 2de6e5ed5e258..22722abc9dfa9 100644
+--- a/tools/testing/selftests/arm64/signal/test_signals_utils.c
++++ b/tools/testing/selftests/arm64/signal/test_signals_utils.c
+@@ -26,6 +26,7 @@ static int sig_copyctx = SIGTRAP;
+
+ static char const *const feats_names[FMAX_END] = {
+ " SSBS ",
++ " SVE ",
+ };
+
+ #define MAX_FEATS_SZ 128
+@@ -263,16 +264,21 @@ int test_init(struct tdescr *td)
+ */
+ if (getauxval(AT_HWCAP) & HWCAP_SSBS)
+ td->feats_supported |= FEAT_SSBS;
+- if (feats_ok(td))
++ if (getauxval(AT_HWCAP) & HWCAP_SVE)
++ td->feats_supported |= FEAT_SVE;
++ if (feats_ok(td)) {
+ fprintf(stderr,
+ "Required Features: [%s] supported\n",
+ feats_to_string(td->feats_required &
+ td->feats_supported));
+- else
++ } else {
+ fprintf(stderr,
+ "Required Features: [%s] NOT supported\n",
+ feats_to_string(td->feats_required &
+ ~td->feats_supported));
++ td->result = KSFT_SKIP;
++ return 0;
++ }
+ }
+
+ /* Perform test specific additional initialization */