author     Mike Pagano <mpagano@gentoo.org>    2024-12-19 13:09:15 -0500
committer  Mike Pagano <mpagano@gentoo.org>    2024-12-19 13:09:15 -0500
commit     0decd39961e7575d7969cb254b0aab400a59e5ed (patch)
tree       9f1893c1e2e7932d429138cd6d5e14f9fad0c312
parent     Linux patch 5.15.174 (diff)
Linux patch 5.15.175 (tag: 5.15-185)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README                   |    4
-rw-r--r--  1174_linux-5.15.175.patch     | 2002
2 files changed, 2006 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index d3def6e5..84a783f3 100644
--- a/0000_README
+++ b/0000_README
@@ -739,6 +739,10 @@ Patch: 1173_linux-5.15.174.patch
From: https://www.kernel.org
Desc: Linux 5.15.174
+Patch: 1174_linux-5.15.175.patch
+From: https://www.kernel.org
+Desc: Linux 5.15.175
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1174_linux-5.15.175.patch b/1174_linux-5.15.175.patch
new file mode 100644
index 00000000..d9b2fae5
--- /dev/null
+++ b/1174_linux-5.15.175.patch
@@ -0,0 +1,2002 @@
+diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst
+index d6bf84f061f412..921f56dffea77d 100644
+--- a/Documentation/power/runtime_pm.rst
++++ b/Documentation/power/runtime_pm.rst
+@@ -341,7 +341,9 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+
+ `int pm_runtime_resume_and_get(struct device *dev);`
+ - run pm_runtime_resume(dev) and if successful, increment the device's
+- usage counter; return the result of pm_runtime_resume
++ usage counter; returns 0 on success (whether or not the device's
++ runtime PM status was already 'active') or the error code from
++ pm_runtime_resume() on failure.
+
+ `int pm_request_idle(struct device *dev);`
+ - submit a request to execute the subsystem-level idle callback for the
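
As an illustration of the contract spelled out in the documentation hunk above (this sketch is editorial, not part of the patch; my_driver_op() is a hypothetical caller name): a negative return means the usage counter was not incremented, so the error path must not "put".

#include <linux/pm_runtime.h>

/* Hypothetical caller written against the documented semantics. */
static int my_driver_op(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;		/* counter NOT incremented; no put here */

	/* ... access the now-active device ... */

	pm_runtime_put(dev);		/* balance the successful get */
	return 0;
}
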
+diff --git a/Makefile b/Makefile
+index 68ffe9d13fdd57..c8051699271752 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 174
++SUBLEVEL = 175
+ EXTRAVERSION =
+ NAME = Trick or Treat
+
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index 9888e0b3f67518..6ac0c4b98e2813 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -15,7 +15,6 @@ config PARISC
+ select ARCH_SPLIT_ARG64 if !64BIT
+ select ARCH_SUPPORTS_HUGETLBFS if PA20
+ select ARCH_SUPPORTS_MEMORY_FAILURE
+- select ARCH_HAS_CACHE_LINE_SIZE
+ select DMA_OPS
+ select RTC_CLASS
+ select RTC_DRV_GENERIC
+diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
+index 99e26c686f7ffb..d53e9e27dba007 100644
+--- a/arch/parisc/include/asm/cache.h
++++ b/arch/parisc/include/asm/cache.h
+@@ -20,16 +20,7 @@
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+-#ifdef CONFIG_PA20
+-#define ARCH_DMA_MINALIGN 128
+-#else
+-#define ARCH_DMA_MINALIGN 32
+-#endif
+-#define ARCH_KMALLOC_MINALIGN 16 /* ldcw requires 16-byte alignment */
+-
+-#define arch_slab_minalign() ((unsigned)dcache_stride)
+-#define cache_line_size() dcache_stride
+-#define dma_get_cache_alignment cache_line_size
++#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+ #define __read_mostly __section(".data..read_mostly")
+
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index b7186deb8262bb..3401c9977baf9a 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -196,6 +196,8 @@ static inline unsigned long long l1tf_pfn_limit(void)
+ return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
+ }
+
++void init_cpu_devs(void);
++void get_cpu_vendor(struct cpuinfo_x86 *c);
+ extern void early_cpu_init(void);
+ extern void identify_boot_cpu(void);
+ extern void identify_secondary_cpu(struct cpuinfo_x86 *);
+diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h
+index 491aadfac61178..df01a3afcf846e 100644
+--- a/arch/x86/include/asm/static_call.h
++++ b/arch/x86/include/asm/static_call.h
+@@ -61,4 +61,19 @@
+
+ extern bool __static_call_fixup(void *tramp, u8 op, void *dest);
+
++extern void __static_call_update_early(void *tramp, void *func);
++
++#define static_call_update_early(name, _func) \
++({ \
++ typeof(&STATIC_CALL_TRAMP(name)) __F = (_func); \
++ if (static_call_initialized) { \
++ __static_call_update(&STATIC_CALL_KEY(name), \
++ STATIC_CALL_TRAMP_ADDR(name), __F);\
++ } else { \
++ WRITE_ONCE(STATIC_CALL_KEY(name).func, _func); \
++ __static_call_update_early(STATIC_CALL_TRAMP_ADDR(name),\
++ __F); \
++ } \
++})
++
+ #endif /* _ASM_STATIC_CALL_H */
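
Stripped of the static-call plumbing, the new macro's decision is simple: before static_call_initialized becomes true, write the key's func pointer and patch the trampoline directly via __static_call_update_early(); afterwards, take the normal tracked update path. A standalone sketch of just that branching, with hypothetical stand-in functions rather than the real kernel machinery (which patches trampoline text and calls sync_core() on the early path):

#include <stdbool.h>
#include <stdio.h>

static bool static_call_initialized;

static void update_tracked(void) { puts("normal update: all call sites"); }
static void update_early(void)   { puts("early update: trampoline only"); }

static void demo_update(void)
{
	if (static_call_initialized)
		update_tracked();
	else
		update_early();
}

int main(void)
{
	demo_update();			/* boot: early, direct patch  */
	static_call_initialized = true;
	demo_update();			/* later: tracked update path */
	return 0;
}
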
+diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h
+index ab7382f92aff27..96bda43538ee70 100644
+--- a/arch/x86/include/asm/sync_core.h
++++ b/arch/x86/include/asm/sync_core.h
+@@ -8,7 +8,7 @@
+ #include <asm/special_insns.h>
+
+ #ifdef CONFIG_X86_32
+-static inline void iret_to_self(void)
++static __always_inline void iret_to_self(void)
+ {
+ asm volatile (
+ "pushfl\n\t"
+@@ -19,7 +19,7 @@ static inline void iret_to_self(void)
+ : ASM_CALL_CONSTRAINT : : "memory");
+ }
+ #else
+-static inline void iret_to_self(void)
++static __always_inline void iret_to_self(void)
+ {
+ unsigned int tmp;
+
+@@ -55,7 +55,7 @@ static inline void iret_to_self(void)
+ * Like all of Linux's memory ordering operations, this is a
+ * compiler barrier as well.
+ */
+-static inline void sync_core(void)
++static __always_inline void sync_core(void)
+ {
+ /*
+ * The SERIALIZE instruction is the most straightforward way to
+diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
+index 454b20815f3574..89cd98693efc53 100644
+--- a/arch/x86/include/asm/xen/hypercall.h
++++ b/arch/x86/include/asm/xen/hypercall.h
+@@ -39,9 +39,11 @@
+ #include <linux/string.h>
+ #include <linux/types.h>
+ #include <linux/pgtable.h>
++#include <linux/instrumentation.h>
+
+ #include <trace/events/xen.h>
+
++#include <asm/alternative.h>
+ #include <asm/page.h>
+ #include <asm/smap.h>
+ #include <asm/nospec-branch.h>
+@@ -86,11 +88,20 @@ struct xen_dm_op_buf;
+ * there aren't more than 5 arguments...)
+ */
+
+-extern struct { char _entry[32]; } hypercall_page[];
++void xen_hypercall_func(void);
++DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func);
+
+-#define __HYPERCALL "call hypercall_page+%c[offset]"
+-#define __HYPERCALL_ENTRY(x) \
+- [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
++#ifdef MODULE
++#define __ADDRESSABLE_xen_hypercall
++#else
++#define __ADDRESSABLE_xen_hypercall __ADDRESSABLE_ASM_STR(__SCK__xen_hypercall)
++#endif
++
++#define __HYPERCALL \
++ __ADDRESSABLE_xen_hypercall \
++ "call __SCT__xen_hypercall"
++
++#define __HYPERCALL_ENTRY(x) "a" (x)
+
+ #ifdef CONFIG_X86_32
+ #define __HYPERCALL_RETREG "eax"
+@@ -148,7 +159,7 @@ extern struct { char _entry[32]; } hypercall_page[];
+ __HYPERCALL_0ARG(); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_0PARAM \
+- : __HYPERCALL_ENTRY(name) \
++ : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \
+ : __HYPERCALL_CLOBBER0); \
+ (type)__res; \
+ })
+@@ -159,7 +170,7 @@ extern struct { char _entry[32]; } hypercall_page[];
+ __HYPERCALL_1ARG(a1); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_1PARAM \
+- : __HYPERCALL_ENTRY(name) \
++ : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \
+ : __HYPERCALL_CLOBBER1); \
+ (type)__res; \
+ })
+@@ -170,7 +181,7 @@ extern struct { char _entry[32]; } hypercall_page[];
+ __HYPERCALL_2ARG(a1, a2); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_2PARAM \
+- : __HYPERCALL_ENTRY(name) \
++ : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \
+ : __HYPERCALL_CLOBBER2); \
+ (type)__res; \
+ })
+@@ -181,7 +192,7 @@ extern struct { char _entry[32]; } hypercall_page[];
+ __HYPERCALL_3ARG(a1, a2, a3); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_3PARAM \
+- : __HYPERCALL_ENTRY(name) \
++ : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \
+ : __HYPERCALL_CLOBBER3); \
+ (type)__res; \
+ })
+@@ -192,7 +203,7 @@ extern struct { char _entry[32]; } hypercall_page[];
+ __HYPERCALL_4ARG(a1, a2, a3, a4); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_4PARAM \
+- : __HYPERCALL_ENTRY(name) \
++ : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \
+ : __HYPERCALL_CLOBBER4); \
+ (type)__res; \
+ })
+@@ -206,12 +217,9 @@ xen_single_call(unsigned int call,
+ __HYPERCALL_DECLS;
+ __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+
+- if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
+- return -EINVAL;
+-
+- asm volatile(CALL_NOSPEC
++ asm volatile(__HYPERCALL
+ : __HYPERCALL_5PARAM
+- : [thunk_target] "a" (&hypercall_page[call])
++ : __HYPERCALL_ENTRY(call)
+ : __HYPERCALL_CLOBBER5);
+
+ return (long)__res;
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 93b5cd12b9d4c5..dbaea8a6175b51 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -798,7 +798,7 @@ void detect_ht(struct cpuinfo_x86 *c)
+ #endif
+ }
+
+-static void get_cpu_vendor(struct cpuinfo_x86 *c)
++void get_cpu_vendor(struct cpuinfo_x86 *c)
+ {
+ char *v = c->x86_vendor_id;
+ int i;
+@@ -1522,15 +1522,11 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+ detect_nopl();
+ }
+
+-void __init early_cpu_init(void)
++void __init init_cpu_devs(void)
+ {
+ const struct cpu_dev *const *cdev;
+ int count = 0;
+
+-#ifdef CONFIG_PROCESSOR_SELECT
+- pr_info("KERNEL supported cpus:\n");
+-#endif
+-
+ for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
+ const struct cpu_dev *cpudev = *cdev;
+
+@@ -1538,20 +1534,30 @@ void __init early_cpu_init(void)
+ break;
+ cpu_devs[count] = cpudev;
+ count++;
++ }
++}
+
++void __init early_cpu_init(void)
++{
+ #ifdef CONFIG_PROCESSOR_SELECT
+- {
+- unsigned int j;
+-
+- for (j = 0; j < 2; j++) {
+- if (!cpudev->c_ident[j])
+- continue;
+- pr_info(" %s %s\n", cpudev->c_vendor,
+- cpudev->c_ident[j]);
+- }
+- }
++ unsigned int i, j;
++
++ pr_info("KERNEL supported cpus:\n");
+ #endif
++
++ init_cpu_devs();
++
++#ifdef CONFIG_PROCESSOR_SELECT
++ for (i = 0; i < X86_VENDOR_NUM && cpu_devs[i]; i++) {
++ for (j = 0; j < 2; j++) {
++ if (!cpu_devs[i]->c_ident[j])
++ continue;
++ pr_info(" %s %s\n", cpu_devs[i]->c_vendor,
++ cpu_devs[i]->c_ident[j]);
++ }
+ }
++#endif
++
+ early_identify_cpu(&boot_cpu_data);
+ }
+
+diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
+index 0e0221271e72dd..54d87bc2af7b0e 100644
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -2,6 +2,7 @@
+ #include <linux/static_call.h>
+ #include <linux/memory.h>
+ #include <linux/bug.h>
++#include <asm/sync_core.h>
+ #include <asm/text-patching.h>
+
+ enum insn_type {
+@@ -165,6 +166,15 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
+ }
+ EXPORT_SYMBOL_GPL(arch_static_call_transform);
+
++noinstr void __static_call_update_early(void *tramp, void *func)
++{
++ BUG_ON(system_state != SYSTEM_BOOTING);
++ BUG_ON(!early_boot_irqs_disabled);
++ BUG_ON(static_call_initialized);
++ __text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE);
++ sync_core();
++}
++
+ #ifdef CONFIG_RETHUNK
+ /*
+ * This is called by apply_returns() to fix up static call trampolines,
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 95d970359e1746..c366ba8c431090 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -5,6 +5,7 @@
+ #endif
+ #include <linux/console.h>
+ #include <linux/cpu.h>
++#include <linux/instrumentation.h>
+ #include <linux/kexec.h>
+ #include <linux/slab.h>
+ #include <linux/panic_notifier.h>
+@@ -25,7 +26,8 @@
+ #include "smp.h"
+ #include "pmu.h"
+
+-EXPORT_SYMBOL_GPL(hypercall_page);
++DEFINE_STATIC_CALL(xen_hypercall, xen_hypercall_hvm);
++EXPORT_STATIC_CALL_TRAMP(xen_hypercall);
+
+ /*
+ * Pointer to the xen_vcpu_info structure or
+@@ -99,6 +101,67 @@ struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
+ */
+ int xen_have_vcpu_info_placement = 1;
+
++static __ref void xen_get_vendor(void)
++{
++ init_cpu_devs();
++ cpu_detect(&boot_cpu_data);
++ get_cpu_vendor(&boot_cpu_data);
++}
++
++void xen_hypercall_setfunc(void)
++{
++ if (static_call_query(xen_hypercall) != xen_hypercall_hvm)
++ return;
++
++ if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
++ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
++ static_call_update(xen_hypercall, xen_hypercall_amd);
++ else
++ static_call_update(xen_hypercall, xen_hypercall_intel);
++}
++
++/*
++ * Evaluate processor vendor in order to select the correct hypercall
++ * function for HVM/PVH guests.
++ * Might be called very early in boot before vendor has been set by
++ * early_cpu_init().
++ */
++noinstr void *__xen_hypercall_setfunc(void)
++{
++ void (*func)(void);
++
++ /*
++ * Xen is supported only on CPUs with CPUID, so testing for
++ * X86_FEATURE_CPUID is a test for early_cpu_init() having been
++ * run.
++ *
++ * Note that __xen_hypercall_setfunc() is noinstr only due to a nasty
++ * dependency chain: it is being called via the xen_hypercall static
++ * call when running as a PVH or HVM guest. Hypercalls need to be
++ * noinstr due to PV guests using hypercalls in noinstr code. So we
++ * can safely tag the function body as "instrumentation ok", since
++ * the PV guest requirement is not of interest here (xen_get_vendor()
++ * calls noinstr functions, and static_call_update_early() might do
++ * so, too).
++ */
++ instrumentation_begin();
++
++ if (!boot_cpu_has(X86_FEATURE_CPUID))
++ xen_get_vendor();
++
++ if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
++ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
++ func = xen_hypercall_amd;
++ else
++ func = xen_hypercall_intel;
++
++ static_call_update_early(xen_hypercall, func);
++
++ instrumentation_end();
++
++ return func;
++}
++
+ static int xen_cpu_up_online(unsigned int cpu)
+ {
+ xen_init_lock_cpu(cpu);
+diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
+index e68ea5f4ad1ce0..f9020581bf6f74 100644
+--- a/arch/x86/xen/enlighten_hvm.c
++++ b/arch/x86/xen/enlighten_hvm.c
+@@ -101,15 +101,8 @@ static void __init init_hvm_pv_info(void)
+ /* PVH set up hypercall page in xen_prepare_pvh(). */
+ if (xen_pvh_domain())
+ pv_info.name = "Xen PVH";
+- else {
+- u64 pfn;
+- uint32_t msr;
+-
++ else
+ pv_info.name = "Xen HVM";
+- msr = cpuid_ebx(base + 2);
+- pfn = __pa(hypercall_page);
+- wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
+- }
+
+ xen_setup_features();
+
+@@ -284,6 +277,10 @@ static uint32_t __init xen_platform_hvm(void)
+ if (xen_pv_domain())
+ return 0;
+
++ /* Set correct hypercall function. */
++ if (xen_domain)
++ xen_hypercall_setfunc();
++
+ if (xen_pvh_domain() && nopv) {
+ /* Guest booting via the Xen-PVH boot entry goes here */
+ pr_info("\"nopv\" parameter is ignored in PVH guest\n");
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 998db0257e2ad3..ce6df600f8fb3c 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1207,6 +1207,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
+
+ xen_domain_type = XEN_PV_DOMAIN;
+ xen_start_flags = xen_start_info->flags;
++ /* Interrupts are guaranteed to be off initially. */
++ early_boot_irqs_disabled = true;
++ static_call_update_early(xen_hypercall, xen_hypercall_pv);
+
+ xen_setup_features();
+
+@@ -1304,7 +1307,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
+
+ local_irq_disable();
+- early_boot_irqs_disabled = true;
+
+ xen_raw_console_write("mapping kernel into physical memory\n");
+ xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
+diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
+index ada3868c02c231..00ee9399fd0c32 100644
+--- a/arch/x86/xen/enlighten_pvh.c
++++ b/arch/x86/xen/enlighten_pvh.c
+@@ -27,17 +27,10 @@ EXPORT_SYMBOL_GPL(xen_pvh);
+
+ void __init xen_pvh_init(struct boot_params *boot_params)
+ {
+- u32 msr;
+- u64 pfn;
+-
+ xen_pvh = 1;
+ xen_domain_type = XEN_HVM_DOMAIN;
+ xen_start_flags = pvh_start_info.flags;
+
+- msr = cpuid_ebx(xen_cpuid_base() + 2);
+- pfn = __pa(hypercall_page);
+- wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
+-
+ if (xen_initial_domain())
+ x86_init.oem.arch_setup = xen_add_preferred_consoles;
+ x86_init.oem.banner = xen_banner;
+diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
+index 1b757a1ee1bb62..045760ddac6abe 100644
+--- a/arch/x86/xen/xen-asm.S
++++ b/arch/x86/xen/xen-asm.S
+@@ -20,8 +20,30 @@
+
+ #include <linux/init.h>
+ #include <linux/linkage.h>
++#include <linux/objtool.h>
+ #include <../entry/calling.h>
+
++/*
++ * PV hypercall interface to the hypervisor.
++ *
++ * Called via inline asm(), so better preserve %rcx and %r11.
++ *
++ * Input:
++ * %eax: hypercall number
++ * %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
++ * Output: %rax
++ */
++SYM_FUNC_START(xen_hypercall_pv)
++ push %rcx
++ push %r11
++ UNWIND_HINT_SAVE
++ syscall
++ UNWIND_HINT_RESTORE
++ pop %r11
++ pop %rcx
++ RET
++SYM_FUNC_END(xen_hypercall_pv)
++
+ /*
+ * Enable events. This clears the event mask and tests the pending
+ * event status with one and operation. If there are pending events,
+@@ -170,7 +192,6 @@ SYM_CODE_START(xen_early_idt_handler_array)
+ SYM_CODE_END(xen_early_idt_handler_array)
+ __FINIT
+
+-hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
+ /*
+ * Xen64 iret frame:
+ *
+@@ -180,16 +201,27 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
+ * cs
+ * rip <-- standard iret frame
+ *
+- * flags
++ * flags <-- xen_iret must push from here on
+ *
+- * rcx }
+- * r11 }<-- pushed by hypercall page
+- * rsp->rax }
++ * rcx
++ * r11
++ * rsp->rax
+ */
++.macro xen_hypercall_iret
++ pushq $0 /* Flags */
++ push %rcx
++ push %r11
++ push %rax
++ mov $__HYPERVISOR_iret, %eax
++ syscall /* Do the IRET. */
++#ifdef CONFIG_MITIGATION_SLS
++ int3
++#endif
++.endm
++
+ SYM_CODE_START(xen_iret)
+ UNWIND_HINT_EMPTY
+- pushq $0
+- jmp hypercall_iret
++ xen_hypercall_iret
+ SYM_CODE_END(xen_iret)
+
+ /*
+@@ -290,8 +322,7 @@ SYM_CODE_START(xen_entry_SYSENTER_compat)
+ UNWIND_HINT_ENTRY
+ lea 16(%rsp), %rsp /* strip %rcx, %r11 */
+ mov $-ENOSYS, %rax
+- pushq $0
+- jmp hypercall_iret
++ xen_hypercall_iret
+ SYM_CODE_END(xen_entry_SYSENTER_compat)
+ SYM_CODE_END(xen_entry_SYSCALL_compat)
+
+diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
+index 2a3ef5fcba34b9..152bbe900a174a 100644
+--- a/arch/x86/xen/xen-head.S
++++ b/arch/x86/xen/xen-head.S
+@@ -6,9 +6,11 @@
+
+ #include <linux/elfnote.h>
+ #include <linux/init.h>
++#include <linux/instrumentation.h>
+
+ #include <asm/boot.h>
+ #include <asm/asm.h>
++#include <asm/frame.h>
+ #include <asm/msr.h>
+ #include <asm/page_types.h>
+ #include <asm/percpu.h>
+@@ -64,23 +66,85 @@ SYM_CODE_END(asm_cpu_bringup_and_idle)
+ #endif
+ #endif
+
+-.pushsection .text
+- .balign PAGE_SIZE
+-SYM_CODE_START(hypercall_page)
+- .rept (PAGE_SIZE / 32)
+- UNWIND_HINT_FUNC
+- ANNOTATE_UNRET_SAFE
+- ret
+- .skip 31, 0xcc
+- .endr
+-
+-#define HYPERCALL(n) \
+- .equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \
+- .type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32
+-#include <asm/xen-hypercalls.h>
+-#undef HYPERCALL
+-SYM_CODE_END(hypercall_page)
+-.popsection
++ .pushsection .text
++/*
++ * Xen hypercall interface to the hypervisor.
++ *
++ * Input:
++ * %eax: hypercall number
++ * 32-bit:
++ * %ebx, %ecx, %edx, %esi, %edi: args 1..5 for the hypercall
++ * 64-bit:
++ * %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
++ * Output: %[er]ax
++ */
++SYM_FUNC_START(xen_hypercall_hvm)
++ FRAME_BEGIN
++ /* Save all relevant registers (caller save and arguments). */
++#ifdef CONFIG_X86_32
++ push %eax
++ push %ebx
++ push %ecx
++ push %edx
++ push %esi
++ push %edi
++#else
++ push %rax
++ push %rcx
++ push %rdx
++ push %rdi
++ push %rsi
++ push %r11
++ push %r10
++ push %r9
++ push %r8
++#ifdef CONFIG_FRAME_POINTER
++ pushq $0 /* Dummy push for stack alignment. */
++#endif
++#endif
++ /* Set the vendor specific function. */
++ call __xen_hypercall_setfunc
++ /* Set ZF = 1 if AMD, Restore saved registers. */
++#ifdef CONFIG_X86_32
++ lea xen_hypercall_amd, %ebx
++ cmp %eax, %ebx
++ pop %edi
++ pop %esi
++ pop %edx
++ pop %ecx
++ pop %ebx
++ pop %eax
++#else
++ lea xen_hypercall_amd(%rip), %rbx
++ cmp %rax, %rbx
++#ifdef CONFIG_FRAME_POINTER
++ pop %rax /* Dummy pop. */
++#endif
++ pop %r8
++ pop %r9
++ pop %r10
++ pop %r11
++ pop %rsi
++ pop %rdi
++ pop %rdx
++ pop %rcx
++ pop %rax
++#endif
++ /* Use correct hypercall function. */
++ jz xen_hypercall_amd
++ jmp xen_hypercall_intel
++SYM_FUNC_END(xen_hypercall_hvm)
++
++SYM_FUNC_START(xen_hypercall_amd)
++ vmmcall
++ RET
++SYM_FUNC_END(xen_hypercall_amd)
++
++SYM_FUNC_START(xen_hypercall_intel)
++ vmcall
++ RET
++SYM_FUNC_END(xen_hypercall_intel)
++ .popsection
+
+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6")
+@@ -95,7 +159,6 @@ SYM_CODE_END(hypercall_page)
+ #ifdef CONFIG_XEN_PV
+ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, _ASM_PTR startup_xen)
+ #endif
+- ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,
+ .ascii "!writable_page_tables|pae_pgdir_above_4gb")
+ ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES,
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index 71f31032c635f5..e0b877c8a7569e 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -164,4 +164,13 @@ void xen_hvm_post_suspend(int suspend_cancelled);
+ static inline void xen_hvm_post_suspend(int suspend_cancelled) {}
+ #endif
+
++#ifdef CONFIG_XEN_PV
++void xen_hypercall_pv(void);
++#endif
++void xen_hypercall_hvm(void);
++void xen_hypercall_amd(void);
++void xen_hypercall_intel(void);
++void xen_hypercall_setfunc(void);
++void *__xen_hypercall_setfunc(void);
++
+ #endif /* XEN_OPS_H */
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 9654d1c2c20f82..ba23562abc80c5 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -1087,7 +1087,14 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
+ inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
+ iocg->child_active_sum);
+ } else {
+- inuse = clamp_t(u32, inuse, 1, active);
++ /*
++ * It may be tempting to turn this into a clamp expression with
++ * a lower limit of 1 but active may be 0, which cannot be used
++ * as an upper limit in that situation. This expression allows
++ * active to clamp inuse unless it is 0, in which case inuse
++ * becomes 1.
++ */
++ inuse = min(inuse, active) ?: 1;
+ }
+
+ iocg->last_inuse = iocg->inuse;
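
Concrete values make the new comment easy to check; below is a minimal standalone sketch (plain-C stand-ins for the kernel's min() and the GNU ?: idiom, illustrative only). Unlike clamp_t(u32, inuse, 1, active), the expression stays well-defined when active == 0:

#include <stdio.h>

static unsigned int propagate_inuse(unsigned int inuse, unsigned int active)
{
	unsigned int v = inuse < active ? inuse : active;	/* min() */

	return v ? v : 1;					/* ?: 1  */
}

int main(void)
{
	printf("%u\n", propagate_inuse(5, 3));	/* 3: clamped by active  */
	printf("%u\n", propagate_inuse(5, 0));	/* 1: active == 0 case   */
	printf("%u\n", propagate_inuse(0, 3));	/* 1: lower bound of one */
	return 0;
}
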
+diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
+index b1ff0a8f9c14fe..7672d70da850dd 100644
+--- a/drivers/acpi/acpica/evxfregn.c
++++ b/drivers/acpi/acpica/evxfregn.c
+@@ -201,8 +201,6 @@ acpi_remove_address_space_handler(acpi_handle device,
+
+ /* Now we can delete the handler object */
+
+- acpi_os_release_mutex(handler_obj->address_space.
+- context_mutex);
+ acpi_ut_remove_reference(handler_obj);
+ goto unlock_and_exit;
+ }
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index 2575d6c51f898b..9cc83df22de495 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -454,8 +454,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ if (cmd_rc)
+ *cmd_rc = -EINVAL;
+
+- if (cmd == ND_CMD_CALL)
++ if (cmd == ND_CMD_CALL) {
++ if (!buf || buf_len < sizeof(*call_pkg))
++ return -EINVAL;
++
+ call_pkg = buf;
++ }
++
+ func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
+ if (func < 0)
+ return func;
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 3c9f9398152390..0433ab8ced0dfd 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -250,6 +250,9 @@ static bool acpi_decode_space(struct resource_win *win,
+ switch (addr->resource_type) {
+ case ACPI_MEMORY_RANGE:
+ acpi_dev_memresource_flags(res, len, wp);
++
++ if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
++ res->flags |= IORESOURCE_PREFETCH;
+ break;
+ case ACPI_IO_RANGE:
+ acpi_dev_ioresource_flags(res, len, iodec,
+@@ -265,9 +268,6 @@ static bool acpi_decode_space(struct resource_win *win,
+ if (addr->producer_consumer == ACPI_PRODUCER)
+ res->flags |= IORESOURCE_WINDOW;
+
+- if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
+- res->flags |= IORESOURCE_PREFETCH;
+-
+ return !(res->flags & IORESOURCE_DISABLED);
+ }
+
+diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
+index f9bb3be4b939e4..fcac76192701f5 100644
+--- a/drivers/ata/sata_highbank.c
++++ b/drivers/ata/sata_highbank.c
+@@ -348,6 +348,7 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
+ phy_nodes[phy] = phy_data.np;
+ cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
+ if (cphy_base[phy] == NULL) {
++ of_node_put(phy_data.np);
+ return 0;
+ }
+ phy_count += 1;
+diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
+index 762127dd56c538..70a854557e6ec5 100644
+--- a/drivers/gpu/drm/i915/i915_scheduler.c
++++ b/drivers/gpu/drm/i915/i915_scheduler.c
+@@ -506,6 +506,6 @@ int __init i915_scheduler_module_init(void)
+ return 0;
+
+ err_priorities:
+- kmem_cache_destroy(slab_priorities);
++ kmem_cache_destroy(slab_dependencies);
+ return -ENOMEM;
+ }
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 67d153cc6a6c0f..75499e2967e8f9 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1384,6 +1384,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
+
+ #define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
++ NETIF_F_GSO_ENCAP_ALL | \
+ NETIF_F_HIGHDMA | NETIF_F_LRO)
+
+ #define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+index ecea3cdd30b3f7..cceb1cbe0d6a3b 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+@@ -2084,7 +2084,7 @@ void t4_idma_monitor(struct adapter *adapter,
+ struct sge_idma_monitor_state *idma,
+ int hz, int ticks);
+ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
+- unsigned int naddr, u8 *addr);
++ u8 start, unsigned int naddr, u8 *addr);
+ void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
+ u32 start_index, bool sleep_ok);
+ void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 0d9cda4ab30374..21afaa81697e69 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -3247,7 +3247,7 @@ static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+
+ dev_info(pi->adapter->pdev_dev,
+ "Setting MAC %pM on VF %d\n", mac, vf);
+- ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
++ ret = t4_set_vf_mac_acl(adap, vf + 1, pi->lport, 1, mac);
+ if (!ret)
+ ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
+ return ret;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index c99f5920c957c6..94fd7d6a1109d3 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -10215,11 +10215,12 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+ * t4_set_vf_mac_acl - Set MAC address for the specified VF
+ * @adapter: The adapter
+ * @vf: one of the VFs instantiated by the specified PF
++ * @start: The start port id associated with specified VF
+ * @naddr: the number of MAC addresses
+ * @addr: the MAC address(es) to be set to the specified VF
+ */
+ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
+- unsigned int naddr, u8 *addr)
++ u8 start, unsigned int naddr, u8 *addr)
+ {
+ struct fw_acl_mac_cmd cmd;
+
+@@ -10234,7 +10235,7 @@ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
+ cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
+ cmd.nmac = naddr;
+
+- switch (adapter->pf) {
++ switch (start) {
+ case 3:
+ memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
+ break;
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+index 174d89ee637492..ccfad6fba5b4b3 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+@@ -647,12 +647,11 @@ static int sparx5_start(struct sparx5 *sparx5)
+ err = -ENXIO;
+ if (sparx5->fdma_irq >= 0) {
+ if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0)
+- err = devm_request_threaded_irq(sparx5->dev,
+- sparx5->fdma_irq,
+- NULL,
+- sparx5_fdma_handler,
+- IRQF_ONESHOT,
+- "sparx5-fdma", sparx5);
++ err = devm_request_irq(sparx5->dev,
++ sparx5->fdma_irq,
++ sparx5_fdma_handler,
++ 0,
++ "sparx5-fdma", sparx5);
+ if (!err)
+ err = sparx5_fdma_start(sparx5);
+ if (err)
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+index 8561a7bf53e192..0d9ff3db3144f9 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+@@ -1113,7 +1113,7 @@ int sparx5_port_init(struct sparx5 *sparx5,
+ spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+ DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
+ devinst,
+- DEV10G_MAC_ENA_CFG(0));
++ DEV10G_MAC_MAXLEN_CFG(0));
+
+ /* Handle Signal Detect in 10G PCS */
+ spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
+index 385e4c62ca03c0..9b34ab54784018 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.c
++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
+@@ -67,7 +67,7 @@ MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000."
+
+ #define QCASPI_PLUGGABLE_MIN 0
+ #define QCASPI_PLUGGABLE_MAX 1
+-static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN;
++static int qcaspi_pluggable = QCASPI_PLUGGABLE_MAX;
+ module_param(qcaspi_pluggable, int, 0);
+ MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no).");
+
+@@ -829,7 +829,6 @@ qcaspi_netdev_init(struct net_device *dev)
+
+ dev->mtu = QCAFRM_MAX_MTU;
+ dev->type = ARPHRD_ETHER;
+- qca->clkspeed = qcaspi_clkspeed;
+ qca->burst_len = qcaspi_burst_len;
+ qca->spi_thread = NULL;
+ qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
+@@ -918,17 +917,15 @@ qca_spi_probe(struct spi_device *spi)
+ legacy_mode = of_property_read_bool(spi->dev.of_node,
+ "qca,legacy-mode");
+
+- if (qcaspi_clkspeed == 0) {
+- if (spi->max_speed_hz)
+- qcaspi_clkspeed = spi->max_speed_hz;
+- else
+- qcaspi_clkspeed = QCASPI_CLK_SPEED;
+- }
++ if (qcaspi_clkspeed)
++ spi->max_speed_hz = qcaspi_clkspeed;
++ else if (!spi->max_speed_hz)
++ spi->max_speed_hz = QCASPI_CLK_SPEED;
+
+- if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
+- (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
+- dev_err(&spi->dev, "Invalid clkspeed: %d\n",
+- qcaspi_clkspeed);
++ if (spi->max_speed_hz < QCASPI_CLK_SPEED_MIN ||
++ spi->max_speed_hz > QCASPI_CLK_SPEED_MAX) {
++ dev_err(&spi->dev, "Invalid clkspeed: %u\n",
++ spi->max_speed_hz);
+ return -EINVAL;
+ }
+
+@@ -953,14 +950,13 @@ qca_spi_probe(struct spi_device *spi)
+ return -EINVAL;
+ }
+
+- dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
++ dev_info(&spi->dev, "ver=%s, clkspeed=%u, burst_len=%d, pluggable=%d\n",
+ QCASPI_DRV_VERSION,
+- qcaspi_clkspeed,
++ spi->max_speed_hz,
+ qcaspi_burst_len,
+ qcaspi_pluggable);
+
+ spi->mode = SPI_MODE_3;
+- spi->max_speed_hz = qcaspi_clkspeed;
+ if (spi_setup(spi) < 0) {
+ dev_err(&spi->dev, "Unable to setup SPI device\n");
+ return -EFAULT;
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
+index 58ad910068d4bc..b3b17bd46e12c2 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.h
++++ b/drivers/net/ethernet/qualcomm/qca_spi.h
+@@ -101,7 +101,6 @@ struct qcaspi {
+ #endif
+
+ /* user configurable options */
+- u32 clkspeed;
+ u8 legacy_mode;
+ u16 burst_len;
+ };
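
The probe rework above makes spi->max_speed_hz the single source of truth, with a clear precedence: the qcaspi_clkspeed module parameter wins, then the controller's own max_speed_hz, then the driver default, and only the final value is range-checked. A minimal sketch of that selection (placeholder limit values, not a kernel build):

#include <stdio.h>

/* Placeholder limits for the sketch only; the real bounds live in
 * qca_spi.h as QCASPI_CLK_SPEED_MIN/MAX and QCASPI_CLK_SPEED. */
#define CLK_SPEED_MIN  1000000u
#define CLK_SPEED_MAX 16000000u
#define CLK_SPEED_DEF  8000000u

static unsigned int pick_clkspeed(unsigned int module_param,
				  unsigned int dev_max_speed_hz)
{
	if (module_param)
		return module_param;	/* module parameter wins      */
	if (dev_max_speed_hz)
		return dev_max_speed_hz;/* else the device's own rate */
	return CLK_SPEED_DEF;		/* else the driver default    */
}

int main(void)
{
	unsigned int hz = pick_clkspeed(0, 12000000u);

	if (hz < CLK_SPEED_MIN || hz > CLK_SPEED_MAX) {
		fprintf(stderr, "Invalid clkspeed: %u\n", hz);
		return 1;
	}
	printf("clkspeed=%u\n", hz);	/* 12000000 */
	return 0;
}
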
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 8edfc8984e2c91..5e5af71a85ac19 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -979,7 +979,8 @@ static void team_port_disable(struct team *team,
+
+ #define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
+- NETIF_F_HIGHDMA | NETIF_F_LRO)
++ NETIF_F_HIGHDMA | NETIF_F_LRO | \
++ NETIF_F_GSO_ENCAP_ALL)
+
+ #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 8b4be2e4d1cf2d..0add51f1b7e4d9 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -869,7 +869,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
+ static int xennet_close(struct net_device *dev)
+ {
+ struct netfront_info *np = netdev_priv(dev);
+- unsigned int num_queues = dev->real_num_tx_queues;
++ unsigned int num_queues = np->queues ? dev->real_num_tx_queues : 0;
+ unsigned int i;
+ struct netfront_queue *queue;
+ netif_tx_stop_all_queues(np->netdev);
+@@ -884,6 +884,9 @@ static void xennet_destroy_queues(struct netfront_info *info)
+ {
+ unsigned int i;
+
++ if (!info->queues)
++ return;
++
+ for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+ struct netfront_queue *queue = &info->queues[i];
+
+diff --git a/drivers/ptp/ptp_kvm_arm.c b/drivers/ptp/ptp_kvm_arm.c
+index b7d28c8dfb84e3..e68e6943167beb 100644
+--- a/drivers/ptp/ptp_kvm_arm.c
++++ b/drivers/ptp/ptp_kvm_arm.c
+@@ -22,6 +22,10 @@ int kvm_arch_ptp_init(void)
+ return 0;
+ }
+
++void kvm_arch_ptp_exit(void)
++{
++}
++
+ int kvm_arch_ptp_get_clock(struct timespec64 *ts)
+ {
+ return kvm_arch_ptp_get_crosststamp(NULL, ts, NULL);
+diff --git a/drivers/ptp/ptp_kvm_common.c b/drivers/ptp/ptp_kvm_common.c
+index fcae32f56f25a2..051114a592865d 100644
+--- a/drivers/ptp/ptp_kvm_common.c
++++ b/drivers/ptp/ptp_kvm_common.c
+@@ -130,6 +130,7 @@ static struct kvm_ptp_clock kvm_ptp_clock;
+ static void __exit ptp_kvm_exit(void)
+ {
+ ptp_clock_unregister(kvm_ptp_clock.ptp_clock);
++ kvm_arch_ptp_exit();
+ }
+
+ static int __init ptp_kvm_init(void)
+diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c
+index 4991054a213506..5e5b2ef785474f 100644
+--- a/drivers/ptp/ptp_kvm_x86.c
++++ b/drivers/ptp/ptp_kvm_x86.c
+@@ -14,27 +14,64 @@
+ #include <uapi/linux/kvm_para.h>
+ #include <linux/ptp_clock_kernel.h>
+ #include <linux/ptp_kvm.h>
++#include <linux/set_memory.h>
+
+ static phys_addr_t clock_pair_gpa;
+-static struct kvm_clock_pairing clock_pair;
++static struct kvm_clock_pairing clock_pair_glbl;
++static struct kvm_clock_pairing *clock_pair;
+
+ int kvm_arch_ptp_init(void)
+ {
++ struct page *p;
+ long ret;
+
+ if (!kvm_para_available())
+- return -ENODEV;
++ return -EOPNOTSUPP;
++
++ if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
++ p = alloc_page(GFP_KERNEL | __GFP_ZERO);
++ if (!p)
++ return -ENOMEM;
++
++ clock_pair = page_address(p);
++ ret = set_memory_decrypted((unsigned long)clock_pair, 1);
++ if (ret) {
++ __free_page(p);
++ clock_pair = NULL;
++ goto nofree;
++ }
++ } else {
++ clock_pair = &clock_pair_glbl;
++ }
+
+- clock_pair_gpa = slow_virt_to_phys(&clock_pair);
+- if (!pvclock_get_pvti_cpu0_va())
+- return -ENODEV;
++ clock_pair_gpa = slow_virt_to_phys(clock_pair);
++ if (!pvclock_get_pvti_cpu0_va()) {
++ ret = -EOPNOTSUPP;
++ goto err;
++ }
+
+ ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
+ KVM_CLOCK_PAIRING_WALLCLOCK);
+- if (ret == -KVM_ENOSYS)
+- return -ENODEV;
++ if (ret == -KVM_ENOSYS) {
++ ret = -EOPNOTSUPP;
++ goto err;
++ }
+
+ return ret;
++
++err:
++ kvm_arch_ptp_exit();
++nofree:
++ return ret;
++}
++
++void kvm_arch_ptp_exit(void)
++{
++ if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
++ WARN_ON(set_memory_encrypted((unsigned long)clock_pair, 1));
++ free_page((unsigned long)clock_pair);
++ clock_pair = NULL;
++ }
+ }
+
+ int kvm_arch_ptp_get_clock(struct timespec64 *ts)
+@@ -49,8 +86,8 @@ int kvm_arch_ptp_get_clock(struct timespec64 *ts)
+ return -EOPNOTSUPP;
+ }
+
+- ts->tv_sec = clock_pair.sec;
+- ts->tv_nsec = clock_pair.nsec;
++ ts->tv_sec = clock_pair->sec;
++ ts->tv_nsec = clock_pair->nsec;
+
+ return 0;
+ }
+@@ -81,9 +118,9 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
+ pr_err_ratelimited("clock pairing hypercall ret %lu\n", ret);
+ return -EOPNOTSUPP;
+ }
+- tspec->tv_sec = clock_pair.sec;
+- tspec->tv_nsec = clock_pair.nsec;
+- *cycle = __pvclock_read_cycles(src, clock_pair.tsc);
++ tspec->tv_sec = clock_pair->sec;
++ tspec->tv_nsec = clock_pair->nsec;
++ *cycle = __pvclock_read_cycles(src, clock_pair->tsc);
+ } while (pvclock_read_retry(src, version));
+
+ *cs = &kvm_clock;
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index f6195f44e548a7..c4d90c0f518073 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -3579,11 +3579,9 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
+ port_status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+ }
+
+- if (!hsotg->flags.b.port_connect_status) {
++ if (dwc2_is_device_mode(hsotg)) {
+ /*
+- * The port is disconnected, which means the core is
+- * either in device mode or it soon will be. Just
+- * return 0's for the remainder of the port status
++ * Just return 0's for the remainder of the port status
+ * since the port register can't be read if the core
+ * is in device mode.
+ */
+@@ -3653,13 +3651,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
+ if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1))
+ goto error;
+
+- if (!hsotg->flags.b.port_connect_status) {
++ if (dwc2_is_device_mode(hsotg)) {
+ /*
+- * The port is disconnected, which means the core is
+- * either in device mode or it soon will be. Just
+- * return without doing anything since the port
+- * register can't be written if the core is in device
+- * mode.
++ * Just return 0's for the remainder of the port status
++ * since the port register can't be read if the core
++ * is in device mode.
+ */
+ break;
+ }
+@@ -4382,7 +4378,7 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
+ if (hsotg->bus_suspended)
+ goto skip_power_saving;
+
+- if (hsotg->flags.b.port_connect_status == 0)
++ if (!(dwc2_read_hprt0(hsotg) & HPRT0_CONNSTS))
+ goto skip_power_saving;
+
+ switch (hsotg->params.power_down) {
+@@ -4464,6 +4460,7 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd)
+ * Power Down mode.
+ */
+ if (hprt0 & HPRT0_CONNSTS) {
++ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ hsotg->lx_state = DWC2_L0;
+ goto unlock;
+ }
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index 5111fcc0cac395..37ba396d5473fb 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -570,9 +570,12 @@ static int gs_start_io(struct gs_port *port)
+ * we didn't in gs_start_tx() */
+ tty_wakeup(port->port.tty);
+ } else {
+- gs_free_requests(ep, head, &port->read_allocated);
+- gs_free_requests(port->port_usb->in, &port->write_pool,
+- &port->write_allocated);
++ /* Free reqs only if we are still connected */
++ if (port->port_usb) {
++ gs_free_requests(ep, head, &port->read_allocated);
++ gs_free_requests(port->port_usb->in, &port->write_pool,
++ &port->write_allocated);
++ }
+ status = -EIO;
+ }
+
+diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
+index c25c51d26f2603..395913113686e1 100644
+--- a/drivers/usb/host/ehci-sh.c
++++ b/drivers/usb/host/ehci-sh.c
+@@ -120,8 +120,12 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
+ if (IS_ERR(priv->iclk))
+ priv->iclk = NULL;
+
+- clk_enable(priv->fclk);
+- clk_enable(priv->iclk);
++ ret = clk_enable(priv->fclk);
++ if (ret)
++ goto fail_request_resource;
++ ret = clk_enable(priv->iclk);
++ if (ret)
++ goto fail_iclk;
+
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (ret != 0) {
+@@ -137,6 +141,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
+
+ fail_add_hcd:
+ clk_disable(priv->iclk);
++fail_iclk:
+ clk_disable(priv->fclk);
+
+ fail_request_resource:
+diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
+index 994dc562b2db0c..6d95b90683bc8b 100644
+--- a/drivers/usb/host/max3421-hcd.c
++++ b/drivers/usb/host/max3421-hcd.c
+@@ -785,11 +785,17 @@ max3421_check_unlink(struct usb_hcd *hcd)
+ retval = 1;
+ dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
+ __func__, urb, urb->unlinked);
+- usb_hcd_unlink_urb_from_ep(hcd, urb);
+- spin_unlock_irqrestore(&max3421_hcd->lock,
+- flags);
+- usb_hcd_giveback_urb(hcd, urb, 0);
+- spin_lock_irqsave(&max3421_hcd->lock, flags);
++ if (urb == max3421_hcd->curr_urb) {
++ max3421_hcd->urb_done = 1;
++ max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) |
++ BIT(MAX3421_HI_RCVDAV_BIT));
++ } else {
++ usb_hcd_unlink_urb_from_ep(hcd, urb);
++ spin_unlock_irqrestore(&max3421_hcd->lock,
++ flags);
++ usb_hcd_giveback_urb(hcd, urb, 0);
++ spin_lock_irqsave(&max3421_hcd->lock, flags);
++ }
+ }
+ }
+ }
+diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
+index be7570d01ae1ad..0a1b1de032efad 100644
+--- a/fs/exfat/dir.c
++++ b/fs/exfat/dir.c
+@@ -878,7 +878,7 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
+
+ num_bh = EXFAT_B_TO_BLK_ROUND_UP(off + num_entries * DENTRY_SIZE, sb);
+ if (num_bh > ARRAY_SIZE(es->__bh)) {
+- es->bh = kmalloc_array(num_bh, sizeof(*es->bh), GFP_KERNEL);
++ es->bh = kmalloc_array(num_bh, sizeof(*es->bh), GFP_NOFS);
+ if (!es->bh) {
+ brelse(bh);
+ kfree(es);
+diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
+index dffe4ca5849357..13032aec47ee84 100644
+--- a/fs/xfs/libxfs/xfs_btree.c
++++ b/fs/xfs/libxfs/xfs_btree.c
+@@ -3413,14 +3413,31 @@ xfs_btree_insrec(
+ xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
+
+ /*
+- * If we just inserted into a new tree block, we have to
+- * recalculate nkey here because nkey is out of date.
++ * Update btree keys to reflect the newly added record or keyptr.
++ * There are three cases here to be aware of. Normally, all we have to
++ * do is walk towards the root, updating keys as necessary.
+ *
+- * Otherwise we're just updating an existing block (having shoved
+- * some records into the new tree block), so use the regular key
+- * update mechanism.
++ * If the caller had us target a full block for the insertion, we dealt
++ * with that by calling the _make_block_unfull function. If the
++ * "make unfull" function splits the block, it'll hand us back the key
++ * and pointer of the new block. We haven't yet added the new block to
++ * the next level up, so if we decide to add the new record to the new
++ * block (bp->b_bn != old_bn), we have to update the caller's pointer
++ * so that the caller adds the new block with the correct key.
++ *
++ * However, there is a third possibility-- if the selected block is the
++ * root block of an inode-rooted btree and cannot be expanded further,
++ * the "make unfull" function moves the root block contents to a new
++ * block and updates the root block to point to the new block. In this
++ * case, no block pointer is passed back because the block has already
++ * been added to the btree. In this case, we need to use the regular
++ * key update function, just like the first case. This is critical for
++ * overlapping btrees, because the high key must be updated to reflect
++ * the entire tree, not just the subtree accessible through the first
++ * child of the root (which is now two levels down from the root).
+ */
+- if (bp && xfs_buf_daddr(bp) != old_bn) {
++ if (!xfs_btree_ptr_is_null(cur, &nptr) &&
++ bp && xfs_buf_daddr(bp) != old_bn) {
+ xfs_btree_get_keys(cur, block, lkey);
+ } else if (xfs_btree_needs_key_update(cur, optr)) {
+ error = xfs_btree_update_keys(cur, level);
+diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
+index f0b38f4aba80be..90755c8213a2c6 100644
+--- a/fs/xfs/libxfs/xfs_symlink_remote.c
++++ b/fs/xfs/libxfs/xfs_symlink_remote.c
+@@ -89,8 +89,10 @@ xfs_symlink_verify(
+ struct xfs_mount *mp = bp->b_mount;
+ struct xfs_dsymlink_hdr *dsl = bp->b_addr;
+
++ /* no verification of non-crc buffers */
+ if (!xfs_has_crc(mp))
+- return __this_address;
++ return NULL;
++
+ if (!xfs_verify_magic(bp, dsl->sl_magic))
+ return __this_address;
+ if (!uuid_equal(&dsl->sl_uuid, &mp->m_sb.sb_meta_uuid))
+diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
+index a7bbb84f91a7f5..8756786435d014 100644
+--- a/fs/xfs/scrub/trace.h
++++ b/fs/xfs/scrub/trace.h
+@@ -464,7 +464,7 @@ TRACE_EVENT(xchk_ifork_btree_error,
+ TP_fast_assign(
+ xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
+ __entry->dev = sc->mp->m_super->s_dev;
+- __entry->ino = sc->ip->i_ino;
++ __entry->ino = cur->bc_ino.ip->i_ino;
+ __entry->whichfork = cur->bc_ino.whichfork;
+ __entry->type = sc->sm->sm_type;
+ __entry->btnum = cur->bc_btnum;
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
+index 8cd0c3df253f9e..bda195c7bfac6e 100644
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -1193,6 +1193,14 @@ xfs_file_remap_range(
+ xfs_iunlock2_io_mmap(src, dest);
+ if (ret)
+ trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
++ /*
++ * If the caller did not set CAN_SHORTEN, then it is not prepared to
++ * handle partial results -- either the whole remap succeeds, or we
++ * must say why it did not. In this case, any error should be returned
++ * to the caller.
++ */
++ if (ret && remapped < len && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
++ return ret;
+ return remapped > 0 ? remapped : ret;
+ }
+
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 65111de4ad6b55..65c9c0d92f4952 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -222,6 +222,23 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+
+ #endif /* __KERNEL__ */
+
++/**
++ * offset_to_ptr - convert a relative memory offset to an absolute pointer
++ * @off: the address of the 32-bit offset value
++ */
++static inline void *offset_to_ptr(const int *off)
++{
++ return (void *)((unsigned long)off + *off);
++}
++
++#endif /* __ASSEMBLY__ */
++
++#ifdef CONFIG_64BIT
++#define ARCH_SEL(a,b) a
++#else
++#define ARCH_SEL(a,b) b
++#endif
++
+ /*
+ * Force the compiler to emit 'sym' as a symbol, so that we can reference
+ * it from inline assembler. Necessary in case 'sym' could be inlined
+@@ -232,16 +249,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ static void * __section(".discard.addressable") __used \
+ __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
+
+-/**
+- * offset_to_ptr - convert a relative memory offset to an absolute pointer
+- * @off: the address of the 32-bit offset value
+- */
+-static inline void *offset_to_ptr(const int *off)
+-{
+- return (void *)((unsigned long)off + *off);
+-}
++#define __ADDRESSABLE_ASM(sym) \
++ .pushsection .discard.addressable,"aw"; \
++ .align ARCH_SEL(8,4); \
++ ARCH_SEL(.quad, .long) __stringify(sym); \
++ .popsection;
+
+-#endif /* __ASSEMBLY__ */
++#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym))
+
+ /* &a[0] degrades to a pointer: a different type from an array */
+ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
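
offset_to_ptr(), moved above unchanged, is self-contained: the 32-bit value stored at *off is a displacement relative to off's own address. A minimal userspace demonstration (illustrative only; the kernel fills such slots at link time rather than at run time):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the kernel helper above. */
static void *offset_to_ptr(const int32_t *off)
{
	return (void *)((uintptr_t)off + *off);
}

static int target = 42;
static int32_t slot;	/* holds the displacement from &slot to &target */

int main(void)
{
	slot = (int32_t)((intptr_t)&target - (intptr_t)&slot);
	printf("%d\n", *(int *)offset_to_ptr(&slot));	/* prints 42 */
	return 0;
}
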
+diff --git a/include/linux/ptp_kvm.h b/include/linux/ptp_kvm.h
+index f960a719f0d541..c1636ce76bd2a3 100644
+--- a/include/linux/ptp_kvm.h
++++ b/include/linux/ptp_kvm.h
+@@ -12,6 +12,7 @@ struct timespec64;
+ struct clocksource;
+
+ int kvm_arch_ptp_init(void);
++void kvm_arch_ptp_exit(void);
+ int kvm_arch_ptp_get_clock(struct timespec64 *ts);
+ int kvm_arch_ptp_get_crosststamp(u64 *cycle,
+ struct timespec64 *tspec, struct clocksource **cs);
+diff --git a/include/linux/static_call.h b/include/linux/static_call.h
+index fcc5b48989b3ca..25663740b53e85 100644
+--- a/include/linux/static_call.h
++++ b/include/linux/static_call.h
+@@ -160,6 +160,8 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool
+
+ #ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+
++extern bool static_call_initialized;
++
+ extern int __init static_call_init(void);
+
+ struct static_call_mod {
+@@ -215,6 +217,8 @@ extern long __static_call_return0(void);
+
+ #elif defined(CONFIG_HAVE_STATIC_CALL)
+
++#define static_call_initialized 0
++
+ static inline int static_call_init(void) { return 0; }
+
+ #define __DEFINE_STATIC_CALL(name, _func, _func_init) \
+@@ -265,6 +269,8 @@ extern long __static_call_return0(void);
+
+ #else /* Generic implementation */
+
++#define static_call_initialized 0
++
+ static inline int static_call_init(void) { return 0; }
+
+ static inline long __static_call_return0(void)
+diff --git a/include/net/lapb.h b/include/net/lapb.h
+index 124ee122f2c8f8..6c07420644e45a 100644
+--- a/include/net/lapb.h
++++ b/include/net/lapb.h
+@@ -4,7 +4,7 @@
+ #include <linux/lapb.h>
+ #include <linux/refcount.h>
+
+-#define LAPB_HEADER_LEN 20 /* LAPB over Ethernet + a bit more */
++#define LAPB_HEADER_LEN MAX_HEADER /* LAPB over Ethernet + a bit more */
+
+ #define LAPB_ACK_PENDING_CONDITION 0x01
+ #define LAPB_REJECT_CONDITION 0x02
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index e29c0581f93ad7..7049a85a78ab22 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -9223,8 +9223,11 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
+ struct bpf_reg_state *reg;
+
+ bpf_for_each_reg_in_vstate(vstate, state, reg, ({
+- if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
++ if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) {
++ s32 saved_subreg_def = reg->subreg_def;
+ copy_register_state(reg, known_reg);
++ reg->subreg_def = saved_subreg_def;
++ }
+ }));
+ }
+
+diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c
+index 6f566fe27ec1d7..f0bd892ef15776 100644
+--- a/kernel/static_call_inline.c
++++ b/kernel/static_call_inline.c
+@@ -15,7 +15,7 @@ extern struct static_call_site __start_static_call_sites[],
+ extern struct static_call_tramp_key __start_static_call_tramp_key[],
+ __stop_static_call_tramp_key[];
+
+-static bool static_call_initialized;
++bool static_call_initialized;
+
+ /* mutex to protect key modules/sites */
+ static DEFINE_MUTEX(static_call_mutex);
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 21aef22a8489ca..baaaf9bc05f2b7 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -1823,7 +1823,7 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
+ int ret;
+ char *event;
+
+- if (func) {
++ if (func && !strchr(func, ':')) {
+ unsigned int count;
+
+ count = number_of_same_symbols(func);
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 660a5594a647b0..6be14f9e071a6b 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -990,16 +990,25 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+ int tt_diff_len, tt_change_len = 0;
+ int tt_diff_entries_num = 0;
+ int tt_diff_entries_count = 0;
++ bool drop_changes = false;
++ size_t tt_extra_len = 0;
+ u16 tvlv_len;
+
+ tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
+ tt_diff_len = batadv_tt_len(tt_diff_entries_num);
+
+ /* if we have too many changes for one packet don't send any
+- * and wait for the tt table request which will be fragmented
++ * and wait for the tt table request so we can reply with the full
++ * (fragmented) table.
++ *
++ * The local change history should still be cleaned up so the next
++ * TT round can start again with a clean state.
+ */
+- if (tt_diff_len > bat_priv->soft_iface->mtu)
++ if (tt_diff_len > bat_priv->soft_iface->mtu) {
+ tt_diff_len = 0;
++ tt_diff_entries_num = 0;
++ drop_changes = true;
++ }
+
+ tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tt_data,
+ &tt_change, &tt_diff_len);
+@@ -1008,7 +1017,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+
+ tt_data->flags = BATADV_TT_OGM_DIFF;
+
+- if (tt_diff_len == 0)
++ if (!drop_changes && tt_diff_len == 0)
+ goto container_register;
+
+ spin_lock_bh(&bat_priv->tt.changes_list_lock);
+@@ -1027,6 +1036,9 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+ }
+ spin_unlock_bh(&bat_priv->tt.changes_list_lock);
+
++ tt_extra_len = batadv_tt_len(tt_diff_entries_num -
++ tt_diff_entries_count);
++
+ /* Keep the buffer for possible tt_request */
+ spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+ kfree(bat_priv->tt.last_changeset);
+@@ -1035,6 +1047,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+ tt_change_len = batadv_tt_len(tt_diff_entries_count);
+ /* check whether this new OGM has no changes due to size problems */
+ if (tt_diff_entries_count > 0) {
++ tt_diff_len -= tt_extra_len;
+ /* if kmalloc() fails we will reply with the full table
+ * instead of providing the diff
+ */
+@@ -1047,6 +1060,8 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+ }
+ spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
+
++ /* Remove extra packet space for OGM */
++ tvlv_len -= tt_extra_len;
+ container_register:
+ batadv_tvlv_container_register(bat_priv, BATADV_TVLV_TT, 1, tt_data,
+ tvlv_len);
+@@ -2747,14 +2762,16 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
+ *
+ * Fills the tvlv buff with the tt entries from the specified hash. If valid_cb
+ * is not provided then this becomes a no-op.
++ *
++ * Return: Remaining unused length in tvlv_buff.
+ */
+-static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+- struct batadv_hashtable *hash,
+- void *tvlv_buff, u16 tt_len,
+- bool (*valid_cb)(const void *,
+- const void *,
+- u8 *flags),
+- void *cb_data)
++static u16 batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
++ struct batadv_hashtable *hash,
++ void *tvlv_buff, u16 tt_len,
++ bool (*valid_cb)(const void *,
++ const void *,
++ u8 *flags),
++ void *cb_data)
+ {
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tvlv_tt_change *tt_change;
+@@ -2768,7 +2785,7 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+ tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
+
+ if (!valid_cb)
+- return;
++ return tt_len;
+
+ rcu_read_lock();
+ for (i = 0; i < hash->size; i++) {
+@@ -2794,6 +2811,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+ }
+ }
+ rcu_read_unlock();
++
++ return batadv_tt_len(tt_tot - tt_num_entries);
+ }
+
+ /**
+@@ -3069,10 +3088,11 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
+ goto out;
+
+ /* fill the rest of the tvlv with the real TT entries */
+- batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.global_hash,
+- tt_change, tt_len,
+- batadv_tt_global_valid,
+- req_dst_orig_node);
++ tvlv_len -= batadv_tt_tvlv_generate(bat_priv,
++ bat_priv->tt.global_hash,
++ tt_change, tt_len,
++ batadv_tt_global_valid,
++ req_dst_orig_node);
+ }
+
+ /* Don't send the response, if larger than fragmented packet. */
+@@ -3196,9 +3216,11 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+ goto out;
+
+ /* fill the rest of the tvlv with the real TT entries */
+- batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.local_hash,
+- tt_change, tt_len,
+- batadv_tt_local_valid, NULL);
++ tvlv_len -= batadv_tt_tvlv_generate(bat_priv,
++ bat_priv->tt.local_hash,
++ tt_change, tt_len,
++ batadv_tt_local_valid,
++ NULL);
+ }
+
+ tvlv_tt_data->flags = BATADV_TT_RESPONSE;
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 9ef58dd1139d3b..f591ec106cd6c6 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -158,6 +158,7 @@ static void sock_map_del_link(struct sock *sk,
+ verdict_stop = true;
+ list_del(&link->list);
+ sk_psock_free_link(link);
++ break;
+ }
+ }
+ spin_unlock_bh(&psock->link_lock);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 44eedae43eaa47..3a66d0c7d015cf 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -824,8 +824,10 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
+ unsigned int size;
+
+ if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) {
+- opts->options |= OPTION_MPTCP;
+- remaining -= size;
++ if (remaining >= size) {
++ opts->options |= OPTION_MPTCP;
++ remaining -= size;
++ }
+ }
+ }
+
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 93ed7bac9ee60e..f459e34684ad36 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -77,6 +77,8 @@ struct netem_sched_data {
+ struct sk_buff *t_head;
+ struct sk_buff *t_tail;
+
++ u32 t_len;
++
+ /* optional qdisc for classful handling (NULL at netem init) */
+ struct Qdisc *qdisc;
+
+@@ -373,6 +375,7 @@ static void tfifo_reset(struct Qdisc *sch)
+ rtnl_kfree_skbs(q->t_head, q->t_tail);
+ q->t_head = NULL;
+ q->t_tail = NULL;
++ q->t_len = 0;
+ }
+
+ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+@@ -402,6 +405,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+ rb_link_node(&nskb->rbnode, parent, p);
+ rb_insert_color(&nskb->rbnode, &q->t_root);
+ }
++ q->t_len++;
+ sch->q.qlen++;
+ }
+
+@@ -508,7 +512,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 1<<(prandom_u32() % 8);
+ }
+
+- if (unlikely(sch->q.qlen >= sch->limit)) {
++ if (unlikely(q->t_len >= sch->limit)) {
+ /* re-link segs, so that qdisc_drop_all() frees them all */
+ skb->next = segs;
+ qdisc_drop_all(skb, sch, to_free);
+@@ -692,8 +696,8 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+ tfifo_dequeue:
+ skb = __qdisc_dequeue_head(&sch->q);
+ if (skb) {
+- qdisc_qstats_backlog_dec(sch, skb);
+ deliver:
++ qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_bstats_update(sch, skb);
+ return skb;
+ }
+@@ -709,8 +713,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+
+ if (time_to_send <= now && q->slot.slot_next <= now) {
+ netem_erase_head(q, skb);
+- sch->q.qlen--;
+- qdisc_qstats_backlog_dec(sch, skb);
++ q->t_len--;
+ skb->next = NULL;
+ skb->prev = NULL;
+ /* skb->dev shares skb->rbnode area,
+@@ -737,16 +740,21 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+ if (net_xmit_drop_count(err))
+ qdisc_qstats_drop(sch);
+ qdisc_tree_reduce_backlog(sch, 1, pkt_len);
++ sch->qstats.backlog -= pkt_len;
++ sch->q.qlen--;
+ }
+ goto tfifo_dequeue;
+ }
++ sch->q.qlen--;
+ goto deliver;
+ }
+
+ if (q->qdisc) {
+ skb = q->qdisc->ops->dequeue(q->qdisc);
+- if (skb)
++ if (skb) {
++ sch->q.qlen--;
+ goto deliver;
++ }
+ }
+
+ qdisc_watchdog_schedule_ns(&q->watchdog,
+@@ -756,8 +764,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+
+ if (q->qdisc) {
+ skb = q->qdisc->ops->dequeue(q->qdisc);
+- if (skb)
++ if (skb) {
++ sch->q.qlen--;
+ goto deliver;
++ }
+ }
+ return NULL;
+ }
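The netem hunks split the accounting in two: the new t_len counts only packets sitting in netem's internal time-sorted queue, while sch->q.qlen keeps counting every packet the qdisc still owns, including those handed to an optional child qdisc, and is only decremented on the single deliver path. A sketch of the dual-counter idea with illustrative names (struct netem_like is not the kernel structure):

#include <stdio.h>

struct netem_like {
    unsigned int t_len;   /* packets in the internal tfifo */
    unsigned int qlen;    /* packets owned by this qdisc overall */
    unsigned int child;   /* packets delegated to a child queue */
};

static void enqueue(struct netem_like *q) { q->t_len++; q->qlen++; }

static void move_to_child(struct netem_like *q)
{
    q->t_len--;           /* leaves the tfifo ... */
    q->child++;           /* ... but is still owned, so qlen unchanged */
}

static void deliver_from_child(struct netem_like *q)
{
    q->child--;
    q->qlen--;            /* only now does the packet leave the qdisc */
}

int main(void)
{
    struct netem_like q = {0};

    enqueue(&q);
    move_to_child(&q);
    printf("t_len=%u qlen=%u child=%u\n", q.t_len, q.qlen, q.child);
    deliver_from_child(&q);
    printf("t_len=%u qlen=%u child=%u\n", q.t_len, q.qlen, q.child);
    return 0;
}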
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index 3f5a12b85b2d3e..f5bd75d931c1b5 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -811,6 +811,7 @@ static void cleanup_bearer(struct work_struct *work)
+ {
+ struct udp_bearer *ub = container_of(work, struct udp_bearer, work);
+ struct udp_replicast *rcast, *tmp;
++ struct tipc_net *tn;
+
+ list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) {
+ dst_cache_destroy(&rcast->dst_cache);
+@@ -818,10 +819,14 @@ static void cleanup_bearer(struct work_struct *work)
+ kfree_rcu(rcast, rcu);
+ }
+
++ tn = tipc_net(sock_net(ub->ubsock->sk));
++
+ dst_cache_destroy(&ub->rcast.dst_cache);
+ udp_tunnel_sock_release(ub->ubsock);
++
++ /* Note: could use a call_rcu() to avoid another synchronize_net() */
+ synchronize_net();
+- atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
++ atomic_dec(&tn->wq_count);
+ kfree(ub);
+ }
+
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 6f67a596b75890..276993dd6416f8 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -1196,6 +1196,14 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
+ return -ENOMEM;
+ }
+
++ /* __vsock_release() might have already flushed accept_queue.
++ * Subsequent enqueues would lead to a memory leak.
++ */
++ if (sk->sk_shutdown == SHUTDOWN_MASK) {
++ virtio_transport_reset_no_sock(t, pkt);
++ return -ESHUTDOWN;
++ }
++
+ child = vsock_create_connected(sk);
+ if (!child) {
+ virtio_transport_reset_no_sock(t, pkt);
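The vsock hunk closes a leak window: once __vsock_release() has flushed the listener's accept_queue, any child socket enqueued afterwards would never be accepted or freed, so fully shut-down listeners now reset the peer up front. A sketch of the guard, with illustrative names and constants (SHUT_MASK here is not the kernel's SHUTDOWN_MASK):

#include <stdio.h>
#include <stdlib.h>

#define SHUT_RD   0x1
#define SHUT_WR   0x2
#define SHUT_MASK (SHUT_RD | SHUT_WR)

struct listener { int shutdown; };

static int handle_request(struct listener *l)
{
    if (l->shutdown == SHUT_MASK)
        return -1;                /* refuse: accept queue already flushed */

    void *child = malloc(64);     /* would be enqueued for accept() */
    if (!child)
        return -1;
    free(child);                  /* accepted and consumed normally */
    return 0;
}

int main(void)
{
    struct listener l = { .shutdown = SHUT_MASK };

    printf("request on closed listener: %d\n", handle_request(&l));
    return 0;
}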
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 9d98a0e6a9f4c4..15932d0a461307 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -591,7 +591,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
+ static int snd_usb_extigy_boot_quirk(struct usb_device *dev, struct usb_interface *intf)
+ {
+ struct usb_host_config *config = dev->actconfig;
+- struct usb_device_descriptor new_device_descriptor;
++ struct usb_device_descriptor *new_device_descriptor = NULL;
+ int err;
+
+ if (le16_to_cpu(get_cfg_desc(config)->wTotalLength) == EXTIGY_FIRMWARE_SIZE_OLD ||
+@@ -602,15 +602,20 @@ static int snd_usb_extigy_boot_quirk(struct usb_device *dev, struct usb_interfac
+ 0x10, 0x43, 0x0001, 0x000a, NULL, 0);
+ if (err < 0)
+ dev_dbg(&dev->dev, "error sending boot message: %d\n", err);
++
++ new_device_descriptor = kmalloc(sizeof(*new_device_descriptor), GFP_KERNEL);
++ if (!new_device_descriptor)
++ return -ENOMEM;
+ err = usb_get_descriptor(dev, USB_DT_DEVICE, 0,
+- &new_device_descriptor, sizeof(new_device_descriptor));
++ new_device_descriptor, sizeof(*new_device_descriptor));
+ if (err < 0)
+ dev_dbg(&dev->dev, "error usb_get_descriptor: %d\n", err);
+- if (new_device_descriptor.bNumConfigurations > dev->descriptor.bNumConfigurations)
++ if (new_device_descriptor->bNumConfigurations > dev->descriptor.bNumConfigurations)
+ dev_dbg(&dev->dev, "error too large bNumConfigurations: %d\n",
+- new_device_descriptor.bNumConfigurations);
++ new_device_descriptor->bNumConfigurations);
+ else
+- memcpy(&dev->descriptor, &new_device_descriptor, sizeof(dev->descriptor));
++ memcpy(&dev->descriptor, new_device_descriptor, sizeof(dev->descriptor));
++ kfree(new_device_descriptor);
+ err = usb_reset_configuration(dev);
+ if (err < 0)
+ dev_dbg(&dev->dev, "error usb_reset_configuration: %d\n", err);
+@@ -942,7 +947,7 @@ static void mbox2_setup_48_24_magic(struct usb_device *dev)
+ static int snd_usb_mbox2_boot_quirk(struct usb_device *dev)
+ {
+ struct usb_host_config *config = dev->actconfig;
+- struct usb_device_descriptor new_device_descriptor;
++ struct usb_device_descriptor *new_device_descriptor = NULL;
+ int err;
+ u8 bootresponse[0x12];
+ int fwsize;
+@@ -977,15 +982,21 @@ static int snd_usb_mbox2_boot_quirk(struct usb_device *dev)
+
+ dev_dbg(&dev->dev, "device initialised!\n");
+
++ new_device_descriptor = kmalloc(sizeof(*new_device_descriptor), GFP_KERNEL);
++ if (!new_device_descriptor)
++ return -ENOMEM;
++
+ err = usb_get_descriptor(dev, USB_DT_DEVICE, 0,
+- &new_device_descriptor, sizeof(new_device_descriptor));
++ new_device_descriptor, sizeof(*new_device_descriptor));
+ if (err < 0)
+ dev_dbg(&dev->dev, "error usb_get_descriptor: %d\n", err);
+- if (new_device_descriptor.bNumConfigurations > dev->descriptor.bNumConfigurations)
++ if (new_device_descriptor->bNumConfigurations > dev->descriptor.bNumConfigurations)
+ dev_dbg(&dev->dev, "error too large bNumConfigurations: %d\n",
+- new_device_descriptor.bNumConfigurations);
++ new_device_descriptor->bNumConfigurations);
+ else
+- memcpy(&dev->descriptor, &new_device_descriptor, sizeof(dev->descriptor));
++ memcpy(&dev->descriptor, new_device_descriptor, sizeof(dev->descriptor));
++
++ kfree(new_device_descriptor);
+
+ err = usb_reset_configuration(dev);
+ if (err < 0)
+@@ -1768,6 +1779,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x046d, 0x09a4, /* Logitech QuickCam E 3500 */
+ QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_IGNORE_CTL_ERROR),
++ DEVICE_FLG(0x0499, 0x1506, /* Yamaha THR5 */
++ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ DEVICE_FLG(0x0499, 0x1509, /* Steinberg UR22 */
+ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ DEVICE_FLG(0x0499, 0x3108, /* Yamaha YIT-W12TX */
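Both boot-quirk hunks above (Extigy and Mbox2) apply the same fix: the device descriptor handed to usb_get_descriptor() may be written by DMA, so it must be heap-allocated rather than a stack local. A userspace sketch of the allocation pattern; read_descriptor() is an illustrative stand-in, not a real USB API call:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct device_descriptor { unsigned char bNumConfigurations; };

/* Stand-in for a transfer API that writes into the caller's buffer. */
static int read_descriptor(struct device_descriptor *buf)
{
    memset(buf, 0, sizeof(*buf));
    buf->bNumConfigurations = 1;
    return 0;
}

int main(void)
{
    /* Heap-allocated, mirroring the kmalloc() in the hunks above,
     * instead of a stack variable the hardware could DMA into. */
    struct device_descriptor *desc = malloc(sizeof(*desc));

    if (!desc)
        return 1;
    if (read_descriptor(desc) == 0)
        printf("bNumConfigurations=%u\n", desc->bNumConfigurations);
    free(desc);
    return 0;
}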
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index fd6714de2260e6..c2596b1e1fb1e3 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -3206,10 +3206,13 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
+ break;
+
+ case INSN_CONTEXT_SWITCH:
+- if (func && (!next_insn || !next_insn->hint)) {
+- WARN_FUNC("unsupported instruction in callable function",
+- sec, insn->offset);
+- return 1;
++ if (func) {
++ if (!next_insn || !next_insn->hint) {
++ WARN_FUNC("unsupported instruction in callable function",
++ sec, insn->offset);
++ return 1;
++ }
++ break;
+ }
+ return 0;
+
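The objtool hunk changes what happens after a context-switch instruction inside a callable function: with a hinted successor it now breaks out of the switch and keeps validating the branch, where the old code fell through to return 0 and silently ended validation; only the unhinted case still warns and fails. A sketch of the before/after control flow, with illustrative types standing in for objtool's:

#include <stdbool.h>
#include <stdio.h>

enum verdict { CONTINUE, STOP_OK, STOP_ERROR };

static enum verdict check_context_switch(bool in_func, bool next_hinted)
{
    if (in_func) {
        if (!next_hinted)
            return STOP_ERROR;   /* "unsupported instruction ..." */
        return CONTINUE;         /* the new 'break': keep validating */
    }
    return STOP_OK;              /* outside a function: branch ends */
}

int main(void)
{
    printf("%d %d %d\n",
           check_context_switch(true, true),    /* CONTINUE   */
           check_context_switch(true, false),   /* STOP_ERROR */
           check_context_switch(false, false)); /* STOP_OK    */
    return 0;
}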
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh
+index 7d9e73a43a49b1..9c3c426197af57 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh
+@@ -108,11 +108,6 @@ port_pool_test()
+
+ devlink sb occupancy snapshot $DEVLINK_DEV
+
+- RET=0
+- max_occ=$(sb_occ_pool_check $dl_port1 $SB_POOL_ING $exp_max_occ)
+- check_err $? "Expected iPool($SB_POOL_ING) max occupancy to be $exp_max_occ, but got $max_occ"
+- log_test "physical port's($h1) ingress pool"
+-
+ RET=0
+ max_occ=$(sb_occ_pool_check $dl_port2 $SB_POOL_ING $exp_max_occ)
+ check_err $? "Expected iPool($SB_POOL_ING) max occupancy to be $exp_max_occ, but got $max_occ"
+@@ -136,11 +131,6 @@ port_tc_ip_test()
+
+ devlink sb occupancy snapshot $DEVLINK_DEV
+
+- RET=0
+- max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ)
+- check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ"
+- log_test "physical port's($h1) ingress TC - IP packet"
+-
+ RET=0
+ max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ)
+ check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ"
+@@ -167,11 +157,6 @@ port_tc_arp_test()
+
+ devlink sb occupancy snapshot $DEVLINK_DEV
+
+- RET=0
+- max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ)
+- check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ"
+- log_test "physical port's($h1) ingress TC - ARP packet"
+-
+ RET=0
+ max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ)
+ check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ"