author     Mike Pagano <mpagano@gentoo.org>  2019-01-26 10:08:45 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2019-01-26 10:08:45 -0500
commit     ab431e3aad1b05a8e5ab6aff6f0ba86a0fac96b9 (patch)
tree       5950874d4bb042682624202420af30e92a6ffb83
parent     proj/linux-patches: Linux patch 4.19.17 (diff)
proj/linux-patches: Linux patch 4.19.18 (4.19-19)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |    4
-rw-r--r--  1017_linux-4.19.18.patch | 4709
2 files changed, 4713 insertions, 0 deletions
diff --git a/0000_README b/0000_README index f7061f2c..776e758c 100644 --- a/0000_README +++ b/0000_README @@ -111,6 +111,10 @@ Patch: 1016_linux-4.19.17.patch From: http://www.kernel.org Desc: Linux 4.19.17 +Patch: 1017_linux-4.19.18.patch +From: http://www.kernel.org +Desc: Linux 4.19.18 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1017_linux-4.19.18.patch b/1017_linux-4.19.18.patch new file mode 100644 index 00000000..5cb666fe --- /dev/null +++ b/1017_linux-4.19.18.patch @@ -0,0 +1,4709 @@ +diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt +index 22b4b00dee31..06ac6dda9b34 100644 +--- a/Documentation/filesystems/proc.txt ++++ b/Documentation/filesystems/proc.txt +@@ -496,7 +496,9 @@ manner. The codes are the following: + + Note that there is no guarantee that every flag and associated mnemonic will + be present in all further kernel releases. Things get changed, the flags may +-be vanished or the reverse -- new added. ++be vanished or the reverse -- new added. Interpretation of their meaning ++might change in future as well. So each consumer of these flags has to ++follow each specific kernel version for the exact semantic. + + This file is only present if the CONFIG_MMU kernel configuration option is + enabled. +diff --git a/Makefile b/Makefile +index 4b0bce87a36b..9f37a8a9feb9 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 19 +-SUBLEVEL = 17 ++SUBLEVEL = 18 + EXTRAVERSION = + NAME = "People's Front" + +diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h +index 0bcc98dbba56..f90f5d83b228 100644 +--- a/arch/arm64/include/asm/assembler.h ++++ b/arch/arm64/include/asm/assembler.h +@@ -378,27 +378,33 @@ alternative_endif + * size: size of the region + * Corrupts: kaddr, size, tmp1, tmp2 + */ ++ .macro __dcache_op_workaround_clean_cache, op, kaddr ++alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE ++ dc \op, \kaddr ++alternative_else ++ dc civac, \kaddr ++alternative_endif ++ .endm ++ + .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2 + dcache_line_size \tmp1, \tmp2 + add \size, \kaddr, \size + sub \tmp2, \tmp1, #1 + bic \kaddr, \kaddr, \tmp2 + 9998: +- .if (\op == cvau || \op == cvac) +-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE +- dc \op, \kaddr +-alternative_else +- dc civac, \kaddr +-alternative_endif +- .elseif (\op == cvap) +-alternative_if ARM64_HAS_DCPOP +- sys 3, c7, c12, 1, \kaddr // dc cvap +-alternative_else +- dc cvac, \kaddr +-alternative_endif ++ .ifc \op, cvau ++ __dcache_op_workaround_clean_cache \op, \kaddr ++ .else ++ .ifc \op, cvac ++ __dcache_op_workaround_clean_cache \op, \kaddr ++ .else ++ .ifc \op, cvap ++ sys 3, c7, c12, 1, \kaddr // dc cvap + .else + dc \op, \kaddr + .endif ++ .endif ++ .endif + add \kaddr, \kaddr, \tmp1 + cmp \kaddr, \size + b.lo 9998b +diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h +index b96442960aea..56562ff01076 100644 +--- a/arch/arm64/include/asm/memory.h ++++ b/arch/arm64/include/asm/memory.h +@@ -76,12 +76,17 @@ + /* + * KASAN requires 1/8th of the kernel virtual address space for the shadow + * region. KASAN can bloat the stack significantly, so double the (minimum) +- * stack size when KASAN is in use. ++ * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is ++ * on. 
+ */ + #ifdef CONFIG_KASAN + #define KASAN_SHADOW_SCALE_SHIFT 3 + #define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) ++#ifdef CONFIG_KASAN_EXTRA ++#define KASAN_THREAD_SHIFT 2 ++#else + #define KASAN_THREAD_SHIFT 1 ++#endif /* CONFIG_KASAN_EXTRA */ + #else + #define KASAN_SHADOW_SIZE (0) + #define KASAN_THREAD_SHIFT 0 +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c +index e213f8e867f6..8a91ac067d44 100644 +--- a/arch/arm64/kernel/perf_event.c ++++ b/arch/arm64/kernel/perf_event.c +@@ -1274,6 +1274,7 @@ static struct platform_driver armv8_pmu_driver = { + .driver = { + .name = ARMV8_PMU_PDEV_NAME, + .of_match_table = armv8_pmu_of_device_ids, ++ .suppress_bind_attrs = true, + }, + .probe = armv8_pmu_device_probe, + }; +diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S +index 0c22ede52f90..a194fd0e837f 100644 +--- a/arch/arm64/mm/cache.S ++++ b/arch/arm64/mm/cache.S +@@ -212,6 +212,9 @@ ENDPROC(__dma_clean_area) + * - size - size in question + */ + ENTRY(__clean_dcache_area_pop) ++ alternative_if_not ARM64_HAS_DCPOP ++ b __clean_dcache_area_poc ++ alternative_else_nop_endif + dcache_by_line_op cvap, sy, x0, x1, x2, x3 + ret + ENDPIPROC(__clean_dcache_area_pop) +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig +index 154b811d5894..201caf226b47 100644 +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -794,6 +794,7 @@ config SIBYTE_SWARM + select SYS_SUPPORTS_HIGHMEM + select SYS_SUPPORTS_LITTLE_ENDIAN + select ZONE_DMA32 if 64BIT ++ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI + + config SIBYTE_LITTLESUR + bool "Sibyte BCM91250C2-LittleSur" +@@ -814,6 +815,7 @@ config SIBYTE_SENTOSA + select SYS_HAS_CPU_SB1 + select SYS_SUPPORTS_BIG_ENDIAN + select SYS_SUPPORTS_LITTLE_ENDIAN ++ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI + + config SIBYTE_BIGSUR + bool "Sibyte BCM91480B-BigSur" +@@ -826,6 +828,7 @@ config SIBYTE_BIGSUR + select SYS_SUPPORTS_HIGHMEM + select SYS_SUPPORTS_LITTLE_ENDIAN + select ZONE_DMA32 if 64BIT ++ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI + + config SNI_RM + bool "SNI RM200/300/400" +diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile +index b3d6bf23a662..3ef3fb658136 100644 +--- a/arch/mips/sibyte/common/Makefile ++++ b/arch/mips/sibyte/common/Makefile +@@ -1,4 +1,5 @@ + obj-y := cfe.o ++obj-$(CONFIG_SWIOTLB) += dma.o + obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o + obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o + obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o +diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c +new file mode 100644 +index 000000000000..eb47a94f3583 +--- /dev/null ++++ b/arch/mips/sibyte/common/dma.c +@@ -0,0 +1,14 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * DMA support for Broadcom SiByte platforms. ++ * ++ * Copyright (c) 2018 Maciej W. 
Rozycki ++ */ ++ ++#include <linux/swiotlb.h> ++#include <asm/bootinfo.h> ++ ++void __init plat_swiotlb_setup(void) ++{ ++ swiotlb_init(1); ++} +diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c +index 4264aedc7775..dd6badc31f45 100644 +--- a/arch/powerpc/xmon/xmon.c ++++ b/arch/powerpc/xmon/xmon.c +@@ -75,6 +75,9 @@ static int xmon_gate; + #define xmon_owner 0 + #endif /* CONFIG_SMP */ + ++#ifdef CONFIG_PPC_PSERIES ++static int set_indicator_token = RTAS_UNKNOWN_SERVICE; ++#endif + static unsigned long in_xmon __read_mostly = 0; + static int xmon_on = IS_ENABLED(CONFIG_XMON_DEFAULT); + +@@ -358,7 +361,6 @@ static inline void disable_surveillance(void) + #ifdef CONFIG_PPC_PSERIES + /* Since this can't be a module, args should end up below 4GB. */ + static struct rtas_args args; +- int token; + + /* + * At this point we have got all the cpus we can into +@@ -367,11 +369,11 @@ static inline void disable_surveillance(void) + * If we did try to take rtas.lock there would be a + * real possibility of deadlock. + */ +- token = rtas_token("set-indicator"); +- if (token == RTAS_UNKNOWN_SERVICE) ++ if (set_indicator_token == RTAS_UNKNOWN_SERVICE) + return; + +- rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0); ++ rtas_call_unlocked(&args, set_indicator_token, 3, 1, NULL, ++ SURVEILLANCE_TOKEN, 0, 0); + + #endif /* CONFIG_PPC_PSERIES */ + } +@@ -3672,6 +3674,14 @@ static void xmon_init(int enable) + __debugger_iabr_match = xmon_iabr_match; + __debugger_break_match = xmon_break_match; + __debugger_fault_handler = xmon_fault_handler; ++ ++#ifdef CONFIG_PPC_PSERIES ++ /* ++ * Get the token here to avoid trying to get a lock ++ * during the crash, causing a deadlock. ++ */ ++ set_indicator_token = rtas_token("set-indicator"); ++#endif + } else { + __debugger = NULL; + __debugger_ipi = NULL; +diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h +index 3de69330e6c5..afbc87206886 100644 +--- a/arch/x86/include/asm/traps.h ++++ b/arch/x86/include/asm/traps.h +@@ -104,9 +104,9 @@ extern int panic_on_unrecovered_nmi; + + void math_emulate(struct math_emu_info *); + #ifndef CONFIG_X86_32 +-asmlinkage void smp_thermal_interrupt(void); +-asmlinkage void smp_threshold_interrupt(void); +-asmlinkage void smp_deferred_error_interrupt(void); ++asmlinkage void smp_thermal_interrupt(struct pt_regs *regs); ++asmlinkage void smp_threshold_interrupt(struct pt_regs *regs); ++asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs); + #endif + + extern void ist_enter(struct pt_regs *regs); +diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c +index e12454e21b8a..9f915a8791cc 100644 +--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c ++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c +@@ -23,6 +23,7 @@ + #include <linux/string.h> + + #include <asm/amd_nb.h> ++#include <asm/traps.h> + #include <asm/apic.h> + #include <asm/mce.h> + #include <asm/msr.h> +@@ -99,7 +100,7 @@ static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init = + [0 ... MAX_NR_BANKS - 1] = { [0 ... 
NR_BLOCKS - 1] = -1 } + }; + +-const char *smca_get_name(enum smca_bank_types t) ++static const char *smca_get_name(enum smca_bank_types t) + { + if (t >= N_SMCA_BANK_TYPES) + return NULL; +@@ -824,7 +825,7 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc) + mce_log(&m); + } + +-asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void) ++asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs) + { + entering_irq(); + trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR); +diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c +index 2da67b70ba98..ee229ceee745 100644 +--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c ++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c +@@ -25,6 +25,7 @@ + #include <linux/cpu.h> + + #include <asm/processor.h> ++#include <asm/traps.h> + #include <asm/apic.h> + #include <asm/mce.h> + #include <asm/msr.h> +@@ -390,7 +391,7 @@ static void unexpected_thermal_interrupt(void) + + static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; + +-asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r) ++asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs) + { + entering_irq(); + trace_thermal_apic_entry(THERMAL_APIC_VECTOR); +diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c +index 2b584b319eff..c21e0a1efd0f 100644 +--- a/arch/x86/kernel/cpu/mcheck/threshold.c ++++ b/arch/x86/kernel/cpu/mcheck/threshold.c +@@ -6,6 +6,7 @@ + #include <linux/kernel.h> + + #include <asm/irq_vectors.h> ++#include <asm/traps.h> + #include <asm/apic.h> + #include <asm/mce.h> + #include <asm/trace/irq_vectors.h> +@@ -18,7 +19,7 @@ static void default_threshold_interrupt(void) + + void (*mce_threshold_vector)(void) = default_threshold_interrupt; + +-asmlinkage __visible void __irq_entry smp_threshold_interrupt(void) ++asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs) + { + entering_irq(); + trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR); +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c +index f02ecaf97904..6489067b78a4 100644 +--- a/arch/x86/kernel/smpboot.c ++++ b/arch/x86/kernel/smpboot.c +@@ -1346,7 +1346,7 @@ void __init calculate_max_logical_packages(void) + * extrapolate the boot cpu's data to all packages. 
+ */ + ncpus = cpu_data(0).booted_cores * topology_max_smt_threads(); +- __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus); ++ __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus); + pr_info("Max logical packages: %u\n", __max_logical_packages); + } + +diff --git a/crypto/ecc.c b/crypto/ecc.c +index 8facafd67802..adcce310f646 100644 +--- a/crypto/ecc.c ++++ b/crypto/ecc.c +@@ -842,15 +842,23 @@ static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime, + + static void ecc_point_mult(struct ecc_point *result, + const struct ecc_point *point, const u64 *scalar, +- u64 *initial_z, u64 *curve_prime, ++ u64 *initial_z, const struct ecc_curve *curve, + unsigned int ndigits) + { + /* R0 and R1 */ + u64 rx[2][ECC_MAX_DIGITS]; + u64 ry[2][ECC_MAX_DIGITS]; + u64 z[ECC_MAX_DIGITS]; ++ u64 sk[2][ECC_MAX_DIGITS]; ++ u64 *curve_prime = curve->p; + int i, nb; +- int num_bits = vli_num_bits(scalar, ndigits); ++ int num_bits; ++ int carry; ++ ++ carry = vli_add(sk[0], scalar, curve->n, ndigits); ++ vli_add(sk[1], sk[0], curve->n, ndigits); ++ scalar = sk[!carry]; ++ num_bits = sizeof(u64) * ndigits * 8 + 1; + + vli_set(rx[1], point->x, ndigits); + vli_set(ry[1], point->y, ndigits); +@@ -1004,7 +1012,7 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits, + goto out; + } + +- ecc_point_mult(pk, &curve->g, priv, NULL, curve->p, ndigits); ++ ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits); + if (ecc_point_is_zero(pk)) { + ret = -EAGAIN; + goto err_free_point; +@@ -1090,7 +1098,7 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, + goto err_alloc_product; + } + +- ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits); ++ ecc_point_mult(product, pk, priv, rand_z, curve, ndigits); + + ecc_swap_digits(product->x, secret, ndigits); + +diff --git a/drivers/base/bus.c b/drivers/base/bus.c +index 8bfd27ec73d6..585e2e1c9c8f 100644 +--- a/drivers/base/bus.c ++++ b/drivers/base/bus.c +@@ -31,6 +31,9 @@ static struct kset *system_kset; + + #define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr) + ++#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \ ++ struct driver_attribute driver_attr_##_name = \ ++ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) + + static int __must_check bus_rescan_devices_helper(struct device *dev, + void *data); +@@ -195,7 +198,7 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf, + bus_put(bus); + return err; + } +-static DRIVER_ATTR_WO(unbind); ++static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store); + + /* + * Manually attach a device to a driver. 
+@@ -231,7 +234,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, + bus_put(bus); + return err; + } +-static DRIVER_ATTR_WO(bind); ++static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store); + + static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf) + { +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index cd2e5cf14ea5..77b67a5f21ee 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -343,6 +343,7 @@ static const struct usb_device_id blacklist_table[] = { + /* Intel Bluetooth devices */ + { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW }, + { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW }, ++ { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW }, + { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, + { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, + { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL }, +@@ -2054,6 +2055,35 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb) + return -EILSEQ; + } + ++static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver, ++ struct intel_boot_params *params, ++ char *fw_name, size_t len, ++ const char *suffix) ++{ ++ switch (ver->hw_variant) { ++ case 0x0b: /* SfP */ ++ case 0x0c: /* WsP */ ++ snprintf(fw_name, len, "intel/ibt-%u-%u.%s", ++ le16_to_cpu(ver->hw_variant), ++ le16_to_cpu(params->dev_revid), ++ suffix); ++ break; ++ case 0x11: /* JfP */ ++ case 0x12: /* ThP */ ++ case 0x13: /* HrP */ ++ case 0x14: /* CcP */ ++ snprintf(fw_name, len, "intel/ibt-%u-%u-%u.%s", ++ le16_to_cpu(ver->hw_variant), ++ le16_to_cpu(ver->hw_revision), ++ le16_to_cpu(ver->fw_revision), ++ suffix); ++ break; ++ default: ++ return false; ++ } ++ return true; ++} ++ + static int btusb_setup_intel_new(struct hci_dev *hdev) + { + struct btusb_data *data = hci_get_drvdata(hdev); +@@ -2105,7 +2135,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) + case 0x11: /* JfP */ + case 0x12: /* ThP */ + case 0x13: /* HrP */ +- case 0x14: /* QnJ, IcP */ ++ case 0x14: /* CcP */ + break; + default: + bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", +@@ -2189,23 +2219,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) + * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi. + * + */ +- switch (ver.hw_variant) { +- case 0x0b: /* SfP */ +- case 0x0c: /* WsP */ +- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi", +- le16_to_cpu(ver.hw_variant), +- le16_to_cpu(params.dev_revid)); +- break; +- case 0x11: /* JfP */ +- case 0x12: /* ThP */ +- case 0x13: /* HrP */ +- case 0x14: /* QnJ, IcP */ +- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi", +- le16_to_cpu(ver.hw_variant), +- le16_to_cpu(ver.hw_revision), +- le16_to_cpu(ver.fw_revision)); +- break; +- default: ++ err = btusb_setup_intel_new_get_fw_name(&ver, ¶ms, fwname, ++ sizeof(fwname), "sfi"); ++ if (!err) { + bt_dev_err(hdev, "Unsupported Intel firmware naming"); + return -EINVAL; + } +@@ -2221,23 +2237,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) + /* Save the DDC file name for later use to apply once the firmware + * downloading is done. 
+ */ +- switch (ver.hw_variant) { +- case 0x0b: /* SfP */ +- case 0x0c: /* WsP */ +- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc", +- le16_to_cpu(ver.hw_variant), +- le16_to_cpu(params.dev_revid)); +- break; +- case 0x11: /* JfP */ +- case 0x12: /* ThP */ +- case 0x13: /* HrP */ +- case 0x14: /* QnJ, IcP */ +- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc", +- le16_to_cpu(ver.hw_variant), +- le16_to_cpu(ver.hw_revision), +- le16_to_cpu(ver.fw_revision)); +- break; +- default: ++ err = btusb_setup_intel_new_get_fw_name(&ver, ¶ms, fwname, ++ sizeof(fwname), "ddc"); ++ if (!err) { + bt_dev_err(hdev, "Unsupported Intel firmware naming"); + return -EINVAL; + } +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c +index 7fc9612070a1..d5f7a12e350e 100644 +--- a/drivers/char/ipmi/ipmi_msghandler.c ++++ b/drivers/char/ipmi/ipmi_msghandler.c +@@ -29,6 +29,7 @@ + #include <linux/moduleparam.h> + #include <linux/workqueue.h> + #include <linux/uuid.h> ++#include <linux/nospec.h> + + #define PFX "IPMI message handler: " + +@@ -61,7 +62,8 @@ static void ipmi_debug_msg(const char *title, unsigned char *data, + { } + #endif + +-static int initialized; ++static bool initialized; ++static bool drvregistered; + + enum ipmi_panic_event_op { + IPMI_SEND_PANIC_EVENT_NONE, +@@ -611,7 +613,7 @@ static DEFINE_MUTEX(ipmidriver_mutex); + + static LIST_HEAD(ipmi_interfaces); + static DEFINE_MUTEX(ipmi_interfaces_mutex); +-DEFINE_STATIC_SRCU(ipmi_interfaces_srcu); ++struct srcu_struct ipmi_interfaces_srcu; + + /* + * List of watchers that want to know when smi's are added and deleted. +@@ -719,7 +721,15 @@ struct watcher_entry { + int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) + { + struct ipmi_smi *intf; +- int index; ++ int index, rv; ++ ++ /* ++ * Make sure the driver is actually initialized, this handles ++ * problems with initialization order. ++ */ ++ rv = ipmi_init_msghandler(); ++ if (rv) ++ return rv; + + mutex_lock(&smi_watchers_mutex); + +@@ -883,7 +893,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) + + if (user) { + user->handler->ipmi_recv_hndl(msg, user->handler_data); +- release_ipmi_user(msg->user, index); ++ release_ipmi_user(user, index); + } else { + /* User went away, give up. */ + ipmi_free_recv_msg(msg); +@@ -1075,7 +1085,7 @@ int ipmi_create_user(unsigned int if_num, + { + unsigned long flags; + struct ipmi_user *new_user; +- int rv = 0, index; ++ int rv, index; + struct ipmi_smi *intf; + + /* +@@ -1093,18 +1103,9 @@ int ipmi_create_user(unsigned int if_num, + * Make sure the driver is actually initialized, this handles + * problems with initialization order. + */ +- if (!initialized) { +- rv = ipmi_init_msghandler(); +- if (rv) +- return rv; +- +- /* +- * The init code doesn't return an error if it was turned +- * off, but it won't initialize. Check that. 
+- */ +- if (!initialized) +- return -ENODEV; +- } ++ rv = ipmi_init_msghandler(); ++ if (rv) ++ return rv; + + new_user = kmalloc(sizeof(*new_user), GFP_KERNEL); + if (!new_user) +@@ -1182,6 +1183,7 @@ EXPORT_SYMBOL(ipmi_get_smi_info); + static void free_user(struct kref *ref) + { + struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); ++ cleanup_srcu_struct(&user->release_barrier); + kfree(user); + } + +@@ -1258,7 +1260,6 @@ int ipmi_destroy_user(struct ipmi_user *user) + { + _ipmi_destroy_user(user); + +- cleanup_srcu_struct(&user->release_barrier); + kref_put(&user->refcount, free_user); + + return 0; +@@ -1297,10 +1298,12 @@ int ipmi_set_my_address(struct ipmi_user *user, + if (!user) + return -ENODEV; + +- if (channel >= IPMI_MAX_CHANNELS) ++ if (channel >= IPMI_MAX_CHANNELS) { + rv = -EINVAL; +- else ++ } else { ++ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); + user->intf->addrinfo[channel].address = address; ++ } + release_ipmi_user(user, index); + + return rv; +@@ -1317,10 +1320,12 @@ int ipmi_get_my_address(struct ipmi_user *user, + if (!user) + return -ENODEV; + +- if (channel >= IPMI_MAX_CHANNELS) ++ if (channel >= IPMI_MAX_CHANNELS) { + rv = -EINVAL; +- else ++ } else { ++ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); + *address = user->intf->addrinfo[channel].address; ++ } + release_ipmi_user(user, index); + + return rv; +@@ -1337,10 +1342,12 @@ int ipmi_set_my_LUN(struct ipmi_user *user, + if (!user) + return -ENODEV; + +- if (channel >= IPMI_MAX_CHANNELS) ++ if (channel >= IPMI_MAX_CHANNELS) { + rv = -EINVAL; +- else ++ } else { ++ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); + user->intf->addrinfo[channel].lun = LUN & 0x3; ++ } + release_ipmi_user(user, index); + + return 0; +@@ -1357,10 +1364,12 @@ int ipmi_get_my_LUN(struct ipmi_user *user, + if (!user) + return -ENODEV; + +- if (channel >= IPMI_MAX_CHANNELS) ++ if (channel >= IPMI_MAX_CHANNELS) { + rv = -EINVAL; +- else ++ } else { ++ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); + *address = user->intf->addrinfo[channel].lun; ++ } + release_ipmi_user(user, index); + + return rv; +@@ -2184,6 +2193,7 @@ static int check_addr(struct ipmi_smi *intf, + { + if (addr->channel >= IPMI_MAX_CHANNELS) + return -EINVAL; ++ addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); + *lun = intf->addrinfo[addr->channel].lun; + *saddr = intf->addrinfo[addr->channel].address; + return 0; +@@ -3294,17 +3304,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, + * Make sure the driver is actually initialized, this handles + * problems with initialization order. + */ +- if (!initialized) { +- rv = ipmi_init_msghandler(); +- if (rv) +- return rv; +- /* +- * The init code doesn't return an error if it was turned +- * off, but it won't initialize. Check that. +- */ +- if (!initialized) +- return -ENODEV; +- } ++ rv = ipmi_init_msghandler(); ++ if (rv) ++ return rv; + + intf = kzalloc(sizeof(*intf), GFP_KERNEL); + if (!intf) +@@ -5020,6 +5022,22 @@ static int panic_event(struct notifier_block *this, + return NOTIFY_DONE; + } + ++/* Must be called with ipmi_interfaces_mutex held. 
*/ ++static int ipmi_register_driver(void) ++{ ++ int rv; ++ ++ if (drvregistered) ++ return 0; ++ ++ rv = driver_register(&ipmidriver.driver); ++ if (rv) ++ pr_err("Could not register IPMI driver\n"); ++ else ++ drvregistered = true; ++ return rv; ++} ++ + static struct notifier_block panic_block = { + .notifier_call = panic_event, + .next = NULL, +@@ -5030,66 +5048,74 @@ static int ipmi_init_msghandler(void) + { + int rv; + ++ mutex_lock(&ipmi_interfaces_mutex); ++ rv = ipmi_register_driver(); ++ if (rv) ++ goto out; + if (initialized) +- return 0; +- +- rv = driver_register(&ipmidriver.driver); +- if (rv) { +- pr_err(PFX "Could not register IPMI driver\n"); +- return rv; +- } ++ goto out; + +- pr_info("ipmi message handler version " IPMI_DRIVER_VERSION "\n"); ++ init_srcu_struct(&ipmi_interfaces_srcu); + + timer_setup(&ipmi_timer, ipmi_timeout, 0); + mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); + + atomic_notifier_chain_register(&panic_notifier_list, &panic_block); + +- initialized = 1; ++ initialized = true; + +- return 0; ++out: ++ mutex_unlock(&ipmi_interfaces_mutex); ++ return rv; + } + + static int __init ipmi_init_msghandler_mod(void) + { +- ipmi_init_msghandler(); +- return 0; ++ int rv; ++ ++ pr_info("version " IPMI_DRIVER_VERSION "\n"); ++ ++ mutex_lock(&ipmi_interfaces_mutex); ++ rv = ipmi_register_driver(); ++ mutex_unlock(&ipmi_interfaces_mutex); ++ ++ return rv; + } + + static void __exit cleanup_ipmi(void) + { + int count; + +- if (!initialized) +- return; +- +- atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); +- +- /* +- * This can't be called if any interfaces exist, so no worry +- * about shutting down the interfaces. +- */ +- +- /* +- * Tell the timer to stop, then wait for it to stop. This +- * avoids problems with race conditions removing the timer +- * here. +- */ +- atomic_inc(&stop_operation); +- del_timer_sync(&ipmi_timer); ++ if (initialized) { ++ atomic_notifier_chain_unregister(&panic_notifier_list, ++ &panic_block); + +- driver_unregister(&ipmidriver.driver); +- +- initialized = 0; ++ /* ++ * This can't be called if any interfaces exist, so no worry ++ * about shutting down the interfaces. ++ */ + +- /* Check for buffer leaks. */ +- count = atomic_read(&smi_msg_inuse_count); +- if (count != 0) +- pr_warn(PFX "SMI message count %d at exit\n", count); +- count = atomic_read(&recv_msg_inuse_count); +- if (count != 0) +- pr_warn(PFX "recv message count %d at exit\n", count); ++ /* ++ * Tell the timer to stop, then wait for it to stop. This ++ * avoids problems with race conditions removing the timer ++ * here. ++ */ ++ atomic_inc(&stop_operation); ++ del_timer_sync(&ipmi_timer); ++ ++ initialized = false; ++ ++ /* Check for buffer leaks. */ ++ count = atomic_read(&smi_msg_inuse_count); ++ if (count != 0) ++ pr_warn(PFX "SMI message count %d at exit\n", count); ++ count = atomic_read(&recv_msg_inuse_count); ++ if (count != 0) ++ pr_warn(PFX "recv message count %d at exit\n", count); ++ cleanup_srcu_struct(&ipmi_interfaces_srcu); ++ } ++ if (drvregistered) ++ driver_unregister(&ipmidriver.driver); + } + module_exit(cleanup_ipmi); + +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c +index 9b786726e426..76c2010ba672 100644 +--- a/drivers/char/ipmi/ipmi_ssif.c ++++ b/drivers/char/ipmi/ipmi_ssif.c +@@ -630,8 +630,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, + + /* Remove the multi-part read marker. 
*/ + len -= 2; ++ data += 2; + for (i = 0; i < len; i++) +- ssif_info->data[i] = data[i+2]; ++ ssif_info->data[i] = data[i]; + ssif_info->multi_len = len; + ssif_info->multi_pos = 1; + +@@ -659,8 +660,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, + } + + blocknum = data[0]; ++ len--; ++ data++; ++ ++ if (blocknum != 0xff && len != 31) { ++ /* All blocks but the last must have 31 data bytes. */ ++ result = -EIO; ++ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) ++ pr_info("Received middle message <31\n"); + +- if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) { ++ goto continue_op; ++ } ++ ++ if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) { + /* Received message too big, abort the operation. */ + result = -E2BIG; + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) +@@ -669,16 +681,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, + goto continue_op; + } + +- /* Remove the blocknum from the data. */ +- len--; + for (i = 0; i < len; i++) +- ssif_info->data[i + ssif_info->multi_len] = data[i + 1]; ++ ssif_info->data[i + ssif_info->multi_len] = data[i]; + ssif_info->multi_len += len; + if (blocknum == 0xff) { + /* End of read */ + len = ssif_info->multi_len; + data = ssif_info->data; +- } else if (blocknum + 1 != ssif_info->multi_pos) { ++ } else if (blocknum != ssif_info->multi_pos) { + /* + * Out of sequence block, just abort. Block + * numbers start at zero for the second block, +@@ -706,6 +716,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, + } + } + ++ continue_op: + if (result < 0) { + ssif_inc_stat(ssif_info, receive_errors); + } else { +@@ -713,8 +724,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, + ssif_inc_stat(ssif_info, received_message_parts); + } + +- +- continue_op: + if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) + pr_info(PFX "DONE 1: state = %d, result=%d.\n", + ssif_info->ssif_state, result); +diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c +index 99036527eb0d..e695622c5aa5 100644 +--- a/drivers/clk/imx/clk-busy.c ++++ b/drivers/clk/imx/clk-busy.c +@@ -154,7 +154,7 @@ static const struct clk_ops clk_busy_mux_ops = { + + struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift, + u8 width, void __iomem *busy_reg, u8 busy_shift, +- const char **parent_names, int num_parents) ++ const char * const *parent_names, int num_parents) + { + struct clk_busy_mux *busy; + struct clk *clk; +diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c +index c9b327e0a8dd..44817c1b0b88 100644 +--- a/drivers/clk/imx/clk-fixup-mux.c ++++ b/drivers/clk/imx/clk-fixup-mux.c +@@ -70,7 +70,7 @@ static const struct clk_ops clk_fixup_mux_ops = { + }; + + struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg, +- u8 shift, u8 width, const char **parents, ++ u8 shift, u8 width, const char * const *parents, + int num_parents, void (*fixup)(u32 *val)) + { + struct clk_fixup_mux *fixup_mux; +diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c +index 8c7c2fcb8d94..c509324f6338 100644 +--- a/drivers/clk/imx/clk-imx6q.c ++++ b/drivers/clk/imx/clk-imx6q.c +@@ -508,8 +508,12 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) + * lvds1_gate and lvds2_gate are pseudo-gates. Both can be + * independently configured as clock inputs or outputs. We treat + * the "output_enable" bit as a gate, even though it's really just +- * enabling clock output. ++ * enabling clock output. 
Initially the gate bits are cleared, as ++ * otherwise the exclusive configuration gets locked in the setup done ++ * by software running before the clock driver, with no way to change ++ * it. + */ ++ writel(readl(base + 0x160) & ~0x3c00, base + 0x160); + clk[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12)); + clk[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13)); + +diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h +index 8076ec040f37..e65c1115d978 100644 +--- a/drivers/clk/imx/clk.h ++++ b/drivers/clk/imx/clk.h +@@ -63,14 +63,14 @@ struct clk *imx_clk_busy_divider(const char *name, const char *parent_name, + + struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift, + u8 width, void __iomem *busy_reg, u8 busy_shift, +- const char **parent_names, int num_parents); ++ const char * const *parent_names, int num_parents); + + struct clk *imx_clk_fixup_divider(const char *name, const char *parent, + void __iomem *reg, u8 shift, u8 width, + void (*fixup)(u32 *val)); + + struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg, +- u8 shift, u8 width, const char **parents, ++ u8 shift, u8 width, const char * const *parents, + int num_parents, void (*fixup)(u32 *val)); + + static inline struct clk *imx_clk_fixed(const char *name, int rate) +@@ -79,7 +79,8 @@ static inline struct clk *imx_clk_fixed(const char *name, int rate) + } + + static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg, +- u8 shift, u8 width, const char **parents, int num_parents) ++ u8 shift, u8 width, const char * const *parents, ++ int num_parents) + { + return clk_register_mux(NULL, name, parents, num_parents, + CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg, +@@ -192,7 +193,8 @@ static inline struct clk *imx_clk_gate4(const char *name, const char *parent, + } + + static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg, +- u8 shift, u8 width, const char **parents, int num_parents) ++ u8 shift, u8 width, const char * const *parents, ++ int num_parents) + { + return clk_register_mux(NULL, name, parents, num_parents, + CLK_SET_RATE_NO_REPARENT, reg, shift, +@@ -200,7 +202,8 @@ static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg, + } + + static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg, +- u8 shift, u8 width, const char **parents, int num_parents) ++ u8 shift, u8 width, const char * const *parents, ++ int num_parents) + { + return clk_register_mux(NULL, name, parents, num_parents, + CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE, +@@ -208,8 +211,9 @@ static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg, + } + + static inline struct clk *imx_clk_mux_flags(const char *name, +- void __iomem *reg, u8 shift, u8 width, const char **parents, +- int num_parents, unsigned long flags) ++ void __iomem *reg, u8 shift, u8 width, ++ const char * const *parents, int num_parents, ++ unsigned long flags) + { + return clk_register_mux(NULL, name, parents, num_parents, + flags | CLK_SET_RATE_NO_REPARENT, reg, shift, width, 0, +diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c +index 74697e145dde..50060e895e7a 100644 +--- a/drivers/clk/meson/meson8b.c ++++ b/drivers/clk/meson/meson8b.c +@@ -568,13 +568,14 @@ static struct clk_fixed_factor meson8b_cpu_div3 = { + }; + + static const struct clk_div_table cpu_scale_table[] = { +- { .val = 2, .div = 4 }, +- { .val = 3, .div = 
6 }, +- { .val = 4, .div = 8 }, +- { .val = 5, .div = 10 }, +- { .val = 6, .div = 12 }, +- { .val = 7, .div = 14 }, +- { .val = 8, .div = 16 }, ++ { .val = 1, .div = 4 }, ++ { .val = 2, .div = 6 }, ++ { .val = 3, .div = 8 }, ++ { .val = 4, .div = 10 }, ++ { .val = 5, .div = 12 }, ++ { .val = 6, .div = 14 }, ++ { .val = 7, .div = 16 }, ++ { .val = 8, .div = 18 }, + { /* sentinel */ }, + }; + +diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c +index 62d24690ba02..9701107806a7 100644 +--- a/drivers/clocksource/timer-integrator-ap.c ++++ b/drivers/clocksource/timer-integrator-ap.c +@@ -181,8 +181,7 @@ static int __init integrator_ap_timer_init_of(struct device_node *node) + int irq; + struct clk *clk; + unsigned long rate; +- struct device_node *pri_node; +- struct device_node *sec_node; ++ struct device_node *alias_node; + + base = of_io_request_and_map(node, 0, "integrator-timer"); + if (IS_ERR(base)) +@@ -204,7 +203,18 @@ static int __init integrator_ap_timer_init_of(struct device_node *node) + return err; + } + +- pri_node = of_find_node_by_path(path); ++ alias_node = of_find_node_by_path(path); ++ ++ /* ++ * The pointer is used as an identifier not as a pointer, we ++ * can drop the refcount on the of__node immediately after ++ * getting it. ++ */ ++ of_node_put(alias_node); ++ ++ if (node == alias_node) ++ /* The primary timer lacks IRQ, use as clocksource */ ++ return integrator_clocksource_init(rate, base); + + err = of_property_read_string(of_aliases, + "arm,timer-secondary", &path); +@@ -213,14 +223,11 @@ static int __init integrator_ap_timer_init_of(struct device_node *node) + return err; + } + ++ alias_node = of_find_node_by_path(path); + +- sec_node = of_find_node_by_path(path); +- +- if (node == pri_node) +- /* The primary timer lacks IRQ, use as clocksource */ +- return integrator_clocksource_init(rate, base); ++ of_node_put(alias_node); + +- if (node == sec_node) { ++ if (node == alias_node) { + /* The secondary timer will drive the clock event */ + irq = irq_of_parse_and_map(node, 0); + return integrator_clockevent_init(rate, base, irq); +diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c +index 9e56bc411061..74c247972bb3 100644 +--- a/drivers/cpuidle/cpuidle-pseries.c ++++ b/drivers/cpuidle/cpuidle-pseries.c +@@ -247,7 +247,13 @@ static int pseries_idle_probe(void) + return -ENODEV; + + if (firmware_has_feature(FW_FEATURE_SPLPAR)) { +- if (lppaca_shared_proc(get_lppaca())) { ++ /* ++ * Use local_paca instead of get_lppaca() since ++ * preemption is not disabled, and it is not required in ++ * fact, since lppaca_ptr does not need to be the value ++ * associated to the current CPU, it can be from any CPU. 
++ */ ++ if (lppaca_shared_proc(local_paca->lppaca_ptr)) { + cpuidle_state_table = shared_states; + max_idle_state = ARRAY_SIZE(shared_states); + } else { +diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile +index c51627660dbb..d9845099635e 100644 +--- a/drivers/firmware/efi/libstub/Makefile ++++ b/drivers/firmware/efi/libstub/Makefile +@@ -9,7 +9,10 @@ cflags-$(CONFIG_X86_32) := -march=i386 + cflags-$(CONFIG_X86_64) := -mcmodel=small + cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \ + -fPIC -fno-strict-aliasing -mno-red-zone \ +- -mno-mmx -mno-sse -fshort-wchar ++ -mno-mmx -mno-sse -fshort-wchar \ ++ -Wno-pointer-sign \ ++ $(call cc-disable-warning, address-of-packed-member) \ ++ $(call cc-disable-warning, gnu) + + # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly + # disable the stackleak plugin +diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c +index 7fa793672a7a..68e4b2b98c8f 100644 +--- a/drivers/fpga/altera-cvp.c ++++ b/drivers/fpga/altera-cvp.c +@@ -468,14 +468,6 @@ static int altera_cvp_probe(struct pci_dev *pdev, + goto err_unmap; + } + +- ret = driver_create_file(&altera_cvp_driver.driver, +- &driver_attr_chkcfg); +- if (ret) { +- dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n"); +- fpga_mgr_unregister(mgr); +- goto err_unmap; +- } +- + return 0; + + err_unmap: +@@ -493,7 +485,6 @@ static void altera_cvp_remove(struct pci_dev *pdev) + struct altera_cvp_conf *conf = mgr->priv; + u16 cmd; + +- driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg); + fpga_mgr_unregister(mgr); + pci_iounmap(pdev, conf->map); + pci_release_region(pdev, CVP_BAR); +@@ -502,7 +493,30 @@ static void altera_cvp_remove(struct pci_dev *pdev) + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } + +-module_pci_driver(altera_cvp_driver); ++static int __init altera_cvp_init(void) ++{ ++ int ret; ++ ++ ret = pci_register_driver(&altera_cvp_driver); ++ if (ret) ++ return ret; ++ ++ ret = driver_create_file(&altera_cvp_driver.driver, ++ &driver_attr_chkcfg); ++ if (ret) ++ pr_warn("Can't create sysfs chkcfg file\n"); ++ ++ return 0; ++} ++ ++static void __exit altera_cvp_exit(void) ++{ ++ driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg); ++ pci_unregister_driver(&altera_cvp_driver); ++} ++ ++module_init(altera_cvp_init); ++module_exit(altera_cvp_exit); + + MODULE_LICENSE("GPL v2"); + MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>"); +diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c +index 2afd9de84a0d..dc42571e6fdc 100644 +--- a/drivers/gpio/gpio-pl061.c ++++ b/drivers/gpio/gpio-pl061.c +@@ -54,6 +54,7 @@ struct pl061 { + + void __iomem *base; + struct gpio_chip gc; ++ struct irq_chip irq_chip; + int parent_irq; + + #ifdef CONFIG_PM +@@ -281,15 +282,6 @@ static int pl061_irq_set_wake(struct irq_data *d, unsigned int state) + return irq_set_irq_wake(pl061->parent_irq, state); + } + +-static struct irq_chip pl061_irqchip = { +- .name = "pl061", +- .irq_ack = pl061_irq_ack, +- .irq_mask = pl061_irq_mask, +- .irq_unmask = pl061_irq_unmask, +- .irq_set_type = pl061_irq_type, +- .irq_set_wake = pl061_irq_set_wake, +-}; +- + static int pl061_probe(struct amba_device *adev, const struct amba_id *id) + { + struct device *dev = &adev->dev; +@@ -328,6 +320,13 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id) + /* + * irq_chip support + */ ++ pl061->irq_chip.name = dev_name(dev); ++ pl061->irq_chip.irq_ack = pl061_irq_ack; ++ pl061->irq_chip.irq_mask = 
pl061_irq_mask; ++ pl061->irq_chip.irq_unmask = pl061_irq_unmask; ++ pl061->irq_chip.irq_set_type = pl061_irq_type; ++ pl061->irq_chip.irq_set_wake = pl061_irq_set_wake; ++ + writeb(0, pl061->base + GPIOIE); /* disable irqs */ + irq = adev->irq[0]; + if (irq < 0) { +@@ -336,14 +335,14 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id) + } + pl061->parent_irq = irq; + +- ret = gpiochip_irqchip_add(&pl061->gc, &pl061_irqchip, ++ ret = gpiochip_irqchip_add(&pl061->gc, &pl061->irq_chip, + 0, handle_bad_irq, + IRQ_TYPE_NONE); + if (ret) { + dev_info(&adev->dev, "could not add irqchip\n"); + return ret; + } +- gpiochip_set_chained_irqchip(&pl061->gc, &pl061_irqchip, ++ gpiochip_set_chained_irqchip(&pl061->gc, &pl061->irq_chip, + irq, pl061_irq_handler); + + amba_set_drvdata(adev, pl061); +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +index 8a926d1df939..2b4199adcd94 100644 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +@@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle) + if (r) + return r; + +- r = amdgpu_uvd_resume(adev); +- if (r) +- return r; +- + ring = &adev->uvd.inst->ring; + sprintf(ring->name, "uvd"); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); + if (r) + return r; + ++ r = amdgpu_uvd_resume(adev); ++ if (r) ++ return r; ++ + r = amdgpu_uvd_entity_init(adev); + + return r; +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +index 50248059412e..88c006c5ee2c 100644 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +@@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle) + if (r) + return r; + +- r = amdgpu_uvd_resume(adev); +- if (r) +- return r; +- + ring = &adev->uvd.inst->ring; + sprintf(ring->name, "uvd"); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); + if (r) + return r; + ++ r = amdgpu_uvd_resume(adev); ++ if (r) ++ return r; ++ + r = amdgpu_uvd_entity_init(adev); + + return r; +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +index 6ae82cc2e55e..d4070839ac80 100644 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +@@ -420,16 +420,16 @@ static int uvd_v6_0_sw_init(void *handle) + DRM_INFO("UVD ENC is disabled\n"); + } + +- r = amdgpu_uvd_resume(adev); +- if (r) +- return r; +- + ring = &adev->uvd.inst->ring; + sprintf(ring->name, "uvd"); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); + if (r) + return r; + ++ r = amdgpu_uvd_resume(adev); ++ if (r) ++ return r; ++ + if (uvd_v6_0_enc_support(adev)) { + for (i = 0; i < adev->uvd.num_enc_rings; ++i) { + ring = &adev->uvd.inst->ring_enc[i]; +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +index 9b7f8469bc5c..057151b17b45 100644 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +@@ -444,10 +444,6 @@ static int uvd_v7_0_sw_init(void *handle) + DRM_INFO("PSP loading UVD firmware\n"); + } + +- r = amdgpu_uvd_resume(adev); +- if (r) +- return r; +- + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + if (adev->uvd.harvest_config & (1 << j)) + continue; +@@ -479,6 +475,10 @@ static int uvd_v7_0_sw_init(void *handle) + } + } + ++ r = amdgpu_uvd_resume(adev); ++ if (r) ++ return r; ++ + r = amdgpu_uvd_entity_init(adev); + if (r) + return r; +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device.c +index 1427675d0e5a..5aba50f63ac6 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c +@@ -661,6 +661,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) + { + uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE]; + bool is_patched = false; ++ unsigned long flags; + + if (!kfd->init_complete) + return; +@@ -670,7 +671,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) + return; + } + +- spin_lock(&kfd->interrupt_lock); ++ spin_lock_irqsave(&kfd->interrupt_lock, flags); + + if (kfd->interrupts_active + && interrupt_is_wanted(kfd, ih_ring_entry, +@@ -679,7 +680,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) + is_patched ? patched_ihre : ih_ring_entry)) + queue_work(kfd->ih_wq, &kfd->interrupt_work); + +- spin_unlock(&kfd->interrupt_lock); ++ spin_unlock_irqrestore(&kfd->interrupt_lock, flags); + } + + int kgd2kfd_quiesce_mm(struct mm_struct *mm) +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +index 9bfb040352e9..6a6d977ddd7a 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +@@ -60,6 +60,11 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name, + return -EINVAL; + } + ++ if (!stream_state) { ++ DRM_ERROR("No stream state for CRTC%d\n", crtc->index); ++ return -EINVAL; ++ } ++ + /* When enabling CRC, we should also disable dithering. */ + if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) { + if (dc_stream_configure_crc(stream_state->ctx->dc, +diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c +index 1bb4c318bdd4..f77bff5aa307 100644 +--- a/drivers/gpu/drm/drm_atomic_helper.c ++++ b/drivers/gpu/drm/drm_atomic_helper.c +@@ -1425,6 +1425,9 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, + DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", + crtc->base.id, crtc->name); + } ++ ++ if (old_state->fake_commit) ++ complete_all(&old_state->fake_commit->flip_done); + } + EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done); + +diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +index 9973ac893635..3db232429630 100644 +--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c ++++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +@@ -334,13 +334,16 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port, + + usnic_dbg("\n"); + +- mutex_lock(&us_ibdev->usdev_lock); + if (ib_get_eth_speed(ibdev, port, &props->active_speed, +- &props->active_width)) { +- mutex_unlock(&us_ibdev->usdev_lock); ++ &props->active_width)) + return -EINVAL; +- } + ++ /* ++ * usdev_lock is acquired after (and not before) ib_get_eth_speed call ++ * because acquiring rtnl_lock in ib_get_eth_speed, while holding ++ * usdev_lock could lead to a deadlock. 
++ */ ++ mutex_lock(&us_ibdev->usdev_lock); + /* props being zeroed by the caller, avoid zeroing it here */ + + props->lid = 0; +diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c +index 8be27238a86e..fa98a5279647 100644 +--- a/drivers/infiniband/sw/rxe/rxe_req.c ++++ b/drivers/infiniband/sw/rxe/rxe_req.c +@@ -640,6 +640,7 @@ next_wqe: + rmr->access = wqe->wr.wr.reg.access; + rmr->lkey = wqe->wr.wr.reg.key; + rmr->rkey = wqe->wr.wr.reg.key; ++ rmr->iova = wqe->wr.wr.reg.mr->iova; + wqe->state = wqe_state_done; + wqe->status = IB_WC_SUCCESS; + } else { +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c +index 0481223b1deb..f2ec882f96be 100644 +--- a/drivers/md/dm-crypt.c ++++ b/drivers/md/dm-crypt.c +@@ -49,7 +49,7 @@ struct convert_context { + struct bio *bio_out; + struct bvec_iter iter_in; + struct bvec_iter iter_out; +- sector_t cc_sector; ++ u64 cc_sector; + atomic_t cc_pending; + union { + struct skcipher_request *req; +@@ -81,7 +81,7 @@ struct dm_crypt_request { + struct convert_context *ctx; + struct scatterlist sg_in[4]; + struct scatterlist sg_out[4]; +- sector_t iv_sector; ++ u64 iv_sector; + }; + + struct crypt_config; +@@ -160,7 +160,7 @@ struct crypt_config { + struct iv_lmk_private lmk; + struct iv_tcw_private tcw; + } iv_gen_private; +- sector_t iv_offset; ++ u64 iv_offset; + unsigned int iv_size; + unsigned short int sector_size; + unsigned char sector_shift; +@@ -2780,7 +2780,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) + } + + ret = -EINVAL; +- if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) { ++ if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) { + ti->error = "Invalid device sector"; + goto bad; + } +diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c +index 2fb7bb4304ad..fddffe251bf6 100644 +--- a/drivers/md/dm-delay.c ++++ b/drivers/md/dm-delay.c +@@ -141,7 +141,7 @@ static int delay_class_ctr(struct dm_target *ti, struct delay_class *c, char **a + unsigned long long tmpll; + char dummy; + +- if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) { ++ if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) { + ti->error = "Invalid device sector"; + return -EINVAL; + } +diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c +index 32aabe27b37c..b86d2439ffc7 100644 +--- a/drivers/md/dm-flakey.c ++++ b/drivers/md/dm-flakey.c +@@ -213,7 +213,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) + devname = dm_shift_arg(&as); + + r = -EINVAL; +- if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) { ++ if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) { + ti->error = "Invalid device sector"; + goto bad; + } +diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c +index 2fc4213e02b5..671c24332802 100644 +--- a/drivers/md/dm-kcopyd.c ++++ b/drivers/md/dm-kcopyd.c +@@ -56,15 +56,17 @@ struct dm_kcopyd_client { + atomic_t nr_jobs; + + /* +- * We maintain three lists of jobs: ++ * We maintain four lists of jobs: + * + * i) jobs waiting for pages + * ii) jobs that have pages, and are waiting for the io to be issued. +- * iii) jobs that have completed. ++ * iii) jobs that don't need to do any IO and just run a callback ++ * iv) jobs that have completed. + * +- * All three of these are protected by job_lock. ++ * All four of these are protected by job_lock. 
+ */ + spinlock_t job_lock; ++ struct list_head callback_jobs; + struct list_head complete_jobs; + struct list_head io_jobs; + struct list_head pages_jobs; +@@ -625,6 +627,7 @@ static void do_work(struct work_struct *work) + struct dm_kcopyd_client *kc = container_of(work, + struct dm_kcopyd_client, kcopyd_work); + struct blk_plug plug; ++ unsigned long flags; + + /* + * The order that these are called is *very* important. +@@ -633,6 +636,10 @@ static void do_work(struct work_struct *work) + * list. io jobs call wake when they complete and it all + * starts again. + */ ++ spin_lock_irqsave(&kc->job_lock, flags); ++ list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs); ++ spin_unlock_irqrestore(&kc->job_lock, flags); ++ + blk_start_plug(&plug); + process_jobs(&kc->complete_jobs, kc, run_complete_job); + process_jobs(&kc->pages_jobs, kc, run_pages_job); +@@ -650,7 +657,7 @@ static void dispatch_job(struct kcopyd_job *job) + struct dm_kcopyd_client *kc = job->kc; + atomic_inc(&kc->nr_jobs); + if (unlikely(!job->source.count)) +- push(&kc->complete_jobs, job); ++ push(&kc->callback_jobs, job); + else if (job->pages == &zero_page_list) + push(&kc->io_jobs, job); + else +@@ -858,7 +865,7 @@ void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err) + job->read_err = read_err; + job->write_err = write_err; + +- push(&kc->complete_jobs, job); ++ push(&kc->callback_jobs, job); + wake(kc); + } + EXPORT_SYMBOL(dm_kcopyd_do_callback); +@@ -888,6 +895,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro + return ERR_PTR(-ENOMEM); + + spin_lock_init(&kc->job_lock); ++ INIT_LIST_HEAD(&kc->callback_jobs); + INIT_LIST_HEAD(&kc->complete_jobs); + INIT_LIST_HEAD(&kc->io_jobs); + INIT_LIST_HEAD(&kc->pages_jobs); +@@ -939,6 +947,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc) + /* Wait for completion of all jobs submitted by this client. 
*/ + wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs)); + ++ BUG_ON(!list_empty(&kc->callback_jobs)); + BUG_ON(!list_empty(&kc->complete_jobs)); + BUG_ON(!list_empty(&kc->io_jobs)); + BUG_ON(!list_empty(&kc->pages_jobs)); +diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c +index 2f7c44a006c4..caa08c4b84cd 100644 +--- a/drivers/md/dm-linear.c ++++ b/drivers/md/dm-linear.c +@@ -45,7 +45,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) + } + + ret = -EINVAL; +- if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) { ++ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) { + ti->error = "Invalid device sector"; + goto bad; + } +diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c +index 79eab1071ec2..5a51151f680d 100644 +--- a/drivers/md/dm-raid1.c ++++ b/drivers/md/dm-raid1.c +@@ -943,7 +943,8 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti, + char dummy; + int ret; + +- if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) { ++ if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 || ++ offset != (sector_t)offset) { + ti->error = "Invalid offset"; + return -EINVAL; + } +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c +index ae4b33d10924..36805b12661e 100644 +--- a/drivers/md/dm-snap.c ++++ b/drivers/md/dm-snap.c +@@ -19,6 +19,7 @@ + #include <linux/vmalloc.h> + #include <linux/log2.h> + #include <linux/dm-kcopyd.h> ++#include <linux/semaphore.h> + + #include "dm.h" + +@@ -105,6 +106,9 @@ struct dm_snapshot { + /* The on disk metadata handler */ + struct dm_exception_store *store; + ++ /* Maximum number of in-flight COW jobs. */ ++ struct semaphore cow_count; ++ + struct dm_kcopyd_client *kcopyd_client; + + /* Wait for events based on state_bits */ +@@ -145,6 +149,19 @@ struct dm_snapshot { + #define RUNNING_MERGE 0 + #define SHUTDOWN_MERGE 1 + ++/* ++ * Maximum number of chunks being copied on write. ++ * ++ * The value was decided experimentally as a trade-off between memory ++ * consumption, stalling the kernel's workqueues and maintaining a high enough ++ * throughput. ++ */ ++#define DEFAULT_COW_THRESHOLD 2048 ++ ++static int cow_threshold = DEFAULT_COW_THRESHOLD; ++module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644); ++MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write"); ++ + DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, + "A percentage of time allocated for copy on write"); + +@@ -1190,6 +1207,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) + goto bad_hash_tables; + } + ++ sema_init(&s->cow_count, (cow_threshold > 0) ? 
cow_threshold : INT_MAX); ++ + s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle); + if (IS_ERR(s->kcopyd_client)) { + r = PTR_ERR(s->kcopyd_client); +@@ -1575,6 +1594,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context) + rb_link_node(&pe->out_of_order_node, parent, p); + rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree); + } ++ up(&s->cow_count); + } + + /* +@@ -1598,6 +1618,7 @@ static void start_copy(struct dm_snap_pending_exception *pe) + dest.count = src.count; + + /* Hand over to kcopyd */ ++ down(&s->cow_count); + dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe); + } + +@@ -1617,6 +1638,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe, + pe->full_bio = bio; + pe->full_bio_end_io = bio->bi_end_io; + ++ down(&s->cow_count); + callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client, + copy_callback, pe); + +diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c +index 954b7ab4e684..e673dacf6418 100644 +--- a/drivers/md/dm-unstripe.c ++++ b/drivers/md/dm-unstripe.c +@@ -78,7 +78,7 @@ static int unstripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) + goto err; + } + +- if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1) { ++ if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) { + ti->error = "Invalid striped device offset"; + goto err; + } +diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c +index 1c933b2cf760..3ef5df1648d7 100644 +--- a/drivers/media/firewire/firedtv-avc.c ++++ b/drivers/media/firewire/firedtv-avc.c +@@ -968,7 +968,8 @@ static int get_ca_object_length(struct avc_response_frame *r) + return r->operand[7]; + } + +-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len) ++int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info, ++ unsigned int *len) + { + struct avc_command_frame *c = (void *)fdtv->avc_data; + struct avc_response_frame *r = (void *)fdtv->avc_data; +@@ -1009,7 +1010,8 @@ out: + return ret; + } + +-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len) ++int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info, ++ unsigned int *len) + { + struct avc_command_frame *c = (void *)fdtv->avc_data; + struct avc_response_frame *r = (void *)fdtv->avc_data; +diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h +index 876cdec8329b..009905a19947 100644 +--- a/drivers/media/firewire/firedtv.h ++++ b/drivers/media/firewire/firedtv.h +@@ -124,8 +124,10 @@ int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst, + struct dvb_diseqc_master_cmd *diseqcmd); + void avc_remote_ctrl_work(struct work_struct *work); + int avc_register_remote_control(struct firedtv *fdtv); +-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len); +-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len); ++int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info, ++ unsigned int *len); ++int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info, ++ unsigned int *len); + int avc_ca_reset(struct firedtv *fdtv); + int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length); + int avc_ca_get_time_date(struct firedtv *fdtv, int *interval); +diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c +index bb6add9d340e..5b8350e87e75 100644 +--- a/drivers/media/platform/qcom/venus/core.c ++++ 
b/drivers/media/platform/qcom/venus/core.c +@@ -264,6 +264,14 @@ static int venus_probe(struct platform_device *pdev) + if (ret) + return ret; + ++ if (!dev->dma_parms) { ++ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), ++ GFP_KERNEL); ++ if (!dev->dma_parms) ++ return -ENOMEM; ++ } ++ dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); ++ + INIT_LIST_HEAD(&core->instances); + mutex_init(&core->lock); + INIT_DELAYED_WORK(&core->work, venus_sys_error_handler); +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c +index d46dc432456c..361abbc00486 100644 +--- a/drivers/media/usb/uvc/uvc_driver.c ++++ b/drivers/media/usb/uvc/uvc_driver.c +@@ -1824,11 +1824,7 @@ static void uvc_delete(struct kref *kref) + usb_put_intf(dev->intf); + usb_put_dev(dev->udev); + +- if (dev->vdev.dev) +- v4l2_device_unregister(&dev->vdev); + #ifdef CONFIG_MEDIA_CONTROLLER +- if (media_devnode_is_registered(dev->mdev.devnode)) +- media_device_unregister(&dev->mdev); + media_device_cleanup(&dev->mdev); + #endif + +@@ -1885,6 +1881,15 @@ static void uvc_unregister_video(struct uvc_device *dev) + + uvc_debugfs_cleanup_stream(stream); + } ++ ++ uvc_status_unregister(dev); ++ ++ if (dev->vdev.dev) ++ v4l2_device_unregister(&dev->vdev); ++#ifdef CONFIG_MEDIA_CONTROLLER ++ if (media_devnode_is_registered(dev->mdev.devnode)) ++ media_device_unregister(&dev->mdev); ++#endif + } + + int uvc_register_video_device(struct uvc_device *dev, +diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c +index 0722dc684378..883e4cab45e7 100644 +--- a/drivers/media/usb/uvc/uvc_status.c ++++ b/drivers/media/usb/uvc/uvc_status.c +@@ -54,7 +54,7 @@ error: + return ret; + } + +-static void uvc_input_cleanup(struct uvc_device *dev) ++static void uvc_input_unregister(struct uvc_device *dev) + { + if (dev->input) + input_unregister_device(dev->input); +@@ -71,7 +71,7 @@ static void uvc_input_report_key(struct uvc_device *dev, unsigned int code, + + #else + #define uvc_input_init(dev) +-#define uvc_input_cleanup(dev) ++#define uvc_input_unregister(dev) + #define uvc_input_report_key(dev, code, value) + #endif /* CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV */ + +@@ -292,12 +292,16 @@ int uvc_status_init(struct uvc_device *dev) + return 0; + } + +-void uvc_status_cleanup(struct uvc_device *dev) ++void uvc_status_unregister(struct uvc_device *dev) + { + usb_kill_urb(dev->int_urb); ++ uvc_input_unregister(dev); ++} ++ ++void uvc_status_cleanup(struct uvc_device *dev) ++{ + usb_free_urb(dev->int_urb); + kfree(dev->status); +- uvc_input_cleanup(dev); + } + + int uvc_status_start(struct uvc_device *dev, gfp_t flags) +diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h +index e5f5d84f1d1d..a738486fd9d6 100644 +--- a/drivers/media/usb/uvc/uvcvideo.h ++++ b/drivers/media/usb/uvc/uvcvideo.h +@@ -750,6 +750,7 @@ int uvc_register_video_device(struct uvc_device *dev, + + /* Status */ + int uvc_status_init(struct uvc_device *dev); ++void uvc_status_unregister(struct uvc_device *dev); + void uvc_status_cleanup(struct uvc_device *dev); + int uvc_status_start(struct uvc_device *dev, gfp_t flags); + void uvc_status_stop(struct uvc_device *dev); +diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c +index be53044086c7..fbc56ee99682 100644 +--- a/drivers/mmc/host/atmel-mci.c ++++ b/drivers/mmc/host/atmel-mci.c +@@ -1954,13 +1954,14 @@ static void atmci_tasklet_func(unsigned long priv) + } + + atmci_request_end(host, host->mrq); +- state = STATE_IDLE; ++ goto 
unlock; /* atmci_request_end() sets host->state */ + break; + } + } while (state != prev_state); + + host->state = state; + ++unlock: + spin_unlock(&host->lock); + } + +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c +index 8da3d39e3218..258918d8a416 100644 +--- a/drivers/net/dsa/mv88e6xxx/chip.c ++++ b/drivers/net/dsa/mv88e6xxx/chip.c +@@ -2391,6 +2391,107 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip) + return mv88e6xxx_g1_stats_clear(chip); + } + ++/* The mv88e6390 has some hidden registers used for debug and ++ * development. The errata also makes use of them. ++ */ ++static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port, ++ int reg, u16 val) ++{ ++ u16 ctrl; ++ int err; ++ ++ err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT, ++ PORT_RESERVED_1A, val); ++ if (err) ++ return err; ++ ++ ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE | ++ PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | ++ reg; ++ ++ return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, ++ PORT_RESERVED_1A, ctrl); ++} ++ ++static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip) ++{ ++ return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT, ++ PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY); ++} ++ ++ ++static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port, ++ int reg, u16 *val) ++{ ++ u16 ctrl; ++ int err; ++ ++ ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ | ++ PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | ++ reg; ++ ++ err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, ++ PORT_RESERVED_1A, ctrl); ++ if (err) ++ return err; ++ ++ err = mv88e6390_hidden_wait(chip); ++ if (err) ++ return err; ++ ++ return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT, ++ PORT_RESERVED_1A, val); ++} ++ ++/* Check if the errata has already been applied. */ ++static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip) ++{ ++ int port; ++ int err; ++ u16 val; ++ ++ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { ++ err = mv88e6390_hidden_read(chip, port, 0, &val); ++ if (err) { ++ dev_err(chip->dev, ++ "Error reading hidden register: %d\n", err); ++ return false; ++ } ++ if (val != 0x01c0) ++ return false; ++ } ++ ++ return true; ++} ++ ++/* The 6390 copper ports have an errata which require poking magic ++ * values into undocumented hidden registers and then performing a ++ * software reset. ++ */ ++static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip) ++{ ++ int port; ++ int err; ++ ++ if (mv88e6390_setup_errata_applied(chip)) ++ return 0; ++ ++ /* Set the ports into blocking mode */ ++ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { ++ err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED); ++ if (err) ++ return err; ++ } ++ ++ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { ++ err = mv88e6390_hidden_write(chip, port, 0, 0x01c0); ++ if (err) ++ return err; ++ } ++ ++ return mv88e6xxx_software_reset(chip); ++} ++ + static int mv88e6xxx_setup(struct dsa_switch *ds) + { + struct mv88e6xxx_chip *chip = ds->priv; +@@ -2403,6 +2504,12 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) + + mutex_lock(&chip->reg_lock); + ++ if (chip->info->ops->setup_errata) { ++ err = chip->info->ops->setup_errata(chip); ++ if (err) ++ goto unlock; ++ } ++ + /* Cache the cmode of each port. 
*/ + for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { + if (chip->info->ops->port_get_cmode) { +@@ -3201,6 +3308,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { + + static const struct mv88e6xxx_ops mv88e6190_ops = { + /* MV88E6XXX_FAMILY_6390 */ ++ .setup_errata = mv88e6390_setup_errata, + .irl_init_all = mv88e6390_g2_irl_init_all, + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, +@@ -3243,6 +3351,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { + + static const struct mv88e6xxx_ops mv88e6190x_ops = { + /* MV88E6XXX_FAMILY_6390 */ ++ .setup_errata = mv88e6390_setup_errata, + .irl_init_all = mv88e6390_g2_irl_init_all, + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, +@@ -3285,6 +3394,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { + + static const struct mv88e6xxx_ops mv88e6191_ops = { + /* MV88E6XXX_FAMILY_6390 */ ++ .setup_errata = mv88e6390_setup_errata, + .irl_init_all = mv88e6390_g2_irl_init_all, + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, +@@ -3374,6 +3484,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { + + static const struct mv88e6xxx_ops mv88e6290_ops = { + /* MV88E6XXX_FAMILY_6390 */ ++ .setup_errata = mv88e6390_setup_errata, + .irl_init_all = mv88e6390_g2_irl_init_all, + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, +@@ -3675,6 +3786,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { + + static const struct mv88e6xxx_ops mv88e6390_ops = { + /* MV88E6XXX_FAMILY_6390 */ ++ .setup_errata = mv88e6390_setup_errata, + .irl_init_all = mv88e6390_g2_irl_init_all, + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, +@@ -3722,6 +3834,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { + + static const struct mv88e6xxx_ops mv88e6390x_ops = { + /* MV88E6XXX_FAMILY_6390 */ ++ .setup_errata = mv88e6390_setup_errata, + .irl_init_all = mv88e6390_g2_irl_init_all, + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, +diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h +index f9ecb7872d32..546651d8c3e1 100644 +--- a/drivers/net/dsa/mv88e6xxx/chip.h ++++ b/drivers/net/dsa/mv88e6xxx/chip.h +@@ -300,6 +300,11 @@ struct mv88e6xxx_mdio_bus { + }; + + struct mv88e6xxx_ops { ++ /* Switch Setup Errata, called early in the switch setup to ++ * allow any errata actions to be performed ++ */ ++ int (*setup_errata)(struct mv88e6xxx_chip *chip); ++ + int (*ieee_pri_map)(struct mv88e6xxx_chip *chip); + int (*ip_pri_map)(struct mv88e6xxx_chip *chip); + +diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h +index f32f56af8e35..b31910023bb6 100644 +--- a/drivers/net/dsa/mv88e6xxx/port.h ++++ b/drivers/net/dsa/mv88e6xxx/port.h +@@ -251,6 +251,16 @@ + /* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */ + #define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19 + ++/* Offset 0x1a: Magic undocumented errata register */ ++#define PORT_RESERVED_1A 0x1a ++#define PORT_RESERVED_1A_BUSY BIT(15) ++#define PORT_RESERVED_1A_WRITE BIT(14) ++#define PORT_RESERVED_1A_READ 0 ++#define PORT_RESERVED_1A_PORT_SHIFT 5 ++#define PORT_RESERVED_1A_BLOCK (0xf << 10) ++#define PORT_RESERVED_1A_CTRL_PORT 4 ++#define PORT_RESERVED_1A_DATA_PORT 5 ++ + int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, + u16 *val); + int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, +diff --git 
a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c +index 37c76945ad9b..e1f821edbc21 100644 +--- a/drivers/net/ethernet/intel/e1000e/ptp.c ++++ b/drivers/net/ethernet/intel/e1000e/ptp.c +@@ -173,10 +173,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + unsigned long flags; +- u64 ns; ++ u64 cycles, ns; + + spin_lock_irqsave(&adapter->systim_lock, flags); +- ns = timecounter_read(&adapter->tc); ++ ++ /* Use timecounter_cyc2time() to allow non-monotonic SYSTIM readings */ ++ cycles = adapter->cc.read(&adapter->cc); ++ ns = timecounter_cyc2time(&adapter->tc, cycles); ++ + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + *ts = ns_to_timespec64(ns); +@@ -232,9 +236,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work) + systim_overflow_work.work); + struct e1000_hw *hw = &adapter->hw; + struct timespec64 ts; ++ u64 ns; + +- adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts); ++ /* Update the timecounter */ ++ ns = timecounter_read(&adapter->tc); + ++ ts = ns_to_timespec64(ns); + e_dbg("SYSTIM overflow check at %lld.%09lu\n", + (long long) ts.tv_sec, ts.tv_nsec); + +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +index add124e0381d..b27f7a968820 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +@@ -4,6 +4,7 @@ + #include "ixgbe.h" + #include <net/xfrm.h> + #include <crypto/aead.h> ++#include <linux/if_bridge.h> + + /** + * ixgbe_ipsec_set_tx_sa - set the Tx SA registers +@@ -676,7 +677,8 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) + } else { + struct tx_sa tsa; + +- if (adapter->num_vfs) ++ if (adapter->num_vfs && ++ adapter->bridge_mode != BRIDGE_MODE_VEPA) + return -EOPNOTSUPP; + + /* find the first unused index */ +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +index 3d1159f8a53f..de821a9fdfaf 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +@@ -4635,12 +4635,15 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, + lower_dev, + upper_dev); + } else if (netif_is_lag_master(upper_dev)) { +- if (info->linking) ++ if (info->linking) { + err = mlxsw_sp_port_lag_join(mlxsw_sp_port, + upper_dev); +- else ++ } else { ++ mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, ++ false); + mlxsw_sp_port_lag_leave(mlxsw_sp_port, + upper_dev); ++ } + } else if (netif_is_ovs_master(upper_dev)) { + if (info->linking) + err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +index 4eb64cb0d9a1..0d9ea37c5d21 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +@@ -1761,7 +1761,7 @@ static void + mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_port *bridge_port, u16 vid) + { +- u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid; ++ u16 pvid = mlxsw_sp_port->pvid == vid ? 
0 : mlxsw_sp_port->pvid; + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c +index 07f3080eca18..5f45ffeeecb4 100644 +--- a/drivers/net/ethernet/realtek/r8169.c ++++ b/drivers/net/ethernet/realtek/r8169.c +@@ -214,6 +214,8 @@ enum cfg_version { + }; + + static const struct pci_device_id rtl8169_pci_tbl[] = { ++ { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 }, ++ { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index 774e1ff01c9a..735ad838e2ba 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -123,6 +123,7 @@ static void qmimux_setup(struct net_device *dev) + dev->addr_len = 0; + dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; + dev->netdev_ops = &qmimux_netdev_ops; ++ dev->mtu = 1500; + dev->needs_free_netdev = true; + } + +diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c +index a63c97e2c50c..6f10331e986b 100644 +--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c ++++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c +@@ -71,7 +71,7 @@ void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, u16 peer_id, u8 tid + spin_lock_bh(&ar->data_lock); + + peer = ath10k_peer_find_by_id(ar, peer_id); +- if (!peer) ++ if (!peer || !peer->sta) + goto out; + + arsta = (struct ath10k_sta *)peer->sta->drv_priv; +diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c +index 4d1cd90d6d27..03d4cc6f35bc 100644 +--- a/drivers/net/wireless/ath/ath10k/htt_rx.c ++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c +@@ -2589,7 +2589,7 @@ static void ath10k_htt_fetch_peer_stats(struct ath10k *ar, + rcu_read_lock(); + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find_by_id(ar, peer_id); +- if (!peer) { ++ if (!peer || !peer->sta) { + ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n", + peer_id); + goto out; +@@ -2642,7 +2642,7 @@ static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data) + rcu_read_lock(); + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find_by_id(ar, peer_id); +- if (!peer) { ++ if (!peer || !peer->sta) { + ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n", + peer_id); + goto out; +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +index afed549f5645..9a764af30f36 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +@@ -2938,7 +2938,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + } + +- iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band); ++ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, ++ false); + ret = iwl_mvm_update_sta(mvm, vif, sta); + } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTHORIZED) { +@@ -2954,7 +2955,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, + /* enable beacon filtering */ + WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + +- iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band); ++ 
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, ++ true); + + ret = 0; + } else if (old_state == IEEE80211_STA_AUTHORIZED && +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +index b3987a0a7018..6b65ad6c9b56 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +@@ -1685,7 +1685,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) + #endif /* CONFIG_IWLWIFI_DEBUGFS */ + + /* rate scaling */ +-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init); ++int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync); + void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); + int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate); + void rs_update_last_rssi(struct iwl_mvm *mvm, +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +index f2830b5693d2..6b9c670fcef8 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +@@ -1280,7 +1280,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + (unsigned long)(lq_sta->last_tx + + (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { + IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n"); +- iwl_mvm_rs_rate_init(mvm, sta, info->band); ++ iwl_mvm_rs_rate_init(mvm, sta, info->band, true); + return; + } + lq_sta->last_tx = jiffies; +@@ -2870,9 +2870,8 @@ void rs_update_last_rssi(struct iwl_mvm *mvm, + static void rs_initialize_lq(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, +- enum nl80211_band band) ++ enum nl80211_band band, bool update) + { +- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_scale_tbl_info *tbl; + struct rs_rate *rate; + u8 active_tbl = 0; +@@ -2901,8 +2900,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, + rs_set_expected_tpt_table(lq_sta, tbl); + rs_fill_lq_cmd(mvm, sta, lq_sta, rate); + /* TODO restore station should remember the lq cmd */ +- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, +- mvmsta->sta_state < IEEE80211_STA_AUTHORIZED); ++ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update); + } + + static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, +@@ -3155,7 +3153,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) + * Called after adding a new station to initialize rate scaling + */ + static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, +- enum nl80211_band band) ++ enum nl80211_band band, bool update) + { + int i, j; + struct ieee80211_hw *hw = mvm->hw; +@@ -3235,7 +3233,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + #ifdef CONFIG_IWLWIFI_DEBUGFS + iwl_mvm_reset_frame_stats(mvm); + #endif +- rs_initialize_lq(mvm, sta, lq_sta, band); ++ rs_initialize_lq(mvm, sta, lq_sta, band, update); + } + + static void rs_drv_rate_update(void *mvm_r, +@@ -3255,7 +3253,7 @@ static void rs_drv_rate_update(void *mvm_r, + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) + ieee80211_stop_tx_ba_session(sta, tid); + +- iwl_mvm_rs_rate_init(mvm, sta, sband->band); ++ iwl_mvm_rs_rate_init(mvm, sta, sband->band, true); + } + + #ifdef CONFIG_MAC80211_DEBUGFS +@@ -4112,12 +4110,12 @@ static const struct rate_control_ops rs_mvm_ops_drv = { + }; + + void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, +- enum nl80211_band band) ++ enum nl80211_band band, 
bool update) + { + if (iwl_mvm_has_tlc_offload(mvm)) + rs_fw_rate_init(mvm, sta, band); + else +- rs_drv_rate_init(mvm, sta, band); ++ rs_drv_rate_init(mvm, sta, band, update); + } + + int iwl_mvm_rate_control_register(void) +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +index d2cf484e2b73..8e7f993e2911 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +@@ -420,7 +420,7 @@ struct iwl_lq_sta { + + /* Initialize station's rate scaling information after adding station */ + void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, +- enum nl80211_band band); ++ enum nl80211_band band, bool init); + + /* Notify RS about Tx status */ + void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +index b002a7afb5f5..6a5349401aa9 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +@@ -900,20 +900,19 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, + + /** + * iwl_mvm_send_lq_cmd() - Send link quality command +- * @init: This command is sent as part of station initialization right +- * after station has been added. ++ * @sync: This command can be sent synchronously. + * + * The link quality command is sent as the last step of station creation. + * This is the special case in which init is set and we call a callback in + * this case to clear the state indicating that station creation is in + * progress. + */ +-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init) ++int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync) + { + struct iwl_host_cmd cmd = { + .id = LQ_CMD, + .len = { sizeof(struct iwl_lq_cmd), }, +- .flags = init ? 0 : CMD_ASYNC, ++ .flags = sync ? 
0 : CMD_ASYNC, + .data = { lq, }, + }; + +diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c +index eda57ef12fd0..baa9cee6fa2c 100644 +--- a/drivers/of/overlay.c ++++ b/drivers/of/overlay.c +@@ -378,7 +378,9 @@ static int add_changeset_node(struct overlay_changeset *ovcs, + if (ret) + return ret; + +- return build_changeset_next_level(ovcs, tchild, node); ++ ret = build_changeset_next_level(ovcs, tchild, node); ++ of_node_put(tchild); ++ return ret; + } + + if (node->phandle && tchild->phandle) +diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c +index 2d6e272315a8..db3556dc90d1 100644 +--- a/drivers/platform/x86/asus-wmi.c ++++ b/drivers/platform/x86/asus-wmi.c +@@ -2231,7 +2231,8 @@ static int asus_wmi_add(struct platform_device *pdev) + err = asus_wmi_backlight_init(asus); + if (err && err != -ENODEV) + goto fail_backlight; +- } ++ } else ++ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL); + + status = wmi_install_notify_handler(asus->driver->event_guid, + asus_wmi_notify, asus); +diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c +index 59ecbb3b53b5..a33628550425 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_fp.c ++++ b/drivers/scsi/megaraid/megaraid_sas_fp.c +@@ -1266,7 +1266,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map, + + for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { + ld = MR_TargetIdToLdGet(ldCount, drv_map); +- if (ld >= MAX_LOGICAL_DRIVES_EXT) { ++ if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) { + lbInfo[ldCount].loadBalanceFlag = 0; + continue; + } +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c +index c7f95bace353..f45c54f02bfa 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c +@@ -2832,7 +2832,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, + device_id < instance->fw_supported_vd_count)) { + + ld = MR_TargetIdToLdGet(device_id, local_map_ptr); +- if (ld >= instance->fw_supported_vd_count) ++ if (ld >= instance->fw_supported_vd_count - 1) + fp_possible = 0; + else { + raid = MR_LdRaidGet(ld, local_map_ptr); +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c +index 59d7844ee022..b59bba3e6516 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c +@@ -3344,8 +3344,9 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr, + static inline void + _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) + { ++ wmb(); + __raw_writeq(b, addr); +- mmiowb(); ++ barrier(); + } + #else + static inline void +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c +index e5bd035ebad0..4de740da547b 100644 +--- a/drivers/scsi/qedi/qedi_main.c ++++ b/drivers/scsi/qedi/qedi_main.c +@@ -952,6 +952,9 @@ static int qedi_find_boot_info(struct qedi_ctx *qedi, + cls_sess = iscsi_conn_to_session(cls_conn); + sess = cls_sess->dd_data; + ++ if (!iscsi_is_session_online(cls_sess)) ++ continue; ++ + if (pri_ctrl_flags) { + if (!strcmp(pri_tgt->iscsi_name, sess->targetname) && + !strcmp(pri_tgt->ip_addr, ep_ip_addr)) { +diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c +index 2112ea6723c6..8c1a232ac6bf 100644 +--- a/drivers/scsi/smartpqi/smartpqi_init.c ++++ b/drivers/scsi/smartpqi/smartpqi_init.c +@@ -2720,6 +2720,9 @@ static unsigned int pqi_process_io_intr(struct 
pqi_ctrl_info *ctrl_info, + switch (response->header.iu_type) { + case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: + case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: ++ if (io_request->scmd) ++ io_request->scmd->result = 0; ++ /* fall through */ + case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: + break; + case PQI_RESPONSE_IU_TASK_MANAGEMENT: +@@ -6686,6 +6689,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev) + * storage. + */ + rc = pqi_flush_cache(ctrl_info, SHUTDOWN); ++ pqi_free_interrupts(ctrl_info); + pqi_reset(ctrl_info); + if (rc == 0) + return; +diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c +index 14da8cc2246a..0346630b67c8 100644 +--- a/drivers/staging/erofs/unzip_vle.c ++++ b/drivers/staging/erofs/unzip_vle.c +@@ -724,13 +724,18 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios) + struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t); + bool background = tagptr_unfold_tags(t); + +- if (atomic_add_return(bios, &io->pending_bios)) ++ if (!background) { ++ unsigned long flags; ++ ++ spin_lock_irqsave(&io->u.wait.lock, flags); ++ if (!atomic_add_return(bios, &io->pending_bios)) ++ wake_up_locked(&io->u.wait); ++ spin_unlock_irqrestore(&io->u.wait.lock, flags); + return; ++ } + +- if (background) ++ if (!atomic_add_return(bios, &io->pending_bios)) + queue_work(z_erofs_workqueue, &io->u.work); +- else +- wake_up(&io->u.wait); + } + + static inline void z_erofs_vle_read_endio(struct bio *bio) +diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c +index cb0461a10808..93424db5f002 100644 +--- a/drivers/target/target_core_spc.c ++++ b/drivers/target/target_core_spc.c +@@ -108,12 +108,17 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf) + + buf[7] = 0x2; /* CmdQue=1 */ + +- memcpy(&buf[8], "LIO-ORG ", 8); +- memset(&buf[16], 0x20, 16); ++ /* ++ * ASCII data fields described as being left-aligned shall have any ++ * unused bytes at the end of the field (i.e., highest offset) and the ++ * unused bytes shall be filled with ASCII space characters (20h). ++ */ ++ memset(&buf[8], 0x20, 8 + 16 + 4); ++ memcpy(&buf[8], "LIO-ORG", sizeof("LIO-ORG") - 1); + memcpy(&buf[16], dev->t10_wwn.model, +- min_t(size_t, strlen(dev->t10_wwn.model), 16)); ++ strnlen(dev->t10_wwn.model, 16)); + memcpy(&buf[32], dev->t10_wwn.revision, +- min_t(size_t, strlen(dev->t10_wwn.revision), 4)); ++ strnlen(dev->t10_wwn.revision, 4)); + buf[4] = 31; /* Set additional length to 31 */ + + return 0; +@@ -251,7 +256,9 @@ check_t10_vend_desc: + buf[off] = 0x2; /* ASCII */ + buf[off+1] = 0x1; /* T10 Vendor ID */ + buf[off+2] = 0x0; +- memcpy(&buf[off+4], "LIO-ORG", 8); ++ /* left align Vendor ID and pad with spaces */ ++ memset(&buf[off+4], 0x20, 8); ++ memcpy(&buf[off+4], "LIO-ORG", sizeof("LIO-ORG") - 1); + /* Extra Byte for NULL Terminator */ + id_len++; + /* Identifier Length */ +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index fc3093d21b96..3f7aad45d215 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -224,19 +224,28 @@ void transport_subsystem_check_init(void) + sub_api_initialized = 1; + } + ++static void target_release_sess_cmd_refcnt(struct percpu_ref *ref) ++{ ++ struct se_session *sess = container_of(ref, typeof(*sess), cmd_count); ++ ++ wake_up(&sess->cmd_list_wq); ++} ++ + /** + * transport_init_session - initialize a session object + * @se_sess: Session object pointer. 
+ * + * The caller must have zero-initialized @se_sess before calling this function. + */ +-void transport_init_session(struct se_session *se_sess) ++int transport_init_session(struct se_session *se_sess) + { + INIT_LIST_HEAD(&se_sess->sess_list); + INIT_LIST_HEAD(&se_sess->sess_acl_list); + INIT_LIST_HEAD(&se_sess->sess_cmd_list); + spin_lock_init(&se_sess->sess_cmd_lock); + init_waitqueue_head(&se_sess->cmd_list_wq); ++ return percpu_ref_init(&se_sess->cmd_count, ++ target_release_sess_cmd_refcnt, 0, GFP_KERNEL); + } + EXPORT_SYMBOL(transport_init_session); + +@@ -247,6 +256,7 @@ EXPORT_SYMBOL(transport_init_session); + struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) + { + struct se_session *se_sess; ++ int ret; + + se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); + if (!se_sess) { +@@ -254,7 +264,11 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) + " se_sess_cache\n"); + return ERR_PTR(-ENOMEM); + } +- transport_init_session(se_sess); ++ ret = transport_init_session(se_sess); ++ if (ret < 0) { ++ kfree(se_sess); ++ return ERR_PTR(ret); ++ } + se_sess->sup_prot_ops = sup_prot_ops; + + return se_sess; +@@ -581,6 +595,7 @@ void transport_free_session(struct se_session *se_sess) + sbitmap_queue_free(&se_sess->sess_tag_pool); + kvfree(se_sess->sess_cmd_map); + } ++ percpu_ref_exit(&se_sess->cmd_count); + kmem_cache_free(se_sess_cache, se_sess); + } + EXPORT_SYMBOL(transport_free_session); +@@ -2724,6 +2739,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) + } + se_cmd->transport_state |= CMD_T_PRE_EXECUTE; + list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); ++ percpu_ref_get(&se_sess->cmd_count); + out: + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + +@@ -2754,8 +2770,6 @@ static void target_release_cmd_kref(struct kref *kref) + if (se_sess) { + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + list_del_init(&se_cmd->se_cmd_list); +- if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list)) +- wake_up(&se_sess->cmd_list_wq); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + } + +@@ -2763,6 +2777,8 @@ static void target_release_cmd_kref(struct kref *kref) + se_cmd->se_tfo->release_cmd(se_cmd); + if (compl) + complete(compl); ++ ++ percpu_ref_put(&se_sess->cmd_count); + } + + /** +@@ -2891,6 +2907,8 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + se_sess->sess_tearing_down = 1; + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); ++ ++ percpu_ref_kill(&se_sess->cmd_count); + } + EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); + +@@ -2905,17 +2923,14 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) + + WARN_ON_ONCE(!se_sess->sess_tearing_down); + +- spin_lock_irq(&se_sess->sess_cmd_lock); + do { +- ret = wait_event_lock_irq_timeout( +- se_sess->cmd_list_wq, +- list_empty(&se_sess->sess_cmd_list), +- se_sess->sess_cmd_lock, 180 * HZ); ++ ret = wait_event_timeout(se_sess->cmd_list_wq, ++ percpu_ref_is_zero(&se_sess->cmd_count), ++ 180 * HZ); + list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list) + target_show_cmd("session shutdown: still waiting for ", + cmd); + } while (ret <= 0); +- spin_unlock_irq(&se_sess->sess_cmd_lock); + } + EXPORT_SYMBOL(target_wait_for_sess_cmds); + +diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c +index 2718a933c0c6..7cdb5d7f6538 100644 +--- a/drivers/target/target_core_xcopy.c ++++ 
b/drivers/target/target_core_xcopy.c +@@ -480,6 +480,8 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = { + + int target_xcopy_setup_pt(void) + { ++ int ret; ++ + xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0); + if (!xcopy_wq) { + pr_err("Unable to allocate xcopy_wq\n"); +@@ -497,7 +499,9 @@ int target_xcopy_setup_pt(void) + INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list); + INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list); + memset(&xcopy_pt_sess, 0, sizeof(struct se_session)); +- transport_init_session(&xcopy_pt_sess); ++ ret = transport_init_session(&xcopy_pt_sess); ++ if (ret < 0) ++ return ret; + + xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg; + xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess; +diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c +index ebd33c0232e6..89ade213a1a9 100644 +--- a/drivers/tty/serial/amba-pl011.c ++++ b/drivers/tty/serial/amba-pl011.c +@@ -2780,6 +2780,7 @@ static struct platform_driver arm_sbsa_uart_platform_driver = { + .name = "sbsa-uart", + .of_match_table = of_match_ptr(sbsa_uart_of_match), + .acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match), ++ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011), + }, + }; + +@@ -2808,6 +2809,7 @@ static struct amba_driver pl011_driver = { + .drv = { + .name = "uart-pl011", + .pm = &pl011_dev_pm_ops, ++ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011), + }, + .id_table = pl011_ids, + .probe = pl011_probe, +diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c +index fd80d999308d..0bdf1687983f 100644 +--- a/drivers/tty/serial/pic32_uart.c ++++ b/drivers/tty/serial/pic32_uart.c +@@ -919,6 +919,7 @@ static struct platform_driver pic32_uart_platform_driver = { + .driver = { + .name = PIC32_DEV_NAME, + .of_match_table = of_match_ptr(pic32_serial_dt_ids), ++ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_PIC32), + }, + }; + +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c +index 80bb56facfb6..ad126f51d549 100644 +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -205,10 +205,15 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state, + if (!state->xmit.buf) { + state->xmit.buf = (unsigned char *) page; + uart_circ_clear(&state->xmit); ++ uart_port_unlock(uport, flags); + } else { ++ uart_port_unlock(uport, flags); ++ /* ++ * Do not free() the page under the port lock, see ++ * uart_shutdown(). ++ */ + free_page(page); + } +- uart_port_unlock(uport, flags); + + retval = uport->ops->startup(uport); + if (retval == 0) { +@@ -268,6 +273,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state) + struct uart_port *uport = uart_port_check(state); + struct tty_port *port = &state->port; + unsigned long flags = 0; ++ char *xmit_buf = NULL; + + /* + * Set the TTY IO error marker +@@ -298,14 +304,18 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state) + tty_port_set_suspended(port, 0); + + /* +- * Free the transmit buffer page. ++ * Do not free() the transmit buffer page under the port lock since ++ * this can create various circular locking scenarios. For instance, ++ * console driver may need to allocate/free a debug object, which ++ * can endup in printk() recursion. 
+ */ + uart_port_lock(state, flags); +- if (state->xmit.buf) { +- free_page((unsigned long)state->xmit.buf); +- state->xmit.buf = NULL; +- } ++ xmit_buf = state->xmit.buf; ++ state->xmit.buf = NULL; + uart_port_unlock(uport, flags); ++ ++ if (xmit_buf) ++ free_page((unsigned long)xmit_buf); + } + + /** +diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c +index 6ed74735b58c..87d8dd90d605 100644 +--- a/drivers/tty/serial/xilinx_uartps.c ++++ b/drivers/tty/serial/xilinx_uartps.c +@@ -1608,6 +1608,7 @@ static struct platform_driver cdns_uart_platform_driver = { + .name = CDNS_UART_NAME, + .of_match_table = cdns_uart_of_match, + .pm = &cdns_uart_dev_pm_ops, ++ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_XILINX_PS_UART), + }, + }; + +diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c +index 67d8a501d994..fea02c7ad4f4 100644 +--- a/drivers/usb/gadget/udc/renesas_usb3.c ++++ b/drivers/usb/gadget/udc/renesas_usb3.c +@@ -358,6 +358,7 @@ struct renesas_usb3 { + bool extcon_host; /* check id and set EXTCON_USB_HOST */ + bool extcon_usb; /* check vbus and set EXTCON_USB */ + bool forced_b_device; ++ bool start_to_connect; + }; + + #define gadget_to_renesas_usb3(_gadget) \ +@@ -476,7 +477,8 @@ static void usb3_init_axi_bridge(struct renesas_usb3 *usb3) + static void usb3_init_epc_registers(struct renesas_usb3 *usb3) + { + usb3_write(usb3, ~0, USB3_USB_INT_STA_1); +- usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG); ++ if (!usb3->workaround_for_vbus) ++ usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG); + } + + static bool usb3_wakeup_usb2_phy(struct renesas_usb3 *usb3) +@@ -700,8 +702,7 @@ static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev) + usb3_set_mode_by_role_sw(usb3, host); + usb3_vbus_out(usb3, a_dev); + /* for A-Peripheral or forced B-device mode */ +- if ((!host && a_dev) || +- (usb3->workaround_for_vbus && usb3->forced_b_device)) ++ if ((!host && a_dev) || usb3->start_to_connect) + usb3_connect(usb3); + spin_unlock_irqrestore(&usb3->lock, flags); + } +@@ -2432,7 +2433,11 @@ static ssize_t renesas_usb3_b_device_write(struct file *file, + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + +- if (!strncmp(buf, "1", 1)) ++ usb3->start_to_connect = false; ++ if (usb3->workaround_for_vbus && usb3->forced_b_device && ++ !strncmp(buf, "2", 1)) ++ usb3->start_to_connect = true; ++ else if (!strncmp(buf, "1", 1)) + usb3->forced_b_device = true; + else + usb3->forced_b_device = false; +@@ -2440,7 +2445,7 @@ static ssize_t renesas_usb3_b_device_write(struct file *file, + if (usb3->workaround_for_vbus) + usb3_disconnect(usb3); + +- /* Let this driver call usb3_connect() anyway */ ++ /* Let this driver call usb3_connect() if needed */ + usb3_check_id(usb3); + + return count; +diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c +index c74cc9c309b1..3457c1fdebd1 100644 +--- a/drivers/usb/typec/tcpm.c ++++ b/drivers/usb/typec/tcpm.c +@@ -317,6 +317,9 @@ struct tcpm_port { + /* Deadline in jiffies to exit src_try_wait state */ + unsigned long max_wait; + ++ /* port belongs to a self powered device */ ++ bool self_powered; ++ + #ifdef CONFIG_DEBUG_FS + struct dentry *dentry; + struct mutex logbuffer_lock; /* log buffer access lock */ +@@ -3257,7 +3260,8 @@ static void run_state_machine(struct tcpm_port *port) + case SRC_HARD_RESET_VBUS_OFF: + tcpm_set_vconn(port, true); + tcpm_set_vbus(port, false); +- tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST); ++ 
tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE, ++ TYPEC_HOST); + tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER); + break; + case SRC_HARD_RESET_VBUS_ON: +@@ -3270,7 +3274,8 @@ static void run_state_machine(struct tcpm_port *port) + memset(&port->pps_data, 0, sizeof(port->pps_data)); + tcpm_set_vconn(port, false); + tcpm_set_charge(port, false); +- tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE); ++ tcpm_set_roles(port, port->self_powered, TYPEC_SINK, ++ TYPEC_DEVICE); + /* + * VBUS may or may not toggle, depending on the adapter. + * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON +@@ -4415,6 +4420,8 @@ sink: + return -EINVAL; + port->operating_snk_mw = mw / 1000; + ++ port->self_powered = fwnode_property_read_bool(fwnode, "self-powered"); ++ + return 0; + } + +@@ -4723,6 +4730,7 @@ static int tcpm_copy_caps(struct tcpm_port *port, + port->typec_caps.prefer_role = tcfg->default_role; + port->typec_caps.type = tcfg->type; + port->typec_caps.data = tcfg->data; ++ port->self_powered = port->tcpc->config->self_powered; + + return 0; + } +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c +index b2b283e48439..8fed470bb7e1 100644 +--- a/fs/btrfs/dev-replace.c ++++ b/fs/btrfs/dev-replace.c +@@ -800,39 +800,58 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info) + case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED: + result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED; + btrfs_dev_replace_write_unlock(dev_replace); +- goto leave; ++ break; + case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED: ++ result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR; ++ tgt_device = dev_replace->tgtdev; ++ src_device = dev_replace->srcdev; ++ btrfs_dev_replace_write_unlock(dev_replace); ++ btrfs_scrub_cancel(fs_info); ++ /* btrfs_dev_replace_finishing() will handle the cleanup part */ ++ btrfs_info_in_rcu(fs_info, ++ "dev_replace from %s (devid %llu) to %s canceled", ++ btrfs_dev_name(src_device), src_device->devid, ++ btrfs_dev_name(tgt_device)); ++ break; + case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED: ++ /* ++ * Scrub doing the replace isn't running so we need to do the ++ * cleanup step of btrfs_dev_replace_finishing() here ++ */ + result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR; + tgt_device = dev_replace->tgtdev; + src_device = dev_replace->srcdev; + dev_replace->tgtdev = NULL; + dev_replace->srcdev = NULL; +- break; +- } +- dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED; +- dev_replace->time_stopped = ktime_get_real_seconds(); +- dev_replace->item_needs_writeback = 1; +- btrfs_dev_replace_write_unlock(dev_replace); +- btrfs_scrub_cancel(fs_info); ++ dev_replace->replace_state = ++ BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED; ++ dev_replace->time_stopped = ktime_get_real_seconds(); ++ dev_replace->item_needs_writeback = 1; + +- trans = btrfs_start_transaction(root, 0); +- if (IS_ERR(trans)) { +- mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); +- return PTR_ERR(trans); +- } +- ret = btrfs_commit_transaction(trans); +- WARN_ON(ret); ++ btrfs_dev_replace_write_unlock(dev_replace); + +- btrfs_info_in_rcu(fs_info, +- "dev_replace from %s (devid %llu) to %s canceled", +- btrfs_dev_name(src_device), src_device->devid, +- btrfs_dev_name(tgt_device)); ++ btrfs_scrub_cancel(fs_info); ++ ++ trans = btrfs_start_transaction(root, 0); ++ if (IS_ERR(trans)) { ++ mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); ++ return PTR_ERR(trans); ++ } ++ ret = btrfs_commit_transaction(trans); ++ WARN_ON(ret); + +- if (tgt_device) +- 
btrfs_destroy_dev_replace_tgtdev(tgt_device); ++ btrfs_info_in_rcu(fs_info, ++ "suspended dev_replace from %s (devid %llu) to %s canceled", ++ btrfs_dev_name(src_device), src_device->devid, ++ btrfs_dev_name(tgt_device)); ++ ++ if (tgt_device) ++ btrfs_destroy_dev_replace_tgtdev(tgt_device); ++ break; ++ default: ++ result = -EINVAL; ++ } + +-leave: + mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); + return result; + } +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 4f6dc56b4f4d..83b3a626c796 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -6440,14 +6440,19 @@ fail_dir_item: + err = btrfs_del_root_ref(trans, key.objectid, + root->root_key.objectid, parent_ino, + &local_index, name, name_len); +- ++ if (err) ++ btrfs_abort_transaction(trans, err); + } else if (add_backref) { + u64 local_index; + int err; + + err = btrfs_del_inode_ref(trans, root, name, name_len, + ino, parent_ino, &local_index); ++ if (err) ++ btrfs_abort_transaction(trans, err); + } ++ ++ /* Return the original error code */ + return ret; + } + +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index 223334f08530..0ee1cd4b56fb 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -4768,19 +4768,17 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, + /* + * Use the number of data stripes to figure out how big this chunk + * is really going to be in terms of logical address space, +- * and compare that answer with the max chunk size ++ * and compare that answer with the max chunk size. If it's higher, ++ * we try to reduce stripe_size. + */ + if (stripe_size * data_stripes > max_chunk_size) { +- stripe_size = div_u64(max_chunk_size, data_stripes); +- +- /* bump the answer up to a 16MB boundary */ +- stripe_size = round_up(stripe_size, SZ_16M); +- + /* +- * But don't go higher than the limits we found while searching +- * for free extents ++ * Reduce stripe_size, round it up to a 16MB boundary again and ++ * then use it, unless it ends up being even bigger than the ++ * previous value we had already. 
+ */ +- stripe_size = min(devices_info[ndevs - 1].max_avail, ++ stripe_size = min(round_up(div_u64(max_chunk_size, ++ data_stripes), SZ_16M), + stripe_size); + } + +@@ -7474,6 +7472,8 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info) + struct btrfs_path *path; + struct btrfs_root *root = fs_info->dev_root; + struct btrfs_key key; ++ u64 prev_devid = 0; ++ u64 prev_dev_ext_end = 0; + int ret = 0; + + key.objectid = 1; +@@ -7518,10 +7518,22 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info) + chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext); + physical_len = btrfs_dev_extent_length(leaf, dext); + ++ /* Check if this dev extent overlaps with the previous one */ ++ if (devid == prev_devid && physical_offset < prev_dev_ext_end) { ++ btrfs_err(fs_info, ++"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu", ++ devid, physical_offset, prev_dev_ext_end); ++ ret = -EUCLEAN; ++ goto out; ++ } ++ + ret = verify_one_dev_extent(fs_info, chunk_offset, devid, + physical_offset, physical_len); + if (ret < 0) + goto out; ++ prev_devid = devid; ++ prev_dev_ext_end = physical_offset + physical_len; ++ + ret = btrfs_next_item(root, path); + if (ret < 0) + goto out; +diff --git a/fs/iomap.c b/fs/iomap.c +index ec15cf2ec696..e57fb1e534c5 100644 +--- a/fs/iomap.c ++++ b/fs/iomap.c +@@ -488,16 +488,29 @@ done: + } + EXPORT_SYMBOL_GPL(iomap_readpages); + ++/* ++ * iomap_is_partially_uptodate checks whether blocks within a page are ++ * uptodate or not. ++ * ++ * Returns true if all blocks which correspond to a file portion ++ * we want to read within the page are uptodate. ++ */ + int + iomap_is_partially_uptodate(struct page *page, unsigned long from, + unsigned long count) + { + struct iomap_page *iop = to_iomap_page(page); + struct inode *inode = page->mapping->host; +- unsigned first = from >> inode->i_blkbits; +- unsigned last = (from + count - 1) >> inode->i_blkbits; ++ unsigned len, first, last; + unsigned i; + ++ /* Limit range to one page */ ++ len = min_t(unsigned, PAGE_SIZE - from, count); ++ ++ /* First and last blocks in range within page */ ++ first = from >> inode->i_blkbits; ++ last = (from + len - 1) >> inode->i_blkbits; ++ + if (iop) { + for (i = first; i <= last; i++) + if (!test_bit(i, iop->uptodate)) +diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c +index 902a7dd10e5c..bb6ae387469f 100644 +--- a/fs/jffs2/super.c ++++ b/fs/jffs2/super.c +@@ -101,7 +101,8 @@ static int jffs2_sync_fs(struct super_block *sb, int wait) + struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); + + #ifdef CONFIG_JFFS2_FS_WRITEBUFFER +- cancel_delayed_work_sync(&c->wbuf_dwork); ++ if (jffs2_is_writebuffered(c)) ++ cancel_delayed_work_sync(&c->wbuf_dwork); + #endif + + mutex_lock(&c->alloc_sem); +diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c +index 7642b6712c39..30208233f65b 100644 +--- a/fs/ocfs2/localalloc.c ++++ b/fs/ocfs2/localalloc.c +@@ -345,13 +345,18 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb) + if (num_used + || alloc->id1.bitmap1.i_used + || alloc->id1.bitmap1.i_total +- || la->la_bm_off) +- mlog(ML_ERROR, "Local alloc hasn't been recovered!\n" ++ || la->la_bm_off) { ++ mlog(ML_ERROR, "inconsistent detected, clean journal with" ++ " unrecovered local alloc, please run fsck.ocfs2!\n" + "found = %u, set = %u, taken = %u, off = %u\n", + num_used, le32_to_cpu(alloc->id1.bitmap1.i_used), + le32_to_cpu(alloc->id1.bitmap1.i_total), + OCFS2_LOCAL_ALLOC(alloc)->la_bm_off); + ++ status = -EINVAL; ++ goto bail; ++ } ++ + osb->local_alloc_bh 
= alloc_bh; + osb->local_alloc_state = OCFS2_LA_ENABLED; + +diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c +index 0792595ebcfb..3c777ec80d47 100644 +--- a/fs/pstore/ram_core.c ++++ b/fs/pstore/ram_core.c +@@ -496,6 +496,11 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig, + sig ^= PERSISTENT_RAM_SIG; + + if (prz->buffer->sig == sig) { ++ if (buffer_size(prz) == 0) { ++ pr_debug("found existing empty buffer\n"); ++ return 0; ++ } ++ + if (buffer_size(prz) > prz->buffer_size || + buffer_start(prz) > buffer_size(prz)) + pr_info("found existing invalid buffer, size %zu, start %zu\n", +diff --git a/fs/quota/quota.c b/fs/quota/quota.c +index f0cbf58ad4da..fd5dd806f1b9 100644 +--- a/fs/quota/quota.c ++++ b/fs/quota/quota.c +@@ -791,7 +791,8 @@ static int quotactl_cmd_write(int cmd) + /* Return true if quotactl command is manipulating quota on/off state */ + static bool quotactl_cmd_onoff(int cmd) + { +- return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF); ++ return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF) || ++ (cmd == Q_XQUOTAON) || (cmd == Q_XQUOTAOFF); + } + + /* +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c +index 7a85e609fc27..d8b8323e80f4 100644 +--- a/fs/userfaultfd.c ++++ b/fs/userfaultfd.c +@@ -736,10 +736,18 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma, + struct userfaultfd_ctx *ctx; + + ctx = vma->vm_userfaultfd_ctx.ctx; +- if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) { ++ ++ if (!ctx) ++ return; ++ ++ if (ctx->features & UFFD_FEATURE_EVENT_REMAP) { + vm_ctx->ctx = ctx; + userfaultfd_ctx_get(ctx); + WRITE_ONCE(ctx->mmap_changing, true); ++ } else { ++ /* Drop uffd context if remap feature not enabled */ ++ vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; ++ vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); + } + } + +diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h +index 9a6bc0951cfa..c31157135598 100644 +--- a/include/linux/backing-dev-defs.h ++++ b/include/linux/backing-dev-defs.h +@@ -258,6 +258,14 @@ static inline void wb_get(struct bdi_writeback *wb) + */ + static inline void wb_put(struct bdi_writeback *wb) + { ++ if (WARN_ON_ONCE(!wb->bdi)) { ++ /* ++ * A driver bug might cause a file to be removed before bdi was ++ * initialized. 
++ */ ++ return; ++ } ++ + if (wb != &wb->bdi->wb) + percpu_ref_put(&wb->refcnt); + } +diff --git a/include/linux/filter.h b/include/linux/filter.h +index 6791a0ac0139..ec90d5255cf7 100644 +--- a/include/linux/filter.h ++++ b/include/linux/filter.h +@@ -665,24 +665,10 @@ static inline u32 bpf_ctx_off_adjust_machine(u32 size) + return size; + } + +-static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access, +- u32 size_default) +-{ +- size_default = bpf_ctx_off_adjust_machine(size_default); +- size_access = bpf_ctx_off_adjust_machine(size_access); +- +-#ifdef __LITTLE_ENDIAN +- return (off & (size_default - 1)) == 0; +-#else +- return (off & (size_default - 1)) + size_access == size_default; +-#endif +-} +- + static inline bool + bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) + { +- return bpf_ctx_narrow_align_ok(off, size, size_default) && +- size <= size_default && (size & (size - 1)) == 0; ++ return size <= size_default && (size & (size - 1)) == 0; + } + + #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) +diff --git a/include/linux/swap.h b/include/linux/swap.h +index 8e2c11e692ba..77221c16733a 100644 +--- a/include/linux/swap.h ++++ b/include/linux/swap.h +@@ -232,7 +232,6 @@ struct swap_info_struct { + unsigned long flags; /* SWP_USED etc: see above */ + signed short prio; /* swap priority of this type */ + struct plist_node list; /* entry in swap_active_head */ +- struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */ + signed char type; /* strange name for an index */ + unsigned int max; /* extent of the swap_map */ + unsigned char *swap_map; /* vmalloc'ed array of usage counts */ +@@ -273,6 +272,16 @@ struct swap_info_struct { + */ + struct work_struct discard_work; /* discard worker */ + struct swap_cluster_list discard_clusters; /* discard clusters list */ ++ struct plist_node avail_lists[0]; /* ++ * entries in swap_avail_heads, one ++ * entry per node. ++ * Must be last as the number of the ++ * array is nr_node_ids, which is not ++ * a fixed value so have to allocate ++ * dynamically. ++ * And it has to be an array so that ++ * plist_for_each_* can work. 
++ */ + }; + + #ifdef CONFIG_64BIT +diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h +index 7e7fbfb84e8e..50c74a77db55 100644 +--- a/include/linux/usb/tcpm.h ++++ b/include/linux/usb/tcpm.h +@@ -89,6 +89,7 @@ struct tcpc_config { + enum typec_port_data data; + enum typec_role default_role; + bool try_role_hw; /* try.{src,snk} implemented in hardware */ ++ bool self_powered; /* port belongs to a self powered device */ + + const struct typec_altmode_desc *alt_modes; + }; +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index 7a4ee7852ca4..2cfd3b4573b0 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -602,6 +602,7 @@ struct se_session { + struct se_node_acl *se_node_acl; + struct se_portal_group *se_tpg; + void *fabric_sess_ptr; ++ struct percpu_ref cmd_count; + struct list_head sess_list; + struct list_head sess_acl_list; + struct list_head sess_cmd_list; +diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h +index f4147b398431..eb9d0923c55c 100644 +--- a/include/target/target_core_fabric.h ++++ b/include/target/target_core_fabric.h +@@ -116,7 +116,7 @@ struct se_session *target_setup_session(struct se_portal_group *, + struct se_session *, void *)); + void target_remove_session(struct se_session *); + +-void transport_init_session(struct se_session *); ++int transport_init_session(struct se_session *se_sess); + struct se_session *transport_alloc_session(enum target_prot_op); + int transport_alloc_session_tags(struct se_session *, unsigned int, + unsigned int); +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 2954e4b3abd5..341806668f03 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -3285,12 +3285,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) + return err; + + if (BPF_SRC(insn->code) == BPF_X) { ++ struct bpf_reg_state *src_reg = regs + insn->src_reg; ++ struct bpf_reg_state *dst_reg = regs + insn->dst_reg; ++ + if (BPF_CLASS(insn->code) == BPF_ALU64) { + /* case: R1 = R2 + * copy register state to dest reg + */ +- regs[insn->dst_reg] = regs[insn->src_reg]; +- regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; ++ *dst_reg = *src_reg; ++ dst_reg->live |= REG_LIVE_WRITTEN; + } else { + /* R1 = (u32) R2 */ + if (is_pointer_value(env, insn->src_reg)) { +@@ -3298,9 +3301,14 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) + "R%d partial copy of pointer\n", + insn->src_reg); + return -EACCES; ++ } else if (src_reg->type == SCALAR_VALUE) { ++ *dst_reg = *src_reg; ++ dst_reg->live |= REG_LIVE_WRITTEN; ++ } else { ++ mark_reg_unknown(env, regs, ++ insn->dst_reg); + } +- mark_reg_unknown(env, regs, insn->dst_reg); +- coerce_reg_to_size(®s[insn->dst_reg], 4); ++ coerce_reg_to_size(dst_reg, 4); + } + } else { + /* case: R = imm +@@ -5341,10 +5349,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) + int i, cnt, size, ctx_field_size, delta = 0; + const int insn_cnt = env->prog->len; + struct bpf_insn insn_buf[16], *insn; ++ u32 target_size, size_default, off; + struct bpf_prog *new_prog; + enum bpf_access_type type; + bool is_narrower_load; +- u32 target_size; + + if (ops->gen_prologue) { + cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, +@@ -5421,9 +5429,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) + * we will apply proper mask to the result. 
+ */ + is_narrower_load = size < ctx_field_size; ++ size_default = bpf_ctx_off_adjust_machine(ctx_field_size); ++ off = insn->off; + if (is_narrower_load) { +- u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size); +- u32 off = insn->off; + u8 size_code; + + if (type == BPF_WRITE) { +@@ -5451,12 +5459,23 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) + } + + if (is_narrower_load && size < target_size) { +- if (ctx_field_size <= 4) ++ u8 shift = (off & (size_default - 1)) * 8; ++ ++ if (ctx_field_size <= 4) { ++ if (shift) ++ insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, ++ insn->dst_reg, ++ shift); + insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, + (1 << size * 8) - 1); +- else ++ } else { ++ if (shift) ++ insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, ++ insn->dst_reg, ++ shift); + insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, + (1 << size * 8) - 1); ++ } + } + + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); +diff --git a/mm/page-writeback.c b/mm/page-writeback.c +index 84ae9bf5858a..ea4fd3af3b4b 100644 +--- a/mm/page-writeback.c ++++ b/mm/page-writeback.c +@@ -2156,6 +2156,7 @@ int write_cache_pages(struct address_space *mapping, + { + int ret = 0; + int done = 0; ++ int error; + struct pagevec pvec; + int nr_pages; + pgoff_t uninitialized_var(writeback_index); +@@ -2236,25 +2237,31 @@ continue_unlock: + goto continue_unlock; + + trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); +- ret = (*writepage)(page, wbc, data); +- if (unlikely(ret)) { +- if (ret == AOP_WRITEPAGE_ACTIVATE) { ++ error = (*writepage)(page, wbc, data); ++ if (unlikely(error)) { ++ /* ++ * Handle errors according to the type of ++ * writeback. There's no need to continue for ++ * background writeback. Just push done_index ++ * past this page so media errors won't choke ++ * writeout for the entire file. For integrity ++ * writeback, we must process the entire dirty ++ * set regardless of errors because the fs may ++ * still have state to clear for each page. In ++ * that case we continue processing and return ++ * the first error. ++ */ ++ if (error == AOP_WRITEPAGE_ACTIVATE) { + unlock_page(page); +- ret = 0; +- } else { +- /* +- * done_index is set past this page, +- * so media errors will not choke +- * background writeout for the entire +- * file. This has consequences for +- * range_cyclic semantics (ie. it may +- * not be suitable for data integrity +- * writeout). +- */ ++ error = 0; ++ } else if (wbc->sync_mode != WB_SYNC_ALL) { ++ ret = error; + done_index = page->index + 1; + done = 1; + break; + } ++ if (!ret) ++ ret = error; + } + + /* +diff --git a/mm/swapfile.c b/mm/swapfile.c +index 67aaf7ae22ff..340ef3177686 100644 +--- a/mm/swapfile.c ++++ b/mm/swapfile.c +@@ -2820,8 +2820,9 @@ static struct swap_info_struct *alloc_swap_info(void) + struct swap_info_struct *p; + unsigned int type; + int i; ++ int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node); + +- p = kvzalloc(sizeof(*p), GFP_KERNEL); ++ p = kvzalloc(size, GFP_KERNEL); + if (!p) + return ERR_PTR(-ENOMEM); + +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c +index f12555f23a49..7f800c3480f7 100644 +--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ -5668,6 +5668,12 @@ static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, + return true; + } + ++ /* Check if request ended in Command Status - no way to retreive ++ * any extra parameters in this case. 
++ */ ++ if (hdr->evt == HCI_EV_CMD_STATUS) ++ return false; ++ + if (hdr->evt != HCI_EV_CMD_COMPLETE) { + bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)", + hdr->evt); +diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c +index 5372e2042adf..2cb8da465b98 100644 +--- a/net/bridge/br_forward.c ++++ b/net/bridge/br_forward.c +@@ -65,6 +65,7 @@ EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); + + int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) + { ++ skb->tstamp = 0; + return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, + net, sk, skb, NULL, skb->dev, + br_dev_queue_push_xmit); +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index abbbd7fd17fe..589ec5b9ec5f 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -5258,7 +5258,6 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, + unsigned long chunk; + struct sk_buff *skb; + struct page *page; +- gfp_t gfp_head; + int i; + + *errcode = -EMSGSIZE; +@@ -5268,12 +5267,8 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, + if (npages > MAX_SKB_FRAGS) + return NULL; + +- gfp_head = gfp_mask; +- if (gfp_head & __GFP_DIRECT_RECLAIM) +- gfp_head |= __GFP_RETRY_MAYFAIL; +- + *errcode = -ENOBUFS; +- skb = alloc_skb(header_len, gfp_head); ++ skb = alloc_skb(header_len, gfp_mask); + if (!skb) + return NULL; + +diff --git a/net/core/sock.c b/net/core/sock.c +index 5a8a3b76832f..c9668dcb5eb9 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -698,6 +698,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, + break; + case SO_DONTROUTE: + sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); ++ sk_dst_reset(sk); + break; + case SO_BROADCAST: + sock_valbool_flag(sk, SOCK_BROADCAST, valbool); +diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c +index 2c8d313ae216..fb1e7f237f53 100644 +--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c ++++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c +@@ -57,17 +57,14 @@ struct clusterip_config { + enum clusterip_hashmode hash_mode; /* which hashing mode */ + u_int32_t hash_initval; /* hash initialization */ + struct rcu_head rcu; +- ++ struct net *net; /* netns for pernet list */ + char ifname[IFNAMSIZ]; /* device ifname */ +- struct notifier_block notifier; /* refresh c->ifindex in it */ + }; + + #ifdef CONFIG_PROC_FS + static const struct file_operations clusterip_proc_fops; + #endif + +-static unsigned int clusterip_net_id __read_mostly; +- + struct clusterip_net { + struct list_head configs; + /* lock protects the configs list */ +@@ -78,16 +75,30 @@ struct clusterip_net { + #endif + }; + ++static unsigned int clusterip_net_id __read_mostly; ++static inline struct clusterip_net *clusterip_pernet(struct net *net) ++{ ++ return net_generic(net, clusterip_net_id); ++} ++ + static inline void + clusterip_config_get(struct clusterip_config *c) + { + refcount_inc(&c->refcount); + } + +- + static void clusterip_config_rcu_free(struct rcu_head *head) + { +- kfree(container_of(head, struct clusterip_config, rcu)); ++ struct clusterip_config *config; ++ struct net_device *dev; ++ ++ config = container_of(head, struct clusterip_config, rcu); ++ dev = dev_get_by_name(config->net, config->ifname); ++ if (dev) { ++ dev_mc_del(dev, config->clustermac); ++ dev_put(dev); ++ } ++ kfree(config); + } + + static inline void +@@ -101,9 +112,9 @@ clusterip_config_put(struct clusterip_config *c) + * entry(rule) is removed, remove the config from lists, but don't free it + * yet, since proc-files could still be holding references 
*/ + static inline void +-clusterip_config_entry_put(struct net *net, struct clusterip_config *c) ++clusterip_config_entry_put(struct clusterip_config *c) + { +- struct clusterip_net *cn = net_generic(net, clusterip_net_id); ++ struct clusterip_net *cn = clusterip_pernet(c->net); + + local_bh_disable(); + if (refcount_dec_and_lock(&c->entries, &cn->lock)) { +@@ -118,8 +129,6 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c) + spin_unlock(&cn->lock); + local_bh_enable(); + +- unregister_netdevice_notifier(&c->notifier); +- + return; + } + local_bh_enable(); +@@ -129,7 +138,7 @@ static struct clusterip_config * + __clusterip_config_find(struct net *net, __be32 clusterip) + { + struct clusterip_config *c; +- struct clusterip_net *cn = net_generic(net, clusterip_net_id); ++ struct clusterip_net *cn = clusterip_pernet(net); + + list_for_each_entry_rcu(c, &cn->configs, list) { + if (c->clusterip == clusterip) +@@ -181,32 +190,37 @@ clusterip_netdev_event(struct notifier_block *this, unsigned long event, + void *ptr) + { + struct net_device *dev = netdev_notifier_info_to_dev(ptr); ++ struct net *net = dev_net(dev); ++ struct clusterip_net *cn = clusterip_pernet(net); + struct clusterip_config *c; + +- c = container_of(this, struct clusterip_config, notifier); +- switch (event) { +- case NETDEV_REGISTER: +- if (!strcmp(dev->name, c->ifname)) { +- c->ifindex = dev->ifindex; +- dev_mc_add(dev, c->clustermac); +- } +- break; +- case NETDEV_UNREGISTER: +- if (dev->ifindex == c->ifindex) { +- dev_mc_del(dev, c->clustermac); +- c->ifindex = -1; +- } +- break; +- case NETDEV_CHANGENAME: +- if (!strcmp(dev->name, c->ifname)) { +- c->ifindex = dev->ifindex; +- dev_mc_add(dev, c->clustermac); +- } else if (dev->ifindex == c->ifindex) { +- dev_mc_del(dev, c->clustermac); +- c->ifindex = -1; ++ spin_lock_bh(&cn->lock); ++ list_for_each_entry_rcu(c, &cn->configs, list) { ++ switch (event) { ++ case NETDEV_REGISTER: ++ if (!strcmp(dev->name, c->ifname)) { ++ c->ifindex = dev->ifindex; ++ dev_mc_add(dev, c->clustermac); ++ } ++ break; ++ case NETDEV_UNREGISTER: ++ if (dev->ifindex == c->ifindex) { ++ dev_mc_del(dev, c->clustermac); ++ c->ifindex = -1; ++ } ++ break; ++ case NETDEV_CHANGENAME: ++ if (!strcmp(dev->name, c->ifname)) { ++ c->ifindex = dev->ifindex; ++ dev_mc_add(dev, c->clustermac); ++ } else if (dev->ifindex == c->ifindex) { ++ dev_mc_del(dev, c->clustermac); ++ c->ifindex = -1; ++ } ++ break; + } +- break; + } ++ spin_unlock_bh(&cn->lock); + + return NOTIFY_DONE; + } +@@ -215,30 +229,44 @@ static struct clusterip_config * + clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i, + __be32 ip, const char *iniface) + { +- struct clusterip_net *cn = net_generic(net, clusterip_net_id); ++ struct clusterip_net *cn = clusterip_pernet(net); + struct clusterip_config *c; ++ struct net_device *dev; + int err; + ++ if (iniface[0] == '\0') { ++ pr_info("Please specify an interface name\n"); ++ return ERR_PTR(-EINVAL); ++ } ++ + c = kzalloc(sizeof(*c), GFP_ATOMIC); + if (!c) + return ERR_PTR(-ENOMEM); + +- strcpy(c->ifname, iniface); +- c->ifindex = -1; +- c->clusterip = ip; ++ dev = dev_get_by_name(net, iniface); ++ if (!dev) { ++ pr_info("no such interface %s\n", iniface); ++ kfree(c); ++ return ERR_PTR(-ENOENT); ++ } ++ c->ifindex = dev->ifindex; ++ strcpy(c->ifname, dev->name); + memcpy(&c->clustermac, &i->clustermac, ETH_ALEN); ++ dev_mc_add(dev, c->clustermac); ++ dev_put(dev); ++ ++ c->clusterip = ip; + c->num_total_nodes = i->num_total_nodes; + 
clusterip_config_init_nodelist(c, i); + c->hash_mode = i->hash_mode; + c->hash_initval = i->hash_initval; ++ c->net = net; + refcount_set(&c->refcount, 1); + + spin_lock_bh(&cn->lock); + if (__clusterip_config_find(net, ip)) { +- spin_unlock_bh(&cn->lock); +- kfree(c); +- +- return ERR_PTR(-EBUSY); ++ err = -EBUSY; ++ goto out_config_put; + } + + list_add_rcu(&c->list, &cn->configs); +@@ -260,22 +288,17 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i, + } + #endif + +- c->notifier.notifier_call = clusterip_netdev_event; +- err = register_netdevice_notifier(&c->notifier); +- if (!err) { +- refcount_set(&c->entries, 1); +- return c; +- } ++ refcount_set(&c->entries, 1); ++ return c; + + #ifdef CONFIG_PROC_FS +- proc_remove(c->pde); + err: + #endif + spin_lock_bh(&cn->lock); + list_del_rcu(&c->list); ++out_config_put: + spin_unlock_bh(&cn->lock); + clusterip_config_put(c); +- + return ERR_PTR(err); + } + +@@ -475,34 +498,20 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par) + &e->ip.dst.s_addr); + return -EINVAL; + } else { +- struct net_device *dev; +- +- if (e->ip.iniface[0] == '\0') { +- pr_info("Please specify an interface name\n"); +- return -EINVAL; +- } +- +- dev = dev_get_by_name(par->net, e->ip.iniface); +- if (!dev) { +- pr_info("no such interface %s\n", +- e->ip.iniface); +- return -ENOENT; +- } +- dev_put(dev); +- + config = clusterip_config_init(par->net, cipinfo, + e->ip.dst.s_addr, + e->ip.iniface); + if (IS_ERR(config)) + return PTR_ERR(config); + } +- } ++ } else if (memcmp(&config->clustermac, &cipinfo->clustermac, ETH_ALEN)) ++ return -EINVAL; + + ret = nf_ct_netns_get(par->net, par->family); + if (ret < 0) { + pr_info("cannot load conntrack support for proto=%u\n", + par->family); +- clusterip_config_entry_put(par->net, config); ++ clusterip_config_entry_put(config); + clusterip_config_put(config); + return ret; + } +@@ -524,7 +533,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par) + + /* if no more entries are referencing the config, remove it + * from the list and destroy the proc entry */ +- clusterip_config_entry_put(par->net, cipinfo->config); ++ clusterip_config_entry_put(cipinfo->config); + + clusterip_config_put(cipinfo->config); + +@@ -806,7 +815,7 @@ static const struct file_operations clusterip_proc_fops = { + + static int clusterip_net_init(struct net *net) + { +- struct clusterip_net *cn = net_generic(net, clusterip_net_id); ++ struct clusterip_net *cn = clusterip_pernet(net); + int ret; + + INIT_LIST_HEAD(&cn->configs); +@@ -831,13 +840,12 @@ static int clusterip_net_init(struct net *net) + + static void clusterip_net_exit(struct net *net) + { +- struct clusterip_net *cn = net_generic(net, clusterip_net_id); ++ struct clusterip_net *cn = clusterip_pernet(net); + #ifdef CONFIG_PROC_FS + proc_remove(cn->procdir); + cn->procdir = NULL; + #endif + nf_unregister_net_hook(net, &cip_arp_ops); +- WARN_ON_ONCE(!list_empty(&cn->configs)); + } + + static struct pernet_operations clusterip_net_ops = { +@@ -847,6 +855,10 @@ static struct pernet_operations clusterip_net_ops = { + .size = sizeof(struct clusterip_net), + }; + ++struct notifier_block cip_netdev_notifier = { ++ .notifier_call = clusterip_netdev_event ++}; ++ + static int __init clusterip_tg_init(void) + { + int ret; +@@ -859,11 +871,17 @@ static int __init clusterip_tg_init(void) + if (ret < 0) + goto cleanup_subsys; + ++ ret = register_netdevice_notifier(&cip_netdev_notifier); ++ if (ret < 0) ++ goto unregister_target; ++ + 
pr_info("ClusterIP Version %s loaded successfully\n", + CLUSTERIP_VERSION); + + return 0; + ++unregister_target: ++ xt_unregister_target(&clusterip_tg_reg); + cleanup_subsys: + unregister_pernet_subsys(&clusterip_net_ops); + return ret; +@@ -873,6 +891,7 @@ static void __exit clusterip_tg_exit(void) + { + pr_info("ClusterIP Version %s unloading\n", CLUSTERIP_VERSION); + ++ unregister_netdevice_notifier(&cip_netdev_notifier); + xt_unregister_target(&clusterip_tg_reg); + unregister_pernet_subsys(&clusterip_net_ops); + +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c +index 9a4261e50272..506b2ae07bb3 100644 +--- a/net/ipv6/af_inet6.c ++++ b/net/ipv6/af_inet6.c +@@ -309,6 +309,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, + + /* Check if the address belongs to the host. */ + if (addr_type == IPV6_ADDR_MAPPED) { ++ struct net_device *dev = NULL; + int chk_addr_ret; + + /* Binding to v4-mapped address on a v6-only socket +@@ -319,9 +320,20 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, + goto out; + } + ++ rcu_read_lock(); ++ if (sk->sk_bound_dev_if) { ++ dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); ++ if (!dev) { ++ err = -ENODEV; ++ goto out_unlock; ++ } ++ } ++ + /* Reproduce AF_INET checks to make the bindings consistent */ + v4addr = addr->sin6_addr.s6_addr32[3]; +- chk_addr_ret = inet_addr_type(net, v4addr); ++ chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr); ++ rcu_read_unlock(); ++ + if (!inet_can_nonlocal_bind(net, inet) && + v4addr != htonl(INADDR_ANY) && + chk_addr_ret != RTN_LOCAL && +diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c +index c00b6a2e8e3c..13ade5782847 100644 +--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c ++++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c +@@ -219,10 +219,6 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb, + struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); + u32 ip; + +- /* MAC can be src only */ +- if (!(opt->flags & IPSET_DIM_TWO_SRC)) +- return 0; +- + ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC)); + if (ip < map->first_ip || ip > map->last_ip) + return -IPSET_ERR_BITMAP_RANGE; +@@ -233,7 +229,11 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb, + return -EINVAL; + + e.id = ip_to_id(map, ip); +- memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN); ++ ++ if (opt->flags & IPSET_DIM_ONE_SRC) ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_source); ++ else ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest); + + return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); + } +diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c +index 1ab5ed2f6839..fd87de3ed55b 100644 +--- a/net/netfilter/ipset/ip_set_hash_ipmac.c ++++ b/net/netfilter/ipset/ip_set_hash_ipmac.c +@@ -103,7 +103,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb, + (skb_mac_header(skb) + ETH_HLEN) > skb->data) + return -EINVAL; + +- memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN); ++ if (opt->flags & IPSET_DIM_ONE_SRC) ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_source); ++ else ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest); ++ + if (ether_addr_equal(e.ether, invalid_ether)) + return -EINVAL; + +@@ -211,15 +215,15 @@ hash_ipmac6_kadt(struct ip_set *set, const struct sk_buff *skb, + }; + struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); + +- /* MAC can be src only */ +- if (!(opt->flags & IPSET_DIM_TWO_SRC)) +- return 0; +- + if 
(skb_mac_header(skb) < skb->head || + (skb_mac_header(skb) + ETH_HLEN) > skb->data) + return -EINVAL; + +- memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN); ++ if (opt->flags & IPSET_DIM_ONE_SRC) ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_source); ++ else ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest); ++ + if (ether_addr_equal(e.ether, invalid_ether)) + return -EINVAL; + +diff --git a/net/netfilter/ipset/ip_set_hash_mac.c b/net/netfilter/ipset/ip_set_hash_mac.c +index f9d5a2a1e3d0..4fe5f243d0a3 100644 +--- a/net/netfilter/ipset/ip_set_hash_mac.c ++++ b/net/netfilter/ipset/ip_set_hash_mac.c +@@ -81,15 +81,15 @@ hash_mac4_kadt(struct ip_set *set, const struct sk_buff *skb, + struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } }; + struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); + +- /* MAC can be src only */ +- if (!(opt->flags & IPSET_DIM_ONE_SRC)) +- return 0; +- + if (skb_mac_header(skb) < skb->head || + (skb_mac_header(skb) + ETH_HLEN) > skb->data) + return -EINVAL; + +- ether_addr_copy(e.ether, eth_hdr(skb)->h_source); ++ if (opt->flags & IPSET_DIM_ONE_SRC) ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_source); ++ else ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest); ++ + if (is_zero_ether_addr(e.ether)) + return -EINVAL; + return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); +diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c +index 904e775d1a44..cf40a8284a38 100644 +--- a/samples/bpf/bpf_load.c ++++ b/samples/bpf/bpf_load.c +@@ -55,6 +55,23 @@ static int populate_prog_array(const char *event, int prog_fd) + return 0; + } + ++static int write_kprobe_events(const char *val) ++{ ++ int fd, ret, flags; ++ ++ if ((val != NULL) && (val[0] == '\0')) ++ flags = O_WRONLY | O_TRUNC; ++ else ++ flags = O_WRONLY | O_APPEND; ++ ++ fd = open("/sys/kernel/debug/tracing/kprobe_events", flags); ++ ++ ret = write(fd, val, strlen(val)); ++ close(fd); ++ ++ return ret; ++} ++ + static int load_and_attach(const char *event, struct bpf_insn *prog, int size) + { + bool is_socket = strncmp(event, "socket", 6) == 0; +@@ -166,10 +183,9 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) + + #ifdef __x86_64__ + if (strncmp(event, "sys_", 4) == 0) { +- snprintf(buf, sizeof(buf), +- "echo '%c:__x64_%s __x64_%s' >> /sys/kernel/debug/tracing/kprobe_events", +- is_kprobe ? 'p' : 'r', event, event); +- err = system(buf); ++ snprintf(buf, sizeof(buf), "%c:__x64_%s __x64_%s", ++ is_kprobe ? 'p' : 'r', event, event); ++ err = write_kprobe_events(buf); + if (err >= 0) { + need_normal_check = false; + event_prefix = "__x64_"; +@@ -177,10 +193,9 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) + } + #endif + if (need_normal_check) { +- snprintf(buf, sizeof(buf), +- "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events", +- is_kprobe ? 'p' : 'r', event, event); +- err = system(buf); ++ snprintf(buf, sizeof(buf), "%c:%s %s", ++ is_kprobe ? 
'p' : 'r', event, event); ++ err = write_kprobe_events(buf); + if (err < 0) { + printf("failed to create kprobe '%s' error '%s'\n", + event, strerror(errno)); +@@ -520,7 +535,7 @@ static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map) + return 1; + + /* clear all kprobes */ +- i = system("echo \"\" > /sys/kernel/debug/tracing/kprobe_events"); ++ i = write_kprobe_events(""); + + /* scan over all elf sections to get license and map info */ + for (i = 1; i < ehdr.e_shnum; i++) { +diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l +index 25bd2b89fe3f..c2f577d71964 100644 +--- a/scripts/kconfig/zconf.l ++++ b/scripts/kconfig/zconf.l +@@ -73,7 +73,7 @@ static void warn_ignored_character(char chr) + { + fprintf(stderr, + "%s:%d:warning: ignoring unsupported character '%c'\n", +- zconf_curname(), zconf_lineno(), chr); ++ current_file->name, yylineno, chr); + } + %} + +@@ -221,6 +221,8 @@ n [A-Za-z0-9_-] + } + <<EOF>> { + BEGIN(INITIAL); ++ yylval.string = text; ++ return T_WORD_QUOTE; + } + } + +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c +index fe251c6f09f1..3c3878f0d2fa 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -2934,7 +2934,7 @@ static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data) + return rc; + + /* Allow all mounts performed by the kernel */ +- if (flags & MS_KERNMOUNT) ++ if (flags & (MS_KERNMOUNT | MS_SUBMOUNT)) + return 0; + + ad.type = LSM_AUDIT_DATA_DENTRY; +diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig +index 529d9f405fa9..0cb65d0864cc 100644 +--- a/sound/firewire/Kconfig ++++ b/sound/firewire/Kconfig +@@ -41,6 +41,7 @@ config SND_OXFW + * Mackie(Loud) U.420/U.420d + * TASCAM FireOne + * Stanton Controllers & Systems 1 Deck/Mixer ++ * APOGEE duet FireWire + + To compile this driver as a module, choose M here: the module + will be called snd-oxfw. 
+diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c +index 93676354f87f..de4af8a41ff0 100644 +--- a/sound/firewire/bebob/bebob.c ++++ b/sound/firewire/bebob/bebob.c +@@ -434,7 +434,7 @@ static const struct ieee1394_device_id bebob_id_table[] = { + /* Apogee Electronics, DA/AD/DD-16X (X-FireWire card) */ + SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00010048, &spec_normal), + /* Apogee Electronics, Ensemble */ +- SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00001eee, &spec_normal), ++ SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x01eeee, &spec_normal), + /* ESI, Quatafire610 */ + SND_BEBOB_DEV_ENTRY(VEN_ESI, 0x00010064, &spec_normal), + /* AcousticReality, eARMasterOne */ +diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c +index 2ea8be6c8584..5f82a375725a 100644 +--- a/sound/firewire/oxfw/oxfw.c ++++ b/sound/firewire/oxfw/oxfw.c +@@ -20,6 +20,7 @@ + #define VENDOR_LACIE 0x00d04b + #define VENDOR_TASCAM 0x00022e + #define OUI_STANTON 0x001260 ++#define OUI_APOGEE 0x0003db + + #define MODEL_SATELLITE 0x00200f + +@@ -436,6 +437,13 @@ static const struct ieee1394_device_id oxfw_id_table[] = { + .vendor_id = OUI_STANTON, + .model_id = 0x002000, + }, ++ // APOGEE, duet FireWire ++ { ++ .match_flags = IEEE1394_MATCH_VENDOR_ID | ++ IEEE1394_MATCH_MODEL_ID, ++ .vendor_id = OUI_APOGEE, ++ .model_id = 0x01dddd, ++ }, + { } + }; + MODULE_DEVICE_TABLE(ieee1394, oxfw_id_table); +diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c +index 3135e9eafd18..7f376b63a166 100644 +--- a/sound/soc/amd/acp-pcm-dma.c ++++ b/sound/soc/amd/acp-pcm-dma.c +@@ -1147,18 +1147,21 @@ static int acp_dma_new(struct snd_soc_pcm_runtime *rtd) + struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, + DRV_NAME); + struct audio_drv_data *adata = dev_get_drvdata(component->dev); ++ struct device *parent = component->dev->parent; + + switch (adata->asic_type) { + case CHIP_STONEY: + ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, + SNDRV_DMA_TYPE_DEV, +- NULL, ST_MIN_BUFFER, ++ parent, ++ ST_MIN_BUFFER, + ST_MAX_BUFFER); + break; + default: + ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, + SNDRV_DMA_TYPE_DEV, +- NULL, MIN_BUFFER, ++ parent, ++ MIN_BUFFER, + MAX_BUFFER); + break; + } +diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c +index 3356c91f55b0..e3de1ff3b6c2 100644 +--- a/sound/soc/codecs/pcm3168a.c ++++ b/sound/soc/codecs/pcm3168a.c +@@ -688,15 +688,22 @@ err_clk: + } + EXPORT_SYMBOL_GPL(pcm3168a_probe); + +-void pcm3168a_remove(struct device *dev) ++static void pcm3168a_disable(struct device *dev) + { + struct pcm3168a_priv *pcm3168a = dev_get_drvdata(dev); + +- pm_runtime_disable(dev); + regulator_bulk_disable(ARRAY_SIZE(pcm3168a->supplies), +- pcm3168a->supplies); ++ pcm3168a->supplies); + clk_disable_unprepare(pcm3168a->scki); + } ++ ++void pcm3168a_remove(struct device *dev) ++{ ++ pm_runtime_disable(dev); ++#ifndef CONFIG_PM ++ pcm3168a_disable(dev); ++#endif ++} + EXPORT_SYMBOL_GPL(pcm3168a_remove); + + #ifdef CONFIG_PM +@@ -751,10 +758,7 @@ static int pcm3168a_rt_suspend(struct device *dev) + + regcache_cache_only(pcm3168a->regmap, true); + +- regulator_bulk_disable(ARRAY_SIZE(pcm3168a->supplies), +- pcm3168a->supplies); +- +- clk_disable_unprepare(pcm3168a->scki); ++ pcm3168a_disable(dev); + + return 0; + } +diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile +index 95563b8e1ad7..ed61fb3a46c0 100644 +--- a/tools/lib/subcmd/Makefile ++++ b/tools/lib/subcmd/Makefile +@@ -36,8 +36,6 @@ endif + CFLAGS += -D_LARGEFILE64_SOURCE 
-D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE + + CFLAGS += -I$(srctree)/tools/include/ +-CFLAGS += -I$(srctree)/include/uapi +-CFLAGS += -I$(srctree)/include + + SUBCMD_IN := $(OUTPUT)libsubcmd-in.o + +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config +index e30d20fb482d..f00ea77f5f08 100644 +--- a/tools/perf/Makefile.config ++++ b/tools/perf/Makefile.config +@@ -294,6 +294,8 @@ ifndef NO_BIONIC + $(call feature_check,bionic) + ifeq ($(feature-bionic), 1) + BIONIC := 1 ++ CFLAGS += -DLACKS_SIGQUEUE_PROTOTYPE ++ CFLAGS += -DLACKS_OPEN_MEMSTREAM_PROTOTYPE + EXTLIBS := $(filter-out -lrt,$(EXTLIBS)) + EXTLIBS := $(filter-out -lpthread,$(EXTLIBS)) + endif +diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c +index db0ba8caf5a2..ba8ecaf52200 100644 +--- a/tools/perf/arch/x86/util/intel-pt.c ++++ b/tools/perf/arch/x86/util/intel-pt.c +@@ -524,10 +524,21 @@ static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu, + struct perf_evsel *evsel) + { + int err; ++ char c; + + if (!evsel) + return 0; + ++ /* ++ * If supported, force pass-through config term (pt=1) even if user ++ * sets pt=0, which avoids senseless kernel errors. ++ */ ++ if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 && ++ !(evsel->attr.config & 1)) { ++ pr_warning("pt=0 doesn't make sense, forcing pt=1\n"); ++ evsel->attr.config |= 1; ++ } ++ + err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds", + "cyc_thresh", "caps/psb_cyc", + evsel->attr.config); +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c +index d097b5b47eb8..40720150ccd8 100644 +--- a/tools/perf/builtin-stat.c ++++ b/tools/perf/builtin-stat.c +@@ -1961,7 +1961,7 @@ static int parse_metric_groups(const struct option *opt, + return metricgroup__parse_groups(opt, str, &metric_events); + } + +-static const struct option stat_options[] = { ++static struct option stat_options[] = { + OPT_BOOLEAN('T', "transaction", &transaction_run, + "hardware transaction statistics"), + OPT_CALLBACK('e', "event", &evsel_list, "event", +@@ -2847,6 +2847,12 @@ int cmd_stat(int argc, const char **argv) + return -ENOMEM; + + parse_events__shrink_config_terms(); ++ ++ /* String-parsing callback-based options would segfault when negated */ ++ set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG); ++ set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG); ++ set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG); ++ + argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands, + (const char **) stat_usage, + PARSE_OPT_STOP_AT_NON_OPTION); +diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c +index a827919c6263..775b99833e51 100644 +--- a/tools/perf/builtin-timechart.c ++++ b/tools/perf/builtin-timechart.c +@@ -43,6 +43,10 @@ + #include "util/data.h" + #include "util/debug.h" + ++#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE ++FILE *open_memstream(char **ptr, size_t *sizeloc); ++#endif ++ + #define SUPPORT_OLD_POWER_EVENTS 1 + #define PWR_EVENT_EXIT -1 + +diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json +index 36c903faed0b..71e9737f4614 100644 +--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json ++++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json +@@ -73,7 +73,7 @@ + }, + { + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads", +- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + 
MEM_LOAD_RETIRED.FB_HIT_PS )", ++ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )", + "MetricGroup": "Memory_Bound;Memory_Lat", + "MetricName": "Load_Miss_Real_Latency" + }, +diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json +index 36c903faed0b..71e9737f4614 100644 +--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json ++++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json +@@ -73,7 +73,7 @@ + }, + { + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads", +- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )", ++ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )", + "MetricGroup": "Memory_Bound;Memory_Lat", + "MetricName": "Load_Miss_Real_Latency" + }, +diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c +index a467615c5a0e..910e25e64188 100644 +--- a/tools/perf/tests/bp_signal.c ++++ b/tools/perf/tests/bp_signal.c +@@ -291,12 +291,20 @@ int test__bp_signal(struct test *test __maybe_unused, int subtest __maybe_unused + + bool test__bp_signal_is_supported(void) + { +-/* +- * The powerpc so far does not have support to even create +- * instruction breakpoint using the perf event interface. +- * Once it's there we can release this. +- */ +-#if defined(__powerpc__) || defined(__s390x__) ++ /* ++ * PowerPC and S390 do not support creation of instruction ++ * breakpoints using the perf_event interface. ++ * ++ * ARM requires explicit rounding down of the instruction ++ * pointer in Thumb mode, and then requires the single-step ++ * to be handled explicitly in the overflow handler to avoid ++ * stepping into the SIGIO handler and getting stuck on the ++ * breakpointed instruction. ++ * ++ * Just disable the test for these architectures until these ++ * issues are resolved. ++ */ ++#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__) + return false; + #else + return true; +diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c +index ca577658e890..7b5e15cc6b71 100644 +--- a/tools/perf/util/cs-etm.c ++++ b/tools/perf/util/cs-etm.c +@@ -1005,7 +1005,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq) + } + + swap_packet: +- if (etmq->etm->synth_opts.last_branch) { ++ if (etm->sample_branches || etm->synth_opts.last_branch) { + /* + * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for + * the next incoming packet. 
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c +index be440df29615..819aa4491b53 100644 +--- a/tools/perf/util/evlist.c ++++ b/tools/perf/util/evlist.c +@@ -34,6 +34,10 @@ + #include <linux/log2.h> + #include <linux/err.h> + ++#ifdef LACKS_SIGQUEUE_PROTOTYPE ++int sigqueue(pid_t pid, int sig, const union sigval value); ++#endif ++ + #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) + #define SID(e, x, y) xyarray__entry(e->sample_id, x, y) + +diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c +index f8cd3e7c9186..ebb18a9bc460 100644 +--- a/tools/perf/util/parse-events.c ++++ b/tools/perf/util/parse-events.c +@@ -2454,7 +2454,7 @@ restart: + if (!name_only && strlen(syms->alias)) + snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); + else +- strncpy(name, syms->symbol, MAX_NAME_LEN); ++ strlcpy(name, syms->symbol, MAX_NAME_LEN); + + evt_list[evt_i] = strdup(name); + if (evt_list[evt_i] == NULL) +diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c +index 1cbada2dc6be..f735ee038713 100644 +--- a/tools/perf/util/svghelper.c ++++ b/tools/perf/util/svghelper.c +@@ -334,7 +334,7 @@ static char *cpu_model(void) + if (file) { + while (fgets(buf, 255, file)) { + if (strstr(buf, "model name")) { +- strncpy(cpu_m, &buf[13], 255); ++ strlcpy(cpu_m, &buf[13], 255); + break; + } + } +diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile +index fff7fb1285fc..f3f874ba186b 100644 +--- a/tools/testing/selftests/bpf/Makefile ++++ b/tools/testing/selftests/bpf/Makefile +@@ -124,6 +124,16 @@ endif + endif + endif + ++# Have one program compiled without "-target bpf" to test whether libbpf loads ++# it successfully ++$(OUTPUT)/test_xdp.o: test_xdp.c ++ $(CLANG) $(CLANG_FLAGS) \ ++ -O2 -emit-llvm -c $< -o - | \ ++ $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@ ++ifeq ($(DWARF2BTF),y) ++ $(BTF_PAHOLE) -J $@ ++endif ++ + $(OUTPUT)/%.o: %.c + $(CLANG) $(CLANG_FLAGS) \ + -O2 -target bpf -emit-llvm -c $< -o - | \ +diff --git a/tools/testing/selftests/bpf/test_libbpf.sh b/tools/testing/selftests/bpf/test_libbpf.sh +index d97dc914cd49..8b1bc96d8e0c 100755 +--- a/tools/testing/selftests/bpf/test_libbpf.sh ++++ b/tools/testing/selftests/bpf/test_libbpf.sh +@@ -33,17 +33,11 @@ trap exit_handler 0 2 3 6 9 + + libbpf_open_file test_l4lb.o + +-# TODO: fix libbpf to load noinline functions +-# [warning] libbpf: incorrect bpf_call opcode +-#libbpf_open_file test_l4lb_noinline.o ++# Load a program with BPF-to-BPF calls ++libbpf_open_file test_l4lb_noinline.o + +-# TODO: fix test_xdp_meta.c to load with libbpf +-# [warning] libbpf: test_xdp_meta.o doesn't provide kernel version +-#libbpf_open_file test_xdp_meta.o +- +-# TODO: fix libbpf to handle .eh_frame +-# [warning] libbpf: relocation failed: no section(10) +-#libbpf_open_file ../../../../samples/bpf/tracex3_kern.o ++# Load a program compiled without the "-target bpf" flag ++libbpf_open_file test_xdp.o + + # Success + exit 0 +diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c +index e436b67f2426..9db5a7378f40 100644 +--- a/tools/testing/selftests/bpf/test_verifier.c ++++ b/tools/testing/selftests/bpf/test_verifier.c +@@ -2748,6 +2748,19 @@ static struct bpf_test tests[] = { + .result_unpriv = REJECT, + .result = ACCEPT, + }, ++ { ++ "alu32: mov u32 const", ++ .insns = { ++ BPF_MOV32_IMM(BPF_REG_7, 0), ++ BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1), ++ BPF_MOV32_REG(BPF_REG_0, BPF_REG_7), ++ 
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), ++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), ++ BPF_EXIT_INSN(), ++ }, ++ .result = ACCEPT, ++ .retval = 0, ++ }, + { + "unpriv: partial copy of pointer", + .insns = { +diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h +index 6ae3730c4ee3..76d654ef3234 100644 +--- a/tools/testing/selftests/kselftest_harness.h ++++ b/tools/testing/selftests/kselftest_harness.h +@@ -354,7 +354,7 @@ + * ASSERT_EQ(expected, measured): expected == measured + */ + #define ASSERT_EQ(expected, seen) \ +- __EXPECT(expected, seen, ==, 1) ++ __EXPECT(expected, #expected, seen, #seen, ==, 1) + + /** + * ASSERT_NE(expected, seen) +@@ -365,7 +365,7 @@ + * ASSERT_NE(expected, measured): expected != measured + */ + #define ASSERT_NE(expected, seen) \ +- __EXPECT(expected, seen, !=, 1) ++ __EXPECT(expected, #expected, seen, #seen, !=, 1) + + /** + * ASSERT_LT(expected, seen) +@@ -376,7 +376,7 @@ + * ASSERT_LT(expected, measured): expected < measured + */ + #define ASSERT_LT(expected, seen) \ +- __EXPECT(expected, seen, <, 1) ++ __EXPECT(expected, #expected, seen, #seen, <, 1) + + /** + * ASSERT_LE(expected, seen) +@@ -387,7 +387,7 @@ + * ASSERT_LE(expected, measured): expected <= measured + */ + #define ASSERT_LE(expected, seen) \ +- __EXPECT(expected, seen, <=, 1) ++ __EXPECT(expected, #expected, seen, #seen, <=, 1) + + /** + * ASSERT_GT(expected, seen) +@@ -398,7 +398,7 @@ + * ASSERT_GT(expected, measured): expected > measured + */ + #define ASSERT_GT(expected, seen) \ +- __EXPECT(expected, seen, >, 1) ++ __EXPECT(expected, #expected, seen, #seen, >, 1) + + /** + * ASSERT_GE(expected, seen) +@@ -409,7 +409,7 @@ + * ASSERT_GE(expected, measured): expected >= measured + */ + #define ASSERT_GE(expected, seen) \ +- __EXPECT(expected, seen, >=, 1) ++ __EXPECT(expected, #expected, seen, #seen, >=, 1) + + /** + * ASSERT_NULL(seen) +@@ -419,7 +419,7 @@ + * ASSERT_NULL(measured): NULL == measured + */ + #define ASSERT_NULL(seen) \ +- __EXPECT(NULL, seen, ==, 1) ++ __EXPECT(NULL, "NULL", seen, #seen, ==, 1) + + /** + * ASSERT_TRUE(seen) +@@ -429,7 +429,7 @@ + * ASSERT_TRUE(measured): measured != 0 + */ + #define ASSERT_TRUE(seen) \ +- ASSERT_NE(0, seen) ++ __EXPECT(0, "0", seen, #seen, !=, 1) + + /** + * ASSERT_FALSE(seen) +@@ -439,7 +439,7 @@ + * ASSERT_FALSE(measured): measured == 0 + */ + #define ASSERT_FALSE(seen) \ +- ASSERT_EQ(0, seen) ++ __EXPECT(0, "0", seen, #seen, ==, 1) + + /** + * ASSERT_STREQ(expected, seen) +@@ -472,7 +472,7 @@ + * EXPECT_EQ(expected, measured): expected == measured + */ + #define EXPECT_EQ(expected, seen) \ +- __EXPECT(expected, seen, ==, 0) ++ __EXPECT(expected, #expected, seen, #seen, ==, 0) + + /** + * EXPECT_NE(expected, seen) +@@ -483,7 +483,7 @@ + * EXPECT_NE(expected, measured): expected != measured + */ + #define EXPECT_NE(expected, seen) \ +- __EXPECT(expected, seen, !=, 0) ++ __EXPECT(expected, #expected, seen, #seen, !=, 0) + + /** + * EXPECT_LT(expected, seen) +@@ -494,7 +494,7 @@ + * EXPECT_LT(expected, measured): expected < measured + */ + #define EXPECT_LT(expected, seen) \ +- __EXPECT(expected, seen, <, 0) ++ __EXPECT(expected, #expected, seen, #seen, <, 0) + + /** + * EXPECT_LE(expected, seen) +@@ -505,7 +505,7 @@ + * EXPECT_LE(expected, measured): expected <= measured + */ + #define EXPECT_LE(expected, seen) \ +- __EXPECT(expected, seen, <=, 0) ++ __EXPECT(expected, #expected, seen, #seen, <=, 0) + + /** + * EXPECT_GT(expected, seen) +@@ -516,7 +516,7 @@ + * EXPECT_GT(expected, 
measured): expected > measured + */ + #define EXPECT_GT(expected, seen) \ +- __EXPECT(expected, seen, >, 0) ++ __EXPECT(expected, #expected, seen, #seen, >, 0) + + /** + * EXPECT_GE(expected, seen) +@@ -527,7 +527,7 @@ + * EXPECT_GE(expected, measured): expected >= measured + */ + #define EXPECT_GE(expected, seen) \ +- __EXPECT(expected, seen, >=, 0) ++ __EXPECT(expected, #expected, seen, #seen, >=, 0) + + /** + * EXPECT_NULL(seen) +@@ -537,7 +537,7 @@ + * EXPECT_NULL(measured): NULL == measured + */ + #define EXPECT_NULL(seen) \ +- __EXPECT(NULL, seen, ==, 0) ++ __EXPECT(NULL, "NULL", seen, #seen, ==, 0) + + /** + * EXPECT_TRUE(seen) +@@ -547,7 +547,7 @@ + * EXPECT_TRUE(measured): 0 != measured + */ + #define EXPECT_TRUE(seen) \ +- EXPECT_NE(0, seen) ++ __EXPECT(0, "0", seen, #seen, !=, 0) + + /** + * EXPECT_FALSE(seen) +@@ -557,7 +557,7 @@ + * EXPECT_FALSE(measured): 0 == measured + */ + #define EXPECT_FALSE(seen) \ +- EXPECT_EQ(0, seen) ++ __EXPECT(0, "0", seen, #seen, ==, 0) + + /** + * EXPECT_STREQ(expected, seen) +@@ -597,7 +597,7 @@ + if (_metadata->passed && _metadata->step < 255) \ + _metadata->step++; + +-#define __EXPECT(_expected, _seen, _t, _assert) do { \ ++#define __EXPECT(_expected, _expected_str, _seen, _seen_str, _t, _assert) do { \ + /* Avoid multiple evaluation of the cases */ \ + __typeof__(_expected) __exp = (_expected); \ + __typeof__(_seen) __seen = (_seen); \ +@@ -606,8 +606,8 @@ + unsigned long long __exp_print = (uintptr_t)__exp; \ + unsigned long long __seen_print = (uintptr_t)__seen; \ + __TH_LOG("Expected %s (%llu) %s %s (%llu)", \ +- #_expected, __exp_print, #_t, \ +- #_seen, __seen_print); \ ++ _expected_str, __exp_print, #_t, \ ++ _seen_str, __seen_print); \ + _metadata->passed = 0; \ + /* Ensure the optional handler is triggered */ \ + _metadata->trigger = 1; \ |