author     Mike Pagano <mpagano@gentoo.org>    2020-03-11 13:19:48 -0400
committer  Mike Pagano <mpagano@gentoo.org>    2020-03-11 13:19:48 -0400
commit     a7a2d5b8301b005322e03bf60e509e3f9c6775ab (patch)
tree       720cad6eefdd594ba29137bcf270db33bef57b42
parent     Linux patch 4.19.108 (diff)
download   linux-patches-a7a2d5b8301b005322e03bf60e509e3f9c6775ab.tar.gz
           linux-patches-a7a2d5b8301b005322e03bf60e509e3f9c6775ab.tar.bz2
           linux-patches-a7a2d5b8301b005322e03bf60e509e3f9c6775ab.zip
Linux patch 4.19.109
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README                |    4
-rw-r--r--  1108_linux-4.19.109.patch  | 2869
2 files changed, 2873 insertions, 0 deletions
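
Not part of the commit itself — a minimal usage sketch of how such an incremental patch is typically consumed, assuming the usual genpatches layout (numbered 1xxx_linux-*.patch files applied in order on top of a vanilla 4.19 tree with patch -p1); paths below are placeholders, not taken from this repository:

    # hypothetical example: bring a vanilla 4.19 source tree up to 4.19.109
    cd /usr/src/linux-4.19                       # assumed location of the vanilla tree
    for p in /path/to/linux-patches/1*.patch; do
        patch -p1 < "$p"                         # kernel.org incremental patches apply with -p1
    done
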
diff --git a/0000_README b/0000_README
index 65259b7c..f28ed716 100644
--- a/0000_README
+++ b/0000_README
@@ -471,6 +471,10 @@ Patch: 1107_linux-4.19.108.patch
From: https://www.kernel.org
Desc: Linux 4.19.108
+Patch: 1108_linux-4.19.109.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.109
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1108_linux-4.19.109.patch b/1108_linux-4.19.109.patch
new file mode 100644
index 00000000..ed6fc385
--- /dev/null
+++ b/1108_linux-4.19.109.patch
@@ -0,0 +1,2869 @@
+diff --git a/Makefile b/Makefile
+index 313f0c8dd66f..059c5e0aac15 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 108
++SUBLEVEL = 109
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts
+index 20132477a871..80603af33cab 100644
+--- a/arch/arm/boot/dts/am437x-idk-evm.dts
++++ b/arch/arm/boot/dts/am437x-idk-evm.dts
+@@ -525,11 +525,11 @@
+ * Supply voltage supervisor on board will not allow opp50 so
+ * disable it and set opp100 as suspend OPP.
+ */
+- opp50@300000000 {
++ opp50-300000000 {
+ status = "disabled";
+ };
+
+- opp100@600000000 {
++ opp100-600000000 {
+ opp-suspend;
+ };
+ };
+diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
+index 613e4dc0ed3e..216e1d1a69c7 100644
+--- a/arch/arm/boot/dts/dra76x.dtsi
++++ b/arch/arm/boot/dts/dra76x.dtsi
+@@ -81,3 +81,8 @@
+ reg = <0x3fc>;
+ };
+ };
++
++&mmc3 {
++ /* dra76x is not affected by i887 */
++ max-frequency = <96000000>;
++};
+diff --git a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
+index 6486df3e2942..881cea0b61ba 100644
+--- a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
+@@ -183,7 +183,6 @@
+ pinctrl-0 = <&pinctrl_usdhc4>;
+ bus-width = <8>;
+ non-removable;
+- vmmc-supply = <&vdd_emmc_1p8>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
+index c1ed83131b49..37aeba999be3 100644
+--- a/arch/arm/boot/dts/imx7-colibri.dtsi
++++ b/arch/arm/boot/dts/imx7-colibri.dtsi
+@@ -319,7 +319,6 @@
+ assigned-clock-rates = <400000000>;
+ bus-width = <8>;
+ fsl,tuning-step = <2>;
+- max-frequency = <100000000>;
+ vmmc-supply = <&reg_module_3v3>;
+ vqmmc-supply = <&reg_DCDC3>;
+ non-removable;
+diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
+index 7e22309bccac..074b4ec520c6 100644
+--- a/arch/arm/boot/dts/ls1021a.dtsi
++++ b/arch/arm/boot/dts/ls1021a.dtsi
+@@ -584,7 +584,7 @@
+ };
+
+ mdio0: mdio@2d24000 {
+- compatible = "fsl,etsec2-mdio";
++ compatible = "gianfar";
+ device_type = "mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -593,7 +593,7 @@
+ };
+
+ mdio1: mdio@2d64000 {
+- compatible = "fsl,etsec2-mdio";
++ compatible = "gianfar";
+ device_type = "mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
+index bae179af21f6..e9cfe8e86f33 100644
+--- a/arch/arm/mach-imx/Makefile
++++ b/arch/arm/mach-imx/Makefile
+@@ -89,6 +89,8 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
+ obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
+ obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
+ endif
++AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
++obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
+ obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
+
+ obj-$(CONFIG_SOC_IMX1) += mach-imx1.o
+diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
+index 423dd76bb6b8..9728e39cd182 100644
+--- a/arch/arm/mach-imx/common.h
++++ b/arch/arm/mach-imx/common.h
+@@ -103,17 +103,17 @@ void imx_cpu_die(unsigned int cpu);
+ int imx_cpu_kill(unsigned int cpu);
+
+ #ifdef CONFIG_SUSPEND
+-void v7_cpu_resume(void);
+ void imx53_suspend(void __iomem *ocram_vbase);
+ extern const u32 imx53_suspend_sz;
+ void imx6_suspend(void __iomem *ocram_vbase);
+ #else
+-static inline void v7_cpu_resume(void) {}
+ static inline void imx53_suspend(void __iomem *ocram_vbase) {}
+ static const u32 imx53_suspend_sz;
+ static inline void imx6_suspend(void __iomem *ocram_vbase) {}
+ #endif
+
++void v7_cpu_resume(void);
++
+ void imx6_pm_ccm_init(const char *ccm_compat);
+ void imx6q_pm_init(void);
+ void imx6dl_pm_init(void);
+diff --git a/arch/arm/mach-imx/resume-imx6.S b/arch/arm/mach-imx/resume-imx6.S
+new file mode 100644
+index 000000000000..5bd1ba7ef15b
+--- /dev/null
++++ b/arch/arm/mach-imx/resume-imx6.S
+@@ -0,0 +1,24 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright 2014 Freescale Semiconductor, Inc.
++ */
++
++#include <linux/linkage.h>
++#include <asm/assembler.h>
++#include <asm/asm-offsets.h>
++#include <asm/hardware/cache-l2x0.h>
++#include "hardware.h"
++
++/*
++ * The following code must assume it is running from physical address
++ * where absolute virtual addresses to the data section have to be
++ * turned into relative ones.
++ */
++
++ENTRY(v7_cpu_resume)
++ bl v7_invalidate_l1
++#ifdef CONFIG_CACHE_L2X0
++ bl l2c310_early_resume
++#endif
++ b cpu_resume
++ENDPROC(v7_cpu_resume)
+diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
+index 76ee2ceec8d5..7d84b617af48 100644
+--- a/arch/arm/mach-imx/suspend-imx6.S
++++ b/arch/arm/mach-imx/suspend-imx6.S
+@@ -333,17 +333,3 @@ resume:
+
+ ret lr
+ ENDPROC(imx6_suspend)
+-
+-/*
+- * The following code must assume it is running from physical address
+- * where absolute virtual addresses to the data section have to be
+- * turned into relative ones.
+- */
+-
+-ENTRY(v7_cpu_resume)
+- bl v7_invalidate_l1
+-#ifdef CONFIG_CACHE_L2X0
+- bl l2c310_early_resume
+-#endif
+- b cpu_resume
+-ENDPROC(v7_cpu_resume)
+diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
+index 1eab54bc6ee9..a2183bb54a29 100644
+--- a/arch/powerpc/kernel/cputable.c
++++ b/arch/powerpc/kernel/cputable.c
+@@ -2188,11 +2188,13 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
+ * oprofile_cpu_type already has a value, then we are
+ * possibly overriding a real PVR with a logical one,
+ * and, in that case, keep the current value for
+- * oprofile_cpu_type.
++ * oprofile_cpu_type. Futhermore, let's ensure that the
++ * fix for the PMAO bug is enabled on compatibility mode.
+ */
+ if (old.oprofile_cpu_type != NULL) {
+ t->oprofile_cpu_type = old.oprofile_cpu_type;
+ t->oprofile_type = old.oprofile_type;
++ t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
+ }
+ }
+
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index 4bccde36cb16..9a3a698c8fca 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -140,7 +140,7 @@ all: bzImage
+ #KBUILD_IMAGE is necessary for packaging targets like rpm-pkg, deb-pkg...
+ KBUILD_IMAGE := $(boot)/bzImage
+
+-install: vmlinux
++install:
+ $(Q)$(MAKE) $(build)=$(boot) $@
+
+ bzImage: vmlinux
+diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
+index f6a9b0c20355..45c72d1f9e7d 100644
+--- a/arch/s390/boot/Makefile
++++ b/arch/s390/boot/Makefile
+@@ -46,7 +46,7 @@ quiet_cmd_ar = AR $@
+ $(obj)/startup.a: $(OBJECTS) FORCE
+ $(call if_changed,ar)
+
+-install: $(CONFIGURE) $(obj)/bzImage
++install:
+ sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
+ System.map "$(INSTALL_PATH)"
+
+diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
+index 9c9970a5dfb1..1f2cd98dcb05 100644
+--- a/arch/s390/include/asm/qdio.h
++++ b/arch/s390/include/asm/qdio.h
+@@ -228,7 +228,7 @@ struct qdio_buffer {
+ * @sbal: absolute SBAL address
+ */
+ struct sl_element {
+- unsigned long sbal;
++ u64 sbal;
+ } __attribute__ ((packed));
+
+ /**
+diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c
+index 748456c365f4..9557c5a15b91 100644
+--- a/arch/x86/boot/compressed/kaslr_64.c
++++ b/arch/x86/boot/compressed/kaslr_64.c
+@@ -29,9 +29,6 @@
+ #define __PAGE_OFFSET __PAGE_OFFSET_BASE
+ #include "../../mm/ident_map.c"
+
+-/* Used by pgtable.h asm code to force instruction serialization. */
+-unsigned long __force_order;
+-
+ /* Used to track our page table allocation area. */
+ struct alloc_pgt_data {
+ unsigned char *pgt_buf;
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index a6458ab499c2..7f43eba8d0c1 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -387,7 +387,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
+ * cpuid bit to be set. We need to ensure that we
+ * update that bit in this CPU's "cpu_info".
+ */
+- get_cpu_cap(c);
++ set_cpu_cap(c, X86_FEATURE_OSPKE);
+ }
+
+ #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index 6db8f3598c80..2a9a703ef4a0 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -313,7 +313,7 @@ void efi_sync_low_kernel_mappings(void)
+ static inline phys_addr_t
+ virt_to_phys_or_null_size(void *va, unsigned long size)
+ {
+- bool bad_size;
++ phys_addr_t pa;
+
+ if (!va)
+ return 0;
+@@ -321,16 +321,13 @@ virt_to_phys_or_null_size(void *va, unsigned long size)
+ if (virt_addr_valid(va))
+ return virt_to_phys(va);
+
+- /*
+- * A fully aligned variable on the stack is guaranteed not to
+- * cross a page bounary. Try to catch strings on the stack by
+- * checking that 'size' is a power of two.
+- */
+- bad_size = size > PAGE_SIZE || !is_power_of_2(size);
++ pa = slow_virt_to_phys(va);
+
+- WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);
++ /* check if the object crosses a page boundary */
++ if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK))
++ return 0;
+
+- return slow_virt_to_phys(va);
++ return pa;
+ }
+
+ #define virt_to_phys_or_null(addr) \
+@@ -790,6 +787,8 @@ static efi_status_t
+ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 *attr, unsigned long *data_size, void *data)
+ {
++ u8 buf[24] __aligned(8);
++ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ efi_status_t status;
+ u32 phys_name, phys_vendor, phys_attr;
+ u32 phys_data_size, phys_data;
+@@ -797,14 +796,19 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
++ *vnd = *vendor;
++
+ phys_data_size = virt_to_phys_or_null(data_size);
+- phys_vendor = virt_to_phys_or_null(vendor);
++ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+ phys_attr = virt_to_phys_or_null(attr);
+ phys_data = virt_to_phys_or_null_size(data, *data_size);
+
+- status = efi_thunk(get_variable, phys_name, phys_vendor,
+- phys_attr, phys_data_size, phys_data);
++ if (!phys_name || (data && !phys_data))
++ status = EFI_INVALID_PARAMETER;
++ else
++ status = efi_thunk(get_variable, phys_name, phys_vendor,
++ phys_attr, phys_data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+@@ -815,19 +819,25 @@ static efi_status_t
+ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data)
+ {
++ u8 buf[24] __aligned(8);
++ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ u32 phys_name, phys_vendor, phys_data;
+ efi_status_t status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
++ *vnd = *vendor;
++
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+- phys_vendor = virt_to_phys_or_null(vendor);
++ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_data = virt_to_phys_or_null_size(data, data_size);
+
+- /* If data_size is > sizeof(u32) we've got problems */
+- status = efi_thunk(set_variable, phys_name, phys_vendor,
+- attr, data_size, phys_data);
++ if (!phys_name || !phys_data)
++ status = EFI_INVALID_PARAMETER;
++ else
++ status = efi_thunk(set_variable, phys_name, phys_vendor,
++ attr, data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+@@ -839,6 +849,8 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data)
+ {
++ u8 buf[24] __aligned(8);
++ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ u32 phys_name, phys_vendor, phys_data;
+ efi_status_t status;
+ unsigned long flags;
+@@ -846,13 +858,17 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
+ if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+ return EFI_NOT_READY;
+
++ *vnd = *vendor;
++
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+- phys_vendor = virt_to_phys_or_null(vendor);
++ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_data = virt_to_phys_or_null_size(data, data_size);
+
+- /* If data_size is > sizeof(u32) we've got problems */
+- status = efi_thunk(set_variable, phys_name, phys_vendor,
+- attr, data_size, phys_data);
++ if (!phys_name || !phys_data)
++ status = EFI_INVALID_PARAMETER;
++ else
++ status = efi_thunk(set_variable, phys_name, phys_vendor,
++ attr, data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+@@ -864,21 +880,29 @@ efi_thunk_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name,
+ efi_guid_t *vendor)
+ {
++ u8 buf[24] __aligned(8);
++ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ efi_status_t status;
+ u32 phys_name_size, phys_name, phys_vendor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
++ *vnd = *vendor;
++
+ phys_name_size = virt_to_phys_or_null(name_size);
+- phys_vendor = virt_to_phys_or_null(vendor);
++ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_name = virt_to_phys_or_null_size(name, *name_size);
+
+- status = efi_thunk(get_next_variable, phys_name_size,
+- phys_name, phys_vendor);
++ if (!phys_name)
++ status = EFI_INVALID_PARAMETER;
++ else
++ status = efi_thunk(get_next_variable, phys_name_size,
++ phys_name, phys_vendor);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
++ *vendor = *vnd;
+ return status;
+ }
+
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 1730a26ff6ab..76864ea59160 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -908,14 +908,15 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
+ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
+ {
+ int ret;
++#ifdef CONFIG_X86_64
++ unsigned int which;
++ u64 base;
++#endif
+
+ ret = 0;
+
+ switch (msr) {
+ #ifdef CONFIG_X86_64
+- unsigned which;
+- u64 base;
+-
+ case MSR_FS_BASE: which = SEGBASE_FS; goto set;
+ case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set;
+ case MSR_GS_BASE: which = SEGBASE_GS_KERNEL; goto set;
+diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
+index cee78f9c4794..935d1697ec36 100644
+--- a/drivers/dma/coh901318.c
++++ b/drivers/dma/coh901318.c
+@@ -1944,8 +1944,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
+ return;
+ }
+
+- spin_lock(&cohc->lock);
+-
+ /*
+ * When we reach this point, at least one queue item
+ * should have been moved over from cohc->queue to
+@@ -1966,8 +1964,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
+ if (coh901318_queue_start(cohc) == NULL)
+ cohc->busy = 0;
+
+- spin_unlock(&cohc->lock);
+-
+ /*
+ * This tasklet will remove items from cohc->active
+ * and thus terminates them.
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index ceb82e74f5b4..eea89c3b54c1 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -335,6 +335,7 @@ struct sdma_desc {
+ * @sdma: pointer to the SDMA engine for this channel
+ * @channel: the channel number, matches dmaengine chan_id + 1
+ * @direction: transfer type. Needed for setting SDMA script
++ * @slave_config Slave configuration
+ * @peripheral_type: Peripheral type. Needed for setting SDMA script
+ * @event_id0: aka dma request line
+ * @event_id1: for channels that use 2 events
+@@ -362,6 +363,7 @@ struct sdma_channel {
+ struct sdma_engine *sdma;
+ unsigned int channel;
+ enum dma_transfer_direction direction;
++ struct dma_slave_config slave_config;
+ enum sdma_peripheral_type peripheral_type;
+ unsigned int event_id0;
+ unsigned int event_id1;
+@@ -440,6 +442,10 @@ struct sdma_engine {
+ struct sdma_buffer_descriptor *bd0;
+ };
+
++static int sdma_config_write(struct dma_chan *chan,
++ struct dma_slave_config *dmaengine_cfg,
++ enum dma_transfer_direction direction);
++
+ static struct sdma_driver_data sdma_imx31 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX31,
+ .num_events = 32,
+@@ -1122,18 +1128,6 @@ static int sdma_config_channel(struct dma_chan *chan)
+ sdmac->shp_addr = 0;
+ sdmac->per_addr = 0;
+
+- if (sdmac->event_id0) {
+- if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
+- return -EINVAL;
+- sdma_event_enable(sdmac, sdmac->event_id0);
+- }
+-
+- if (sdmac->event_id1) {
+- if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
+- return -EINVAL;
+- sdma_event_enable(sdmac, sdmac->event_id1);
+- }
+-
+ switch (sdmac->peripheral_type) {
+ case IMX_DMATYPE_DSP:
+ sdma_config_ownership(sdmac, false, true, true);
+@@ -1431,6 +1425,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
+ struct scatterlist *sg;
+ struct sdma_desc *desc;
+
++ sdma_config_write(chan, &sdmac->slave_config, direction);
++
+ desc = sdma_transfer_init(sdmac, direction, sg_len);
+ if (!desc)
+ goto err_out;
+@@ -1515,6 +1511,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
+
+ dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
+
++ sdma_config_write(chan, &sdmac->slave_config, direction);
++
+ desc = sdma_transfer_init(sdmac, direction, num_periods);
+ if (!desc)
+ goto err_out;
+@@ -1570,17 +1568,18 @@ err_out:
+ return NULL;
+ }
+
+-static int sdma_config(struct dma_chan *chan,
+- struct dma_slave_config *dmaengine_cfg)
++static int sdma_config_write(struct dma_chan *chan,
++ struct dma_slave_config *dmaengine_cfg,
++ enum dma_transfer_direction direction)
+ {
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+
+- if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
++ if (direction == DMA_DEV_TO_MEM) {
+ sdmac->per_address = dmaengine_cfg->src_addr;
+ sdmac->watermark_level = dmaengine_cfg->src_maxburst *
+ dmaengine_cfg->src_addr_width;
+ sdmac->word_size = dmaengine_cfg->src_addr_width;
+- } else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
++ } else if (direction == DMA_DEV_TO_DEV) {
+ sdmac->per_address2 = dmaengine_cfg->src_addr;
+ sdmac->per_address = dmaengine_cfg->dst_addr;
+ sdmac->watermark_level = dmaengine_cfg->src_maxburst &
+@@ -1594,10 +1593,33 @@ static int sdma_config(struct dma_chan *chan,
+ dmaengine_cfg->dst_addr_width;
+ sdmac->word_size = dmaengine_cfg->dst_addr_width;
+ }
+- sdmac->direction = dmaengine_cfg->direction;
++ sdmac->direction = direction;
+ return sdma_config_channel(chan);
+ }
+
++static int sdma_config(struct dma_chan *chan,
++ struct dma_slave_config *dmaengine_cfg)
++{
++ struct sdma_channel *sdmac = to_sdma_chan(chan);
++
++ memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
++
++ /* Set ENBLn earlier to make sure dma request triggered after that */
++ if (sdmac->event_id0) {
++ if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
++ return -EINVAL;
++ sdma_event_enable(sdmac, sdmac->event_id0);
++ }
++
++ if (sdmac->event_id1) {
++ if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
++ return -EINVAL;
++ sdma_event_enable(sdmac, sdmac->event_id1);
++ }
++
++ return 0;
++}
++
+ static enum dma_status sdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
+index fb23993430d3..15481aeaeecd 100644
+--- a/drivers/dma/tegra20-apb-dma.c
++++ b/drivers/dma/tegra20-apb-dma.c
+@@ -288,7 +288,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
+
+ /* Do not allocate if desc are waiting for ack */
+ list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
+- if (async_tx_test_ack(&dma_desc->txd)) {
++ if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
+ list_del(&dma_desc->node);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ dma_desc->txd.flags = 0;
+@@ -756,10 +756,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
+ bool was_busy;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+- if (list_empty(&tdc->pending_sg_req)) {
+- spin_unlock_irqrestore(&tdc->lock, flags);
+- return 0;
+- }
+
+ if (!tdc->busy)
+ goto skip_dma_stop;
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 94265e438514..05d6f9c86ac3 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2863,6 +2863,7 @@ static int init_csrows(struct mem_ctl_info *mci)
+ dimm = csrow->channels[j]->dimm;
+ dimm->mtype = pvt->dram_type;
+ dimm->edac_mode = edac_mode;
++ dimm->grain = 64;
+ }
+ }
+
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+index b1da9ce54379..aa28a43ff842 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+@@ -1118,8 +1118,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
+ ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
+ msecs_to_jiffies(50));
+ if (ret == 0)
+- dev_warn(dev->dev, "pp done time out, lm=%d\n",
+- mdp5_cstate->pipeline.mixer->lm);
++ dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
++ mdp5_cstate->pipeline.mixer->lm);
+ }
+
+ static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
+index 5224010d90e4..b01762a7778a 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
+@@ -328,7 +328,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
+ return num;
+ }
+
+-static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+ int id = dsi_mgr_connector_get_id(connector);
+@@ -471,6 +471,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
+ struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct drm_panel *panel = msm_dsi->panel;
++ struct msm_dsi_pll *src_pll;
+ bool is_dual_dsi = IS_DUAL_DSI();
+ int ret;
+
+@@ -511,6 +512,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
+ id, ret);
+ }
+
++ /* Save PLL status if it is a clock source */
++ src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
++ msm_dsi_pll_save_state(src_pll);
++
+ ret = msm_dsi_host_power_off(host);
+ if (ret)
+ pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+index 9a9fa0c75a13..c630871de7c5 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+@@ -726,10 +726,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
+ if (!phy || !phy->cfg->ops.disable)
+ return;
+
+- /* Save PLL status if it is a clock source */
+- if (phy->usecase != MSM_DSI_PHY_SLAVE)
+- msm_dsi_pll_save_state(phy->pll);
+-
+ phy->cfg->ops.disable(phy);
+
+ dsi_phy_regulator_disable(phy);
+diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+index 31205625c734..21a69b046625 100644
+--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+@@ -406,6 +406,12 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
+ if (pll_10nm->slave)
+ dsi_pll_enable_pll_bias(pll_10nm->slave);
+
++ rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0);
++ if (rc) {
++ pr_err("vco_set_rate failed, rc=%d\n", rc);
++ return rc;
++ }
++
+ /* Start PLL */
+ pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
+ 0x01);
+diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
+index cb65b0ed53fd..71a798e5d559 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
++++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
+@@ -110,48 +110,104 @@ static const struct de2_fmt_info de2_formats[] = {
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_XRGB4444,
++ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_ABGR4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_XBGR4444,
++ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_RGBA4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_RGBX4444,
++ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_BGRA4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_BGRX4444,
++ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_ARGB1555,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_XRGB1555,
++ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_ABGR1555,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_XBGR1555,
++ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_RGBA5551,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_RGBX5551,
++ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_BGRA5551,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_BGRX5551,
++ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_UYVY,
+ .de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
+@@ -200,12 +256,6 @@ static const struct de2_fmt_info de2_formats[] = {
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+- {
+- .drm_fmt = DRM_FORMAT_YUV444,
+- .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
+- .rgb = true,
+- .csc = SUN8I_CSC_MODE_YUV2RGB,
+- },
+ {
+ .drm_fmt = DRM_FORMAT_YUV422,
+ .de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
+@@ -224,12 +274,6 @@ static const struct de2_fmt_info de2_formats[] = {
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+- {
+- .drm_fmt = DRM_FORMAT_YVU444,
+- .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
+- .rgb = true,
+- .csc = SUN8I_CSC_MODE_YVU2RGB,
+- },
+ {
+ .drm_fmt = DRM_FORMAT_YVU422,
+ .de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
+diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+index f4fe97813f94..15fc6363cc43 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
++++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+@@ -330,26 +330,26 @@ static const struct drm_plane_funcs sun8i_vi_layer_funcs = {
+ };
+
+ /*
+- * While all RGB formats are supported, VI planes don't support
+- * alpha blending, so there is no point having formats with alpha
+- * channel if their opaque analog exist.
++ * While DE2 VI layer supports same RGB formats as UI layer, alpha
++ * channel is ignored. This structure lists all unique variants
++ * where alpha channel is replaced with "don't care" (X) channel.
+ */
+ static const u32 sun8i_vi_layer_formats[] = {
+- DRM_FORMAT_ABGR1555,
+- DRM_FORMAT_ABGR4444,
+- DRM_FORMAT_ARGB1555,
+- DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGR888,
+- DRM_FORMAT_BGRA5551,
+- DRM_FORMAT_BGRA4444,
++ DRM_FORMAT_BGRX4444,
++ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+- DRM_FORMAT_RGBA4444,
+- DRM_FORMAT_RGBA5551,
++ DRM_FORMAT_RGBX4444,
++ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_RGBX8888,
++ DRM_FORMAT_XBGR1555,
++ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_XBGR8888,
++ DRM_FORMAT_XRGB1555,
++ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XRGB8888,
+
+ DRM_FORMAT_NV16,
+@@ -363,11 +363,9 @@ static const u32 sun8i_vi_layer_formats[] = {
+ DRM_FORMAT_YUV411,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+- DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU411,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YVU422,
+- DRM_FORMAT_YVU444,
+ };
+
+ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
+diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
+index 19f2a6d48bac..bdd7679fd298 100644
+--- a/drivers/hwmon/adt7462.c
++++ b/drivers/hwmon/adt7462.c
+@@ -426,7 +426,7 @@ static int ADT7462_REG_VOLT(struct adt7462_data *data, int which)
+ return 0x95;
+ break;
+ }
+- return -ENODEV;
++ return 0;
+ }
+
+ /* Provide labels for sysfs */
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 4c533275d1f2..64f206e11d49 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -1231,6 +1231,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
+ /* Sharing an ib_cm_id with different handlers is not
+ * supported */
+ spin_unlock_irqrestore(&cm.lock, flags);
++ ib_destroy_cm_id(cm_id);
+ return ERR_PTR(-EINVAL);
+ }
+ atomic_inc(&cm_id_priv->refcount);
+diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
+index 5d676cff41f4..99dd8452724d 100644
+--- a/drivers/infiniband/core/iwcm.c
++++ b/drivers/infiniband/core/iwcm.c
+@@ -158,8 +158,10 @@ static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
+ {
+ struct list_head *e, *tmp;
+
+- list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
++ list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
++ list_del(e);
+ kfree(list_entry(e, struct iwcm_work, free_list));
++ }
+ }
+
+ static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
+diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
+index 4e2565cccb8a..f2c2e725375e 100644
+--- a/drivers/infiniband/core/security.c
++++ b/drivers/infiniband/core/security.c
+@@ -337,15 +337,19 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
+ return NULL;
+
+ if (qp_attr_mask & IB_QP_PORT)
+- new_pps->main.port_num =
+- (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
++ new_pps->main.port_num = qp_attr->port_num;
++ else if (qp_pps)
++ new_pps->main.port_num = qp_pps->main.port_num;
++
+ if (qp_attr_mask & IB_QP_PKEY_INDEX)
+- new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
+- qp_attr->pkey_index;
++ new_pps->main.pkey_index = qp_attr->pkey_index;
++ else if (qp_pps)
++ new_pps->main.pkey_index = qp_pps->main.pkey_index;
++
+ if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
+ new_pps->main.state = IB_PORT_PKEY_VALID;
+
+- if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) {
++ if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
+ new_pps->main.port_num = qp_pps->main.port_num;
+ new_pps->main.pkey_index = qp_pps->main.pkey_index;
+ if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
+diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
+index 90e12f9433a3..1cf1dfbf2596 100644
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -595,10 +595,11 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
+ opa_get_lid(packet->dlid, 9B));
+ if (!mcast)
+ goto drop;
++ rcu_read_lock();
+ list_for_each_entry_rcu(p, &mcast->qp_list, list) {
+ packet->qp = p->qp;
+ if (hfi1_do_pkey_check(packet))
+- goto drop;
++ goto unlock_drop;
+ spin_lock_irqsave(&packet->qp->r_lock, flags);
+ packet_handler = qp_ok(packet);
+ if (likely(packet_handler))
+@@ -607,6 +608,7 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
+ ibp->rvp.n_pkt_drops++;
+ spin_unlock_irqrestore(&packet->qp->r_lock, flags);
+ }
++ rcu_read_unlock();
+ /*
+ * Notify rvt_multicast_detach() if it is waiting for us
+ * to finish.
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
+index 803c3544c75b..5abbbb656a52 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs.c
++++ b/drivers/infiniband/hw/qib/qib_verbs.c
+@@ -360,8 +360,10 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
+ if (mcast == NULL)
+ goto drop;
+ this_cpu_inc(ibp->pmastats->n_multicast_rcv);
++ rcu_read_lock();
+ list_for_each_entry_rcu(p, &mcast->qp_list, list)
+ qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
++ rcu_read_unlock();
+ /*
+ * Notify rvt_multicast_detach() if it is waiting for us
+ * to finish.
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 84ff70027c25..2ddd575e97f7 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -2859,8 +2859,8 @@ static void cache_postsuspend(struct dm_target *ti)
+ prevent_background_work(cache);
+ BUG_ON(atomic_read(&cache->nr_io_migrations));
+
+- cancel_delayed_work(&cache->waker);
+- flush_workqueue(cache->wq);
++ cancel_delayed_work_sync(&cache->waker);
++ drain_workqueue(cache->wq);
+ WARN_ON(cache->tracker.in_flight);
+
+ /*
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 2e22d588f056..5885239cc1f8 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -186,17 +186,19 @@ struct dm_integrity_c {
+ __u8 sectors_per_block;
+
+ unsigned char mode;
+- int suspending;
+
+ int failed;
+
+ struct crypto_shash *internal_hash;
+
++ struct dm_target *ti;
++
+ /* these variables are locked with endio_wait.lock */
+ struct rb_root in_progress;
+ struct list_head wait_list;
+ wait_queue_head_t endio_wait;
+ struct workqueue_struct *wait_wq;
++ struct workqueue_struct *offload_wq;
+
+ unsigned char commit_seq;
+ commit_id_t commit_ids[N_COMMIT_IDS];
+@@ -1236,7 +1238,7 @@ static void dec_in_flight(struct dm_integrity_io *dio)
+ dio->range.logical_sector += dio->range.n_sectors;
+ bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
+ INIT_WORK(&dio->work, integrity_bio_wait);
+- queue_work(ic->wait_wq, &dio->work);
++ queue_work(ic->offload_wq, &dio->work);
+ return;
+ }
+ do_endio_flush(ic, dio);
+@@ -1656,7 +1658,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+
+ if (need_sync_io && from_map) {
+ INIT_WORK(&dio->work, integrity_bio_wait);
+- queue_work(ic->metadata_wq, &dio->work);
++ queue_work(ic->offload_wq, &dio->work);
+ return;
+ }
+
+@@ -2080,7 +2082,7 @@ static void integrity_writer(struct work_struct *w)
+ unsigned prev_free_sectors;
+
+ /* the following test is not needed, but it tests the replay code */
+- if (READ_ONCE(ic->suspending) && !ic->meta_dev)
++ if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
+ return;
+
+ spin_lock_irq(&ic->endio_wait.lock);
+@@ -2139,7 +2141,7 @@ static void integrity_recalc(struct work_struct *w)
+
+ next_chunk:
+
+- if (unlikely(READ_ONCE(ic->suspending)))
++ if (unlikely(dm_suspended(ic->ti)))
+ goto unlock_ret;
+
+ range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
+@@ -2411,8 +2413,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
+
+ del_timer_sync(&ic->autocommit_timer);
+
+- WRITE_ONCE(ic->suspending, 1);
+-
+ if (ic->recalc_wq)
+ drain_workqueue(ic->recalc_wq);
+
+@@ -2426,8 +2426,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
+ dm_integrity_flush_buffers(ic);
+ }
+
+- WRITE_ONCE(ic->suspending, 0);
+-
+ BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
+
+ ic->journal_uptodate = true;
+@@ -3116,6 +3114,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ }
+ ti->private = ic;
+ ti->per_io_data_size = sizeof(struct dm_integrity_io);
++ ic->ti = ti;
+
+ ic->in_progress = RB_ROOT;
+ INIT_LIST_HEAD(&ic->wait_list);
+@@ -3310,6 +3309,14 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ goto bad;
+ }
+
++ ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
++ METADATA_WORKQUEUE_MAX_ACTIVE);
++ if (!ic->offload_wq) {
++ ti->error = "Cannot allocate workqueue";
++ r = -ENOMEM;
++ goto bad;
++ }
++
+ ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
+ if (!ic->commit_wq) {
+ ti->error = "Cannot allocate workqueue";
+@@ -3546,6 +3553,8 @@ static void dm_integrity_dtr(struct dm_target *ti)
+ destroy_workqueue(ic->metadata_wq);
+ if (ic->wait_wq)
+ destroy_workqueue(ic->wait_wq);
++ if (ic->offload_wq)
++ destroy_workqueue(ic->offload_wq);
+ if (ic->commit_wq)
+ destroy_workqueue(ic->commit_wq);
+ if (ic->writer_wq)
+diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
+index 820c2e07dadf..4e4a09054f85 100644
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -631,6 +631,12 @@ static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry
+ wc->freelist_size++;
+ }
+
++static inline void writecache_verify_watermark(struct dm_writecache *wc)
++{
++ if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
++ queue_work(wc->writeback_wq, &wc->writeback_work);
++}
++
+ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
+ {
+ struct wc_entry *e;
+@@ -652,8 +658,8 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
+ list_del(&e->lru);
+ }
+ wc->freelist_size--;
+- if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
+- queue_work(wc->writeback_wq, &wc->writeback_work);
++
++ writecache_verify_watermark(wc);
+
+ return e;
+ }
+@@ -844,7 +850,7 @@ static void writecache_suspend(struct dm_target *ti)
+ }
+ wc_unlock(wc);
+
+- flush_workqueue(wc->writeback_wq);
++ drain_workqueue(wc->writeback_wq);
+
+ wc_lock(wc);
+ if (flush_on_suspend)
+@@ -967,6 +973,8 @@ erase_this:
+ writecache_commit_flushed(wc, false);
+ }
+
++ writecache_verify_watermark(wc);
++
+ wc_unlock(wc);
+ }
+
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 3965f3cf8ea1..43643151584a 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2353,6 +2353,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
+ map = dm_get_live_table(md, &srcu_idx);
+ if (!dm_suspended_md(md)) {
+ dm_table_presuspend_targets(map);
++ set_bit(DMF_SUSPENDED, &md->flags);
+ dm_table_postsuspend_targets(map);
+ }
+ /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
+diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
+index ce9bd1b91210..fc237b820c4f 100644
+--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
++++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
+@@ -787,12 +787,12 @@ int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ goto err_rel_entity1;
+
+ /* Connect the three entities */
+- ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1,
++ ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rel_entity2;
+
+- ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0,
++ ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rm_links0;
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 51436e7eae10..ac5d945b934a 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1165,6 +1165,9 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
+
+ b53_get_vlan_entry(dev, vid, vl);
+
++ if (vid == 0 && vid == b53_default_pvid(dev))
++ untagged = true;
++
+ vl->members |= BIT(port);
+ if (untagged && !dsa_is_cpu_port(ds, port))
+ vl->untag |= BIT(port);
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index f181a28cb452..8c69789fbe09 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -73,8 +73,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
+ /* Force link status for IMP port */
+ reg = core_readl(priv, offset);
+ reg |= (MII_SW_OR | LINK_STS);
+- if (priv->type == BCM7278_DEVICE_ID)
+- reg |= GMII_SPEED_UP_2G;
++ reg &= ~GMII_SPEED_UP_2G;
+ core_writel(priv, reg, offset);
+
+ /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+index 8ae28f82aafd..e5fc89813852 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+@@ -413,10 +413,19 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
+ lmac = &bgx->lmac[lmacid];
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+- if (enable)
++ if (enable) {
+ cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
+- else
++
++ /* enable TX FIFO Underflow interrupt */
++ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1S,
++ GMI_TXX_INT_UNDFLW);
++ } else {
+ cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
++
++ /* Disable TX FIFO Underflow interrupt */
++ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1C,
++ GMI_TXX_INT_UNDFLW);
++ }
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ if (bgx->is_rgx)
+@@ -1544,6 +1553,48 @@ static int bgx_init_phy(struct bgx *bgx)
+ return bgx_init_of_phy(bgx);
+ }
+
++static irqreturn_t bgx_intr_handler(int irq, void *data)
++{
++ struct bgx *bgx = (struct bgx *)data;
++ u64 status, val;
++ int lmac;
++
++ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
++ status = bgx_reg_read(bgx, lmac, BGX_GMP_GMI_TXX_INT);
++ if (status & GMI_TXX_INT_UNDFLW) {
++ pci_err(bgx->pdev, "BGX%d lmac%d UNDFLW\n",
++ bgx->bgx_id, lmac);
++ val = bgx_reg_read(bgx, lmac, BGX_CMRX_CFG);
++ val &= ~CMR_EN;
++ bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
++ val |= CMR_EN;
++ bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
++ }
++ /* clear interrupts */
++ bgx_reg_write(bgx, lmac, BGX_GMP_GMI_TXX_INT, status);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static void bgx_register_intr(struct pci_dev *pdev)
++{
++ struct bgx *bgx = pci_get_drvdata(pdev);
++ int ret;
++
++ ret = pci_alloc_irq_vectors(pdev, BGX_LMAC_VEC_OFFSET,
++ BGX_LMAC_VEC_OFFSET, PCI_IRQ_ALL_TYPES);
++ if (ret < 0) {
++ pci_err(pdev, "Req for #%d msix vectors failed\n",
++ BGX_LMAC_VEC_OFFSET);
++ return;
++ }
++ ret = pci_request_irq(pdev, GMPX_GMI_TX_INT, bgx_intr_handler, NULL,
++ bgx, "BGX%d", bgx->bgx_id);
++ if (ret)
++ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
++}
++
+ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+ int err;
+@@ -1559,7 +1610,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ pci_set_drvdata(pdev, bgx);
+
+- err = pci_enable_device(pdev);
++ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+@@ -1613,6 +1664,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ bgx_init_hw(bgx);
+
++ bgx_register_intr(pdev);
++
+ /* Enable all LMACs */
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ err = bgx_lmac_enable(bgx, lmac);
+@@ -1629,6 +1682,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ err_enable:
+ bgx_vnic[bgx->bgx_id] = NULL;
++ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
+ err_release_regions:
+ pci_release_regions(pdev);
+ err_disable_device:
+@@ -1646,6 +1700,8 @@ static void bgx_remove(struct pci_dev *pdev)
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+ bgx_lmac_disable(bgx, lmac);
+
++ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
++
+ bgx_vnic[bgx->bgx_id] = NULL;
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+index cbdd20b9ee6f..ac0c89cd5c3d 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+@@ -183,6 +183,15 @@
+ #define BGX_GMP_GMI_TXX_BURST 0x38228
+ #define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
+ #define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
++#define BGX_GMP_GMI_TXX_INT 0x38500
++#define BGX_GMP_GMI_TXX_INT_W1S 0x38508
++#define BGX_GMP_GMI_TXX_INT_ENA_W1C 0x38510
++#define BGX_GMP_GMI_TXX_INT_ENA_W1S 0x38518
++#define GMI_TXX_INT_PTP_LOST BIT_ULL(4)
++#define GMI_TXX_INT_LATE_COL BIT_ULL(3)
++#define GMI_TXX_INT_XSDEF BIT_ULL(2)
++#define GMI_TXX_INT_XSCOL BIT_ULL(1)
++#define GMI_TXX_INT_UNDFLW BIT_ULL(0)
+
+ #define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */
+ #define BGX_MSIX_VEC_0_29_CTL 0x400008
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 4c5c87b158f5..627abef829c9 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -772,13 +772,6 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+ else
+ return -EINVAL;
+
+- /* Tell the OS link is going down, the link will go back up when fw
+- * says it is ready asynchronously
+- */
+- ice_print_link_msg(vsi, false);
+- netif_carrier_off(netdev);
+- netif_tx_stop_all_queues(netdev);
+-
+ /* Set the FC mode and only restart AN if link is up */
+ status = ice_set_fc(pi, &aq_failures, link_up);
+
+diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
+index 35f8c9ef204d..9de59facec21 100644
+--- a/drivers/net/ethernet/micrel/ks8851_mll.c
++++ b/drivers/net/ethernet/micrel/ks8851_mll.c
+@@ -475,24 +475,6 @@ static int msg_enable;
+ * chip is busy transferring packet data (RX/TX FIFO accesses).
+ */
+
+-/**
+- * ks_rdreg8 - read 8 bit register from device
+- * @ks : The chip information
+- * @offset: The register address
+- *
+- * Read a 8bit register from the chip, returning the result
+- */
+-static u8 ks_rdreg8(struct ks_net *ks, int offset)
+-{
+- u16 data;
+- u8 shift_bit = offset & 0x03;
+- u8 shift_data = (offset & 1) << 3;
+- ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
+- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+- data = ioread16(ks->hw_addr);
+- return (u8)(data >> shift_data);
+-}
+-
+ /**
+ * ks_rdreg16 - read 16 bit register from device
+ * @ks : The chip information
+@@ -503,27 +485,11 @@ static u8 ks_rdreg8(struct ks_net *ks, int offset)
+
+ static u16 ks_rdreg16(struct ks_net *ks, int offset)
+ {
+- ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
++ ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
+ iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+ return ioread16(ks->hw_addr);
+ }
+
+-/**
+- * ks_wrreg8 - write 8bit register value to chip
+- * @ks: The chip information
+- * @offset: The register address
+- * @value: The value to write
+- *
+- */
+-static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
+-{
+- u8 shift_bit = (offset & 0x03);
+- u16 value_write = (u16)(value << ((offset & 1) << 3));
+- ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
+- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+- iowrite16(value_write, ks->hw_addr);
+-}
+-
+ /**
+ * ks_wrreg16 - write 16bit register value to chip
+ * @ks: The chip information
+@@ -534,7 +500,7 @@ static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
+
+ static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
+ {
+- ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
++ ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
+ iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+ iowrite16(value, ks->hw_addr);
+ }
+@@ -550,7 +516,7 @@ static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
+ {
+ len >>= 1;
+ while (len--)
+- *wptr++ = (u16)ioread16(ks->hw_addr);
++ *wptr++ = be16_to_cpu(ioread16(ks->hw_addr));
+ }
+
+ /**
+@@ -564,7 +530,7 @@ static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
+ {
+ len >>= 1;
+ while (len--)
+- iowrite16(*wptr++, ks->hw_addr);
++ iowrite16(cpu_to_be16(*wptr++), ks->hw_addr);
+ }
+
+ static void ks_disable_int(struct ks_net *ks)
+@@ -643,8 +609,7 @@ static void ks_read_config(struct ks_net *ks)
+ u16 reg_data = 0;
+
+ /* Regardless of bus width, 8 bit read should always work.*/
+- reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
+- reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
++ reg_data = ks_rdreg16(ks, KS_CCR);
+
+ /* addr/data bus are multiplexed */
+ ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
+@@ -748,7 +713,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
+
+ /* 1. set sudo DMA mode */
+ ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
+- ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
+
+ /* 2. read prepend data */
+ /**
+@@ -765,7 +730,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
+ ks_inblk(ks, buf, ALIGN(len, 4));
+
+ /* 4. reset sudo DMA Mode */
+- ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+ }
+
+ /**
+@@ -998,13 +963,13 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
+ ks->txh.txw[1] = cpu_to_le16(len);
+
+ /* 1. set sudo-DMA mode */
+- ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
+ /* 2. write status/lenth info */
+ ks_outblk(ks, ks->txh.txw, 4);
+ /* 3. write pkt data */
+ ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
+ /* 4. reset sudo-DMA mode */
+- ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+ /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
+ ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
+ /* 6. wait until TXQCR_METFE is auto-cleared */
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index f41fd15b7b7c..a8132e8d72bb 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1058,8 +1058,8 @@ static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
+ static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
+ void *buffer, size_t buflen, u32 *result)
+ {
++ union nvme_result res = { 0 };
+ struct nvme_command c;
+- union nvme_result res;
+ int ret;
+
+ memset(&c, 0, sizeof(c));
+diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
+index 0075fb0bef8c..77518010adc8 100644
+--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
++++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
+@@ -19,6 +19,7 @@
+
+ #define PHY_MDM6600_PHY_DELAY_MS 4000 /* PHY enable 2.2s to 3.5s */
+ #define PHY_MDM6600_ENABLED_DELAY_MS 8000 /* 8s more total for MDM6600 */
++#define PHY_MDM6600_WAKE_KICK_MS 600 /* time on after GPIO toggle */
+ #define MDM6600_MODEM_IDLE_DELAY_MS 1000 /* modem after USB suspend */
+ #define MDM6600_MODEM_WAKE_DELAY_MS 200 /* modem response after idle */
+
+@@ -224,10 +225,24 @@ static irqreturn_t phy_mdm6600_wakeirq_thread(int irq, void *data)
+ {
+ struct phy_mdm6600 *ddata = data;
+ struct gpio_desc *mode_gpio1;
++ int error, wakeup;
+
+ mode_gpio1 = ddata->mode_gpios->desc[PHY_MDM6600_MODE1];
+- dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n",
+- gpiod_get_value(mode_gpio1));
++ wakeup = gpiod_get_value(mode_gpio1);
++ if (!wakeup)
++ return IRQ_NONE;
++
++ dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n", wakeup);
++ error = pm_runtime_get_sync(ddata->dev);
++ if (error < 0) {
++ pm_runtime_put_noidle(ddata->dev);
++
++ return IRQ_NONE;
++ }
++
++ /* Just wake-up and kick the autosuspend timer */
++ pm_runtime_mark_last_busy(ddata->dev);
++ pm_runtime_put_autosuspend(ddata->dev);
+
+ return IRQ_HANDLED;
+ }
+@@ -477,8 +492,14 @@ static void phy_mdm6600_modem_wake(struct work_struct *work)
+
+ ddata = container_of(work, struct phy_mdm6600, modem_wake_work.work);
+ phy_mdm6600_wake_modem(ddata);
++
++ /*
++ * The modem does not always stay awake 1.2 seconds after toggling
++ * the wake GPIO, and sometimes it idles after about some 600 ms
++ * making writes time out.
++ */
+ schedule_delayed_work(&ddata->modem_wake_work,
+- msecs_to_jiffies(MDM6600_MODEM_IDLE_DELAY_MS));
++ msecs_to_jiffies(PHY_MDM6600_WAKE_KICK_MS));
+ }
+
+ static int __maybe_unused phy_mdm6600_runtime_suspend(struct device *dev)
+diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
+index 2a3f874a21d5..9cebff8e8d74 100644
+--- a/drivers/s390/cio/blacklist.c
++++ b/drivers/s390/cio/blacklist.c
+@@ -303,8 +303,10 @@ static void *
+ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
+ {
+ struct ccwdev_iter *iter;
++ loff_t p = *offset;
+
+- if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
++ (*offset)++;
++ if (p >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+ return NULL;
+ iter = it;
+ if (iter->devno == __MAX_SUBCHANNEL) {
+@@ -314,7 +316,6 @@ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
+ return NULL;
+ } else
+ iter->devno++;
+- (*offset)++;
+ return iter;
+ }
+
+diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
+index 034528a5453e..d040c4920ee7 100644
+--- a/drivers/s390/cio/qdio_setup.c
++++ b/drivers/s390/cio/qdio_setup.c
+@@ -8,6 +8,7 @@
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+ #include <linux/export.h>
++#include <linux/io.h>
+ #include <asm/qdio.h>
+
+ #include "cio.h"
+@@ -208,7 +209,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
+
+ /* fill in sl */
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+- q->sl->element[j].sbal = (unsigned long)q->sbal[j];
++ q->sl->element[j].sbal = virt_to_phys(q->sbal[j]);
+ }
+
+ static void setup_queues(struct qdio_irq *irq_ptr,
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 81e2c591acb0..d99bfbfcafb7 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -4929,10 +4929,10 @@ static void qeth_qdio_establish_cq(struct qeth_card *card,
+ if (card->options.cq == QETH_CQ_ENABLED) {
+ int offset = QDIO_MAX_BUFFERS_PER_Q *
+ (card->qdio.no_in_queues - 1);
+- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+- in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
+- virt_to_phys(card->qdio.c_q->bufs[i].buffer);
+- }
++
++ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
++ in_sbal_ptrs[offset + i] =
++ card->qdio.c_q->bufs[i].buffer;
+
+ queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
+ }
+@@ -4967,10 +4967,9 @@ static int qeth_qdio_establish(struct qeth_card *card)
+ rc = -ENOMEM;
+ goto out_free_qib_param;
+ }
+- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+- in_sbal_ptrs[i] = (struct qdio_buffer *)
+- virt_to_phys(card->qdio.in_q->bufs[i].buffer);
+- }
++
++ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
++ in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer;
+
+ queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
+ GFP_KERNEL);
+@@ -4991,11 +4990,11 @@ static int qeth_qdio_establish(struct qeth_card *card)
+ rc = -ENOMEM;
+ goto out_free_queue_start_poll;
+ }
++
+ for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
+- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
+- out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
+- card->qdio.out_qs[i]->bufs[j]->buffer);
+- }
++ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++, k++)
++ out_sbal_ptrs[k] =
++ card->qdio.out_qs[i]->bufs[j]->buffer;
+
+ memset(&init_data, 0, sizeof(struct qdio_initialize));
+ init_data.cdev = CARD_DDEV(card);
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index b094a4e55c32..81bd824bb9d9 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -530,7 +530,8 @@ retry_alloc:
+
+ fusion->io_request_frames =
+ dma_pool_alloc(fusion->io_request_frames_pool,
+- GFP_KERNEL, &fusion->io_request_frames_phys);
++ GFP_KERNEL | __GFP_NOWARN,
++ &fusion->io_request_frames_phys);
+ if (!fusion->io_request_frames) {
+ if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
+ instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
+@@ -568,7 +569,7 @@ retry_alloc:
+
+ fusion->io_request_frames =
+ dma_pool_alloc(fusion->io_request_frames_pool,
+- GFP_KERNEL,
++ GFP_KERNEL | __GFP_NOWARN,
+ &fusion->io_request_frames_phys);
+
+ if (!fusion->io_request_frames) {
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index 59feda261e08..5be4212312cb 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -866,6 +866,8 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
+ dev, 1, 0);
++ while (pm8001_dev->running_req)
++ msleep(20);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ }
+ PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
+@@ -1238,8 +1240,10 @@ int pm8001_abort_task(struct sas_task *task)
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Waiting for Port reset\n"));
+ wait_for_completion(&completion_reset);
+- if (phy->port_reset_status)
++ if (phy->port_reset_status) {
++ pm8001_dev_gone_notify(dev);
+ goto out;
++ }
+
+ /*
+ * 4. SATA Abort ALL
+diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
+index 8627feb80261..c63b5db435c5 100644
+--- a/drivers/scsi/pm8001/pm80xx_hwi.c
++++ b/drivers/scsi/pm8001/pm80xx_hwi.c
+@@ -604,7 +604,7 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &=
+ 0x0000ffff;
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
+- 0x140000;
++ CHIP_8006_PORT_RECOVERY_TIMEOUT;
+ }
+ pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
+diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
+index 7dd2699d0efb..bbe1747234ff 100644
+--- a/drivers/scsi/pm8001/pm80xx_hwi.h
++++ b/drivers/scsi/pm8001/pm80xx_hwi.h
+@@ -228,6 +228,8 @@
+ #define SAS_MAX_AIP 0x200000
+ #define IT_NEXUS_TIMEOUT 0x7D0
+ #define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30)
++/* Port recovery timeout, 10000 ms for PM8006 controller */
++#define CHIP_8006_PORT_RECOVERY_TIMEOUT 0x640000
+
+ #ifdef __LITTLE_ENDIAN_BITFIELD
+ struct sas_identify_frame_local {
+diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
+index 9a06ffdb73b8..1669c554ea34 100644
+--- a/drivers/spi/spi-bcm63xx-hsspi.c
++++ b/drivers/spi/spi-bcm63xx-hsspi.c
+@@ -371,7 +371,6 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
+ goto out_disable_clk;
+
+ rate = clk_get_rate(pll_clk);
+- clk_disable_unprepare(pll_clk);
+ if (!rate) {
+ ret = -EINVAL;
+ goto out_disable_pll_clk;
+diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
+index 0089aa305ef9..870735776437 100644
+--- a/drivers/tty/serial/8250/8250_exar.c
++++ b/drivers/tty/serial/8250/8250_exar.c
+@@ -24,6 +24,14 @@
+
+ #include "8250.h"
+
++#define PCI_DEVICE_ID_ACCES_COM_2S 0x1052
++#define PCI_DEVICE_ID_ACCES_COM_4S 0x105d
++#define PCI_DEVICE_ID_ACCES_COM_8S 0x106c
++#define PCI_DEVICE_ID_ACCES_COM232_8 0x10a8
++#define PCI_DEVICE_ID_ACCES_COM_2SM 0x10d2
++#define PCI_DEVICE_ID_ACCES_COM_4SM 0x10db
++#define PCI_DEVICE_ID_ACCES_COM_8SM 0x10ea
++
+ #define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002
+ #define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004
+ #define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a
+@@ -571,6 +579,22 @@ static int __maybe_unused exar_resume(struct device *dev)
+
+ static SIMPLE_DEV_PM_OPS(exar_pci_pm, exar_suspend, exar_resume);
+
++static const struct exar8250_board acces_com_2x = {
++ .num_ports = 2,
++ .setup = pci_xr17c154_setup,
++};
++
++static const struct exar8250_board acces_com_4x = {
++ .num_ports = 4,
++ .setup = pci_xr17c154_setup,
++};
++
++static const struct exar8250_board acces_com_8x = {
++ .num_ports = 8,
++ .setup = pci_xr17c154_setup,
++};
++
++
+ static const struct exar8250_board pbn_fastcom335_2 = {
+ .num_ports = 2,
+ .setup = pci_fastcom335_setup,
+@@ -639,6 +663,15 @@ static const struct exar8250_board pbn_exar_XR17V8358 = {
+ }
+
+ static const struct pci_device_id exar_pci_tbl[] = {
++ EXAR_DEVICE(ACCESSIO, ACCES_COM_2S, acces_com_2x),
++ EXAR_DEVICE(ACCESSIO, ACCES_COM_4S, acces_com_4x),
++ EXAR_DEVICE(ACCESSIO, ACCES_COM_8S, acces_com_8x),
++ EXAR_DEVICE(ACCESSIO, ACCES_COM232_8, acces_com_8x),
++ EXAR_DEVICE(ACCESSIO, ACCES_COM_2SM, acces_com_2x),
++ EXAR_DEVICE(ACCESSIO, ACCES_COM_4SM, acces_com_4x),
++ EXAR_DEVICE(ACCESSIO, ACCES_COM_8SM, acces_com_8x),
++
++
+ CONNECT_DEVICE(XR17C152, UART_2_232, pbn_connect),
+ CONNECT_DEVICE(XR17C154, UART_4_232, pbn_connect),
+ CONNECT_DEVICE(XR17C158, UART_8_232, pbn_connect),
+diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
+index 3bdd56a1021b..ea12f10610b6 100644
+--- a/drivers/tty/serial/ar933x_uart.c
++++ b/drivers/tty/serial/ar933x_uart.c
+@@ -286,6 +286,10 @@ static void ar933x_uart_set_termios(struct uart_port *port,
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_HOST_INT_EN);
+
++	/* enable RX and TX ready override */
++ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
++ AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
++
+ /* reenable the UART */
+ ar933x_uart_rmw(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
+@@ -418,6 +422,10 @@ static int ar933x_uart_startup(struct uart_port *port)
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_HOST_INT_EN);
+
++	/* enable RX and TX ready override */
++ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
++ AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
++
+ /* Enable RX interrupts */
+ up->ier = AR933X_UART_INT_RX_VALID;
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
+index 7d26c9b57d8e..fb9d369e0f50 100644
+--- a/drivers/tty/serial/mvebu-uart.c
++++ b/drivers/tty/serial/mvebu-uart.c
+@@ -840,7 +840,7 @@ static int mvebu_uart_probe(struct platform_device *pdev)
+
+ port->membase = devm_ioremap_resource(&pdev->dev, reg);
+ if (IS_ERR(port->membase))
+- return -PTR_ERR(port->membase);
++ return PTR_ERR(port->membase);
+
+ mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart),
+ GFP_KERNEL);
+diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
+index 3ac4fe549c2e..8a1671724835 100644
+--- a/drivers/tty/vt/selection.c
++++ b/drivers/tty/vt/selection.c
+@@ -14,6 +14,7 @@
+ #include <linux/tty.h>
+ #include <linux/sched.h>
+ #include <linux/mm.h>
++#include <linux/mutex.h>
+ #include <linux/slab.h>
+ #include <linux/types.h>
+
+@@ -43,6 +44,7 @@ static volatile int sel_start = -1; /* cleared by clear_selection */
+ static int sel_end;
+ static int sel_buffer_lth;
+ static char *sel_buffer;
++static DEFINE_MUTEX(sel_lock);
+
+ /* clear_selection, highlight and highlight_pointer can be called
+ from interrupt (via scrollback/front) */
+@@ -165,7 +167,7 @@ static int store_utf8(u32 c, char *p)
+ * The entire selection process is managed under the console_lock. It's
+ * a lot under the lock but its hardly a performance path
+ */
+-int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty)
++static int __set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty)
+ {
+ struct vc_data *vc = vc_cons[fg_console].d;
+ int new_sel_start, new_sel_end, spc;
+@@ -173,7 +175,7 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
+ char *bp, *obp;
+ int i, ps, pe, multiplier;
+ u32 c;
+- int mode;
++ int mode, ret = 0;
+
+ poke_blanked_console();
+ if (copy_from_user(&v, sel, sizeof(*sel)))
+@@ -322,7 +324,21 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
+ }
+ }
+ sel_buffer_lth = bp - sel_buffer;
+- return 0;
++
++ return ret;
++}
++
++int set_selection(const struct tiocl_selection __user *v, struct tty_struct *tty)
++{
++ int ret;
++
++ mutex_lock(&sel_lock);
++ console_lock();
++ ret = __set_selection(v, tty);
++ console_unlock();
++ mutex_unlock(&sel_lock);
++
++ return ret;
+ }
+
+ /* Insert the contents of the selection buffer into the
+@@ -351,6 +367,7 @@ int paste_selection(struct tty_struct *tty)
+ tty_buffer_lock_exclusive(&vc->port);
+
+ add_wait_queue(&vc->paste_wait, &wait);
++ mutex_lock(&sel_lock);
+ while (sel_buffer && sel_buffer_lth > pasted) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current)) {
+@@ -358,7 +375,9 @@ int paste_selection(struct tty_struct *tty)
+ break;
+ }
+ if (tty_throttled(tty)) {
++ mutex_unlock(&sel_lock);
+ schedule();
++ mutex_lock(&sel_lock);
+ continue;
+ }
+ __set_current_state(TASK_RUNNING);
+@@ -367,6 +386,7 @@ int paste_selection(struct tty_struct *tty)
+ count);
+ pasted += count;
+ }
++ mutex_unlock(&sel_lock);
+ remove_wait_queue(&vc->paste_wait, &wait);
+ __set_current_state(TASK_RUNNING);
+
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index ddaecb1bd9fd..5cecf529562a 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3022,9 +3022,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
+ switch (type)
+ {
+ case TIOCL_SETSEL:
+- console_lock();
+ ret = set_selection((struct tiocl_selection __user *)(p+1), tty);
+- console_unlock();
+ break;
+ case TIOCL_PASTESEL:
+ ret = paste_selection(tty);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 27486b0a027a..8cf2d2a5e266 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -961,13 +961,17 @@ int usb_remove_device(struct usb_device *udev)
+ {
+ struct usb_hub *hub;
+ struct usb_interface *intf;
++ int ret;
+
+ if (!udev->parent) /* Can't remove a root hub */
+ return -EINVAL;
+ hub = usb_hub_to_struct_hub(udev->parent);
+ intf = to_usb_interface(hub->intfdev);
+
+- usb_autopm_get_interface(intf);
++ ret = usb_autopm_get_interface(intf);
++ if (ret < 0)
++ return ret;
++
+ set_bit(udev->portnum, hub->removed_bits);
+ hub_port_logical_disconnect(hub, udev->portnum);
+ usb_autopm_put_interface(intf);
+@@ -1833,7 +1837,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+
+ if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) {
+ hub->quirk_disable_autosuspend = 1;
+- usb_autopm_get_interface(intf);
++ usb_autopm_get_interface_no_resume(intf);
+ }
+
+ if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
+diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
+index 4a2143195395..1fe83b522d5f 100644
+--- a/drivers/usb/core/port.c
++++ b/drivers/usb/core/port.c
+@@ -203,7 +203,10 @@ static int usb_port_runtime_resume(struct device *dev)
+ if (!port_dev->is_superspeed && peer)
+ pm_runtime_get_sync(&peer->dev);
+
+- usb_autopm_get_interface(intf);
++ retval = usb_autopm_get_interface(intf);
++ if (retval < 0)
++ return retval;
++
+ retval = usb_hub_set_port_power(hdev, hub, port1, true);
+ msleep(hub_power_on_good_delay(hub));
+ if (udev && !retval) {
+@@ -256,7 +259,10 @@ static int usb_port_runtime_suspend(struct device *dev)
+ if (usb_port_block_power_off)
+ return -EBUSY;
+
+- usb_autopm_get_interface(intf);
++ retval = usb_autopm_get_interface(intf);
++ if (retval < 0)
++ return retval;
++
+ retval = usb_hub_set_port_power(hdev, hub, port1, false);
+ usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
+ if (!port_dev->is_superspeed)
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 2b24336a72e5..2dac3e7cdd97 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -231,6 +231,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Logitech PTZ Pro Camera */
+ { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
+
++ /* Logitech Screen Share */
++ { USB_DEVICE(0x046d, 0x086c), .driver_info = USB_QUIRK_NO_LPM },
++
+ /* Logitech Quickcam Fusion */
+ { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
+
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 430cfd620854..d482f89ffae2 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1067,7 +1067,14 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
+ unsigned int rem = length % maxp;
+ unsigned chain = true;
+
+- if (sg_is_last(s))
++ /*
++	 * The IOMMU driver may coalesce sgs that share a page
++	 * boundary into one before handing the list to the USB
++	 * driver, so the number of mapped sgs may not equal the
++	 * number of sgs passed in. Clear the chain bit if this is
++	 * the last mapped sg.
++ */
++ if (i == remaining - 1)
+ chain = false;
+
+ if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index fea7c7e0143f..30aefd1adbad 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -438,9 +438,13 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
+ if (!val)
+ return 0;
+ if (speed < USB_SPEED_SUPER)
+- return DIV_ROUND_UP(val, 2);
++ return min(val, 500U) / 2;
+ else
+- return DIV_ROUND_UP(val, 8);
++ /*
++		 * USB 3.x supports up to 900mA, but since 900 isn't divisible
++		 * by 8, the integer division effectively caps this at 896mA.
++ */
++ return min(val, 900U) / 8;
+ }
+
+ static int config_buf(struct usb_configuration *config,
+@@ -838,6 +842,10 @@ static int set_config(struct usb_composite_dev *cdev,
+
+ /* when we return, be sure our power usage is valid */
+ power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
++ if (gadget->speed < USB_SPEED_SUPER)
++ power = min(power, 500U);
++ else
++ power = min(power, 900U);
+ done:
+ usb_gadget_vbus_draw(gadget, power);
+ if (result >= 0 && cdev->delayed_status)
+@@ -2264,7 +2272,7 @@ void composite_resume(struct usb_gadget *gadget)
+ {
+ struct usb_composite_dev *cdev = get_gadget_data(gadget);
+ struct usb_function *f;
+- u16 maxpower;
++ unsigned maxpower;
+
+ /* REVISIT: should we have config level
+ * suspend/resume callbacks?
+@@ -2278,10 +2286,14 @@ void composite_resume(struct usb_gadget *gadget)
+ f->resume(f);
+ }
+
+- maxpower = cdev->config->MaxPower;
++ maxpower = cdev->config->MaxPower ?
++ cdev->config->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
++ if (gadget->speed < USB_SPEED_SUPER)
++ maxpower = min(maxpower, 500U);
++ else
++ maxpower = min(maxpower, 900U);
+
+- usb_gadget_vbus_draw(gadget, maxpower ?
+- maxpower : CONFIG_USB_GADGET_VBUS_DRAW);
++ usb_gadget_vbus_draw(gadget, maxpower);
+ }
+
+ cdev->suspended = 0;
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 2050993fb58b..a9239455eb6d 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -1077,18 +1077,19 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
+ {
+ struct ffs_io_data *io_data = kiocb->private;
+ struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
++ unsigned long flags;
+ int value;
+
+ ENTER();
+
+- spin_lock_irq(&epfile->ffs->eps_lock);
++ spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
+
+ if (likely(io_data && io_data->ep && io_data->req))
+ value = usb_ep_dequeue(io_data->ep, io_data->req);
+ else
+ value = -EINVAL;
+
+- spin_unlock_irq(&epfile->ffs->eps_lock);
++ spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
+
+ return value;
+ }
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index d4d317db89df..38afe96c5cd2 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -567,8 +567,10 @@ static int gs_start_io(struct gs_port *port)
+ port->n_read = 0;
+ started = gs_start_rx(port);
+
+- /* unblock any pending writes into our circular buffer */
+ if (started) {
++ gs_start_tx(port);
++		/* Unblock any pending writes into our circular buffer, in
++		 * case gs_start_tx() didn't already. */
+ tty_wakeup(port->port.tty);
+ } else {
+ gs_free_requests(ep, head, &port->read_allocated);
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 1cd9b6305b06..1880f3e13f57 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1258,6 +1258,12 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
+ USB_SC_RBC, USB_PR_BULK, NULL,
+ 0 ),
+
++UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100,
++ "Samsung",
++ "Flash Drive FIT",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_MAX_SECTORS_64),
++
+ /* aeb */
+ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
+ "Feiya",
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index c6b3bdbbdbc9..bfaa9ec4bc1f 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -1316,6 +1316,9 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
+ static int vgacon_resize(struct vc_data *c, unsigned int width,
+ unsigned int height, unsigned int user)
+ {
++ if ((width << 1) * height > vga_vram_size)
++ return -EINVAL;
++
+ if (width % 2 || width > screen_info.orig_video_cols ||
+ height > (screen_info.orig_video_lines * vga_default_font_height)/
+ c->vc_font.height)
+diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
+index fe169d8e1fb2..7f0a8e635286 100644
+--- a/drivers/watchdog/da9062_wdt.c
++++ b/drivers/watchdog/da9062_wdt.c
+@@ -99,13 +99,6 @@ static int da9062_wdt_stop(struct watchdog_device *wdd)
+ struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
+ int ret;
+
+- ret = da9062_reset_watchdog_timer(wdt);
+- if (ret) {
+- dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = %d)\n",
+- ret);
+- return ret;
+- }
+-
+ ret = regmap_update_bits(wdt->hw->regmap,
+ DA9062AA_CONTROL_D,
+ DA9062AA_TWDSCALE_MASK,
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index fbebf241dbf2..51d410c6f6a4 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -2003,6 +2003,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
+ struct inode *inode = d_inode(dentry);
+ struct super_block *sb = dentry->d_sb;
+ char *full_path = NULL;
++ int count = 0;
+
+ if (inode == NULL)
+ return -ENOENT;
+@@ -2024,15 +2025,18 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
+ full_path, inode, inode->i_count.counter,
+ dentry, cifs_get_time(dentry), jiffies);
+
++again:
+ if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
+ rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
+ else
+ rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
+ xid, NULL);
+-
++ if (rc == -EAGAIN && count++ < 10)
++ goto again;
+ out:
+ kfree(full_path);
+ free_xid(xid);
++
+ return rc;
+ }
+
+diff --git a/fs/fat/inode.c b/fs/fat/inode.c
+index d6b81e31f9f5..70d37a5fd72c 100644
+--- a/fs/fat/inode.c
++++ b/fs/fat/inode.c
+@@ -743,6 +743,13 @@ static struct inode *fat_alloc_inode(struct super_block *sb)
+ return NULL;
+
+ init_rwsem(&ei->truncate_lock);
++	/* Zero these to allow iput() even on a partially initialized inode. */
++ ei->mmu_private = 0;
++ ei->i_start = 0;
++ ei->i_logstart = 0;
++ ei->i_attrs = 0;
++ ei->i_pos = 0;
++
+ return &ei->vfs_inode;
+ }
+
+@@ -1373,16 +1380,6 @@ out:
+ return 0;
+ }
+
+-static void fat_dummy_inode_init(struct inode *inode)
+-{
+- /* Initialize this dummy inode to work as no-op. */
+- MSDOS_I(inode)->mmu_private = 0;
+- MSDOS_I(inode)->i_start = 0;
+- MSDOS_I(inode)->i_logstart = 0;
+- MSDOS_I(inode)->i_attrs = 0;
+- MSDOS_I(inode)->i_pos = 0;
+-}
+-
+ static int fat_read_root(struct inode *inode)
+ {
+ struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+@@ -1827,13 +1824,11 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
+ fat_inode = new_inode(sb);
+ if (!fat_inode)
+ goto out_fail;
+- fat_dummy_inode_init(fat_inode);
+ sbi->fat_inode = fat_inode;
+
+ fsinfo_inode = new_inode(sb);
+ if (!fsinfo_inode)
+ goto out_fail;
+- fat_dummy_inode_init(fsinfo_inode);
+ fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
+ sbi->fsinfo_inode = fsinfo_inode;
+ insert_inode_hash(fsinfo_inode);
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 00050a22f6a1..92aad49b82f9 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -625,6 +625,18 @@ void wait_for_kprobe_optimizer(void)
+ mutex_unlock(&kprobe_mutex);
+ }
+
++static bool optprobe_queued_unopt(struct optimized_kprobe *op)
++{
++ struct optimized_kprobe *_op;
++
++ list_for_each_entry(_op, &unoptimizing_list, list) {
++ if (op == _op)
++ return true;
++ }
++
++ return false;
++}
++
+ /* Optimize kprobe if p is ready to be optimized */
+ static void optimize_kprobe(struct kprobe *p)
+ {
+@@ -646,17 +658,21 @@ static void optimize_kprobe(struct kprobe *p)
+ return;
+
+ /* Check if it is already optimized. */
+- if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
++ if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
++ if (optprobe_queued_unopt(op)) {
++ /* This is under unoptimizing. Just dequeue the probe */
++ list_del_init(&op->list);
++ }
+ return;
++ }
+ op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
+
+- if (!list_empty(&op->list))
+- /* This is under unoptimizing. Just dequeue the probe */
+- list_del_init(&op->list);
+- else {
+- list_add(&op->list, &optimizing_list);
+- kick_kprobe_optimizer();
+- }
++ /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
++ if (WARN_ON_ONCE(!list_empty(&op->list)))
++ return;
++
++ list_add(&op->list, &optimizing_list);
++ kick_kprobe_optimizer();
+ }
+
+ /* Short cut to direct unoptimizing */
+@@ -678,30 +694,33 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
+ return; /* This is not an optprobe nor optimized */
+
+ op = container_of(p, struct optimized_kprobe, kp);
+- if (!kprobe_optimized(p)) {
+- /* Unoptimized or unoptimizing case */
+- if (force && !list_empty(&op->list)) {
+- /*
+- * Only if this is unoptimizing kprobe and forced,
+- * forcibly unoptimize it. (No need to unoptimize
+- * unoptimized kprobe again :)
+- */
+- list_del_init(&op->list);
+- force_unoptimize_kprobe(op);
+- }
++ if (!kprobe_optimized(p))
+ return;
+- }
+
+ if (!list_empty(&op->list)) {
+- /* Dequeue from the optimization queue */
+- list_del_init(&op->list);
++ if (optprobe_queued_unopt(op)) {
++ /* Queued in unoptimizing queue */
++ if (force) {
++ /*
++ * Forcibly unoptimize the kprobe here, and queue it
++ * in the freeing list for release afterwards.
++ */
++ force_unoptimize_kprobe(op);
++ list_move(&op->list, &freeing_list);
++ }
++ } else {
++ /* Dequeue from the optimizing queue */
++ list_del_init(&op->list);
++ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
++ }
+ return;
+ }
++
+ /* Optimized kprobe case */
+- if (force)
++ if (force) {
+ /* Forcibly update the code: this is a special case */
+ force_unoptimize_kprobe(op);
+- else {
++ } else {
+ list_add(&op->list, &unoptimizing_list);
+ kick_kprobe_optimizer();
+ }
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 146998357bed..280b0e71b783 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2949,8 +2949,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+ return;
+
+ flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
+- pmdval = *pvmw->pmd;
+- pmdp_invalidate(vma, address, pvmw->pmd);
++ pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
+ if (pmd_dirty(pmdval))
+ set_page_dirty(page);
+ entry = make_migration_entry(page, pmd_write(pmdval));
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index 6d331620b9e5..86837f25055b 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -162,6 +162,31 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+ return pages;
+ }
+
++/*
++ * Used when setting automatic NUMA hinting protection where it is
++ * critical that a numa hinting PMD is not confused with a bad PMD.
++ */
++static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
++{
++ pmd_t pmdval = pmd_read_atomic(pmd);
++
++ /* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++ barrier();
++#endif
++
++ if (pmd_none(pmdval))
++ return 1;
++ if (pmd_trans_huge(pmdval))
++ return 0;
++ if (unlikely(pmd_bad(pmdval))) {
++ pmd_clear_bad(pmd);
++ return 1;
++ }
++
++ return 0;
++}
++
+ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+ pud_t *pud, unsigned long addr, unsigned long end,
+ pgprot_t newprot, int dirty_accountable, int prot_numa)
+@@ -178,8 +203,17 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+ unsigned long this_pages;
+
+ next = pmd_addr_end(addr, end);
+- if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
+- && pmd_none_or_clear_bad(pmd))
++
++ /*
++ * Automatic NUMA balancing walks the tables with mmap_sem
++		 * held for read. It's possible for a parallel update to occur
++		 * between the pmd_trans_huge() and pmd_none_or_clear_bad()
++		 * checks, leading to a false positive and clearing.
++ * Hence, it's necessary to atomically read the PMD value
++ * for all the checks.
++ */
++ if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
++ pmd_none_or_clear_bad_unless_trans_huge(pmd))
+ goto next;
+
+ /* invoke the mmu notifier if the pmd is populated */
+diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
+index 60cb00fd0c69..84b44cdae28a 100644
+--- a/sound/hda/ext/hdac_ext_controller.c
++++ b/sound/hda/ext/hdac_ext_controller.c
+@@ -262,6 +262,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down_all);
+ int snd_hdac_ext_bus_link_get(struct hdac_bus *bus,
+ struct hdac_ext_link *link)
+ {
++ unsigned long codec_mask;
+ int ret = 0;
+
+ mutex_lock(&bus->lock);
+@@ -283,9 +284,11 @@ int snd_hdac_ext_bus_link_get(struct hdac_bus *bus,
+ * HDA spec section 4.3 - Codec Discovery
+ */
+ udelay(521);
+- bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS);
+- dev_dbg(bus->dev, "codec_mask = 0x%lx\n", bus->codec_mask);
+- snd_hdac_chip_writew(bus, STATESTS, bus->codec_mask);
++ codec_mask = snd_hdac_chip_readw(bus, STATESTS);
++ dev_dbg(bus->dev, "codec_mask = 0x%lx\n", codec_mask);
++ snd_hdac_chip_writew(bus, STATESTS, codec_mask);
++ if (!bus->codec_mask)
++ bus->codec_mask = codec_mask;
+ }
+
+ mutex_unlock(&bus->lock);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 94fffc0675a7..86ab022f386e 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2442,6 +2442,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
++ SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
+@@ -6465,6 +6466,8 @@ static const struct hda_fixup alc269_fixups[] = {
+ [ALC285_FIXUP_SPEAKER2_TO_DAC1] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_speaker2_to_dac1,
++ .chained = true,
++ .chain_id = ALC269_FIXUP_THINKPAD_ACPI
+ },
+ [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
+ .type = HDA_FIXUP_PINS,
+@@ -6886,6 +6889,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+ SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
++ SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
+index f0f2d4fd3769..5272c81641c1 100644
+--- a/sound/soc/codecs/pcm512x.c
++++ b/sound/soc/codecs/pcm512x.c
+@@ -1437,13 +1437,15 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
+ }
+
+ pcm512x->sclk = devm_clk_get(dev, NULL);
+- if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) {
++ ret = -EPROBE_DEFER;
++ goto err;
++ }
+ if (!IS_ERR(pcm512x->sclk)) {
+ ret = clk_prepare_enable(pcm512x->sclk);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable SCLK: %d\n", ret);
+- return ret;
++ goto err;
+ }
+ }
+
+diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c
+index faf1cba57abb..a0714c0e6e8b 100644
+--- a/sound/soc/intel/skylake/skl-debug.c
++++ b/sound/soc/intel/skylake/skl-debug.c
+@@ -42,8 +42,8 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf,
+ int i;
+ ssize_t ret = 0;
+
+- for (i = 0; i < max_pin; i++)
+- ret += snprintf(buf + size, MOD_BUF - size,
++ for (i = 0; i < max_pin; i++) {
++ ret += scnprintf(buf + size, MOD_BUF - size,
+ "%s %d\n\tModule %d\n\tInstance %d\n\t"
+ "In-used %s\n\tType %s\n"
+ "\tState %d\n\tIndex %d\n",
+@@ -53,13 +53,15 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf,
+ m_pin[i].in_use ? "Used" : "Unused",
+ m_pin[i].is_dynamic ? "Dynamic" : "Static",
+ m_pin[i].pin_state, i);
++ size += ret;
++ }
+ return ret;
+ }
+
+ static ssize_t skl_print_fmt(struct skl_module_fmt *fmt, char *buf,
+ ssize_t size, bool direction)
+ {
+- return snprintf(buf + size, MOD_BUF - size,
++ return scnprintf(buf + size, MOD_BUF - size,
+ "%s\n\tCh %d\n\tFreq %d\n\tBit depth %d\n\t"
+ "Valid bit depth %d\n\tCh config %#x\n\tInterleaving %d\n\t"
+ "Sample Type %d\n\tCh Map %#x\n",
+@@ -81,16 +83,16 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
+ if (!buf)
+ return -ENOMEM;
+
+- ret = snprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n"
++ ret = scnprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n"
+ "\tInstance id %d\n\tPvt_id %d\n", mconfig->guid,
+ mconfig->id.module_id, mconfig->id.instance_id,
+ mconfig->id.pvt_id);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "Resources:\n\tMCPS %#x\n\tIBS %#x\n\tOBS %#x\t\n",
+ mconfig->mcps, mconfig->ibs, mconfig->obs);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "Module data:\n\tCore %d\n\tIn queue %d\n\t"
+ "Out queue %d\n\tType %s\n",
+ mconfig->core_id, mconfig->max_in_queue,
+@@ -100,38 +102,38 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
+ ret += skl_print_fmt(mconfig->in_fmt, buf, ret, true);
+ ret += skl_print_fmt(mconfig->out_fmt, buf, ret, false);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "Fixup:\n\tParams %#x\n\tConverter %#x\n",
+ mconfig->params_fixup, mconfig->converter);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "Module Gateway:\n\tType %#x\n\tVbus %#x\n\tHW conn %#x\n\tSlot %#x\n",
+ mconfig->dev_type, mconfig->vbus_id,
+ mconfig->hw_conn_type, mconfig->time_slot);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "Pipeline:\n\tID %d\n\tPriority %d\n\tConn Type %d\n\t"
+ "Pages %#x\n", mconfig->pipe->ppl_id,
+ mconfig->pipe->pipe_priority, mconfig->pipe->conn_type,
+ mconfig->pipe->memory_pages);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "\tParams:\n\t\tHost DMA %d\n\t\tLink DMA %d\n",
+ mconfig->pipe->p_params->host_dma_id,
+ mconfig->pipe->p_params->link_dma_id);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "\tPCM params:\n\t\tCh %d\n\t\tFreq %d\n\t\tFormat %d\n",
+ mconfig->pipe->p_params->ch,
+ mconfig->pipe->p_params->s_freq,
+ mconfig->pipe->p_params->s_fmt);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "\tLink %#x\n\tStream %#x\n",
+ mconfig->pipe->p_params->linktype,
+ mconfig->pipe->p_params->stream);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "\tState %d\n\tPassthru %s\n",
+ mconfig->pipe->state,
+ mconfig->pipe->passthru ? "true" : "false");
+@@ -141,7 +143,7 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
+ ret += skl_print_pins(mconfig->m_out_pin, buf,
+ mconfig->max_out_queue, ret, false);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "Other:\n\tDomain %d\n\tHomogeneous Input %s\n\t"
+ "Homogeneous Output %s\n\tIn Queue Mask %d\n\t"
+ "Out Queue Mask %d\n\tDMA ID %d\n\tMem Pages %d\n\t"
+@@ -199,7 +201,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf,
+ __ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
+
+ for (offset = 0; offset < FW_REG_SIZE; offset += 16) {
+- ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
++ ret += scnprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
+ hex_dump_to_buffer(d->fw_read_buff + offset, 16, 16, 4,
+ tmp + ret, FW_REG_BUF - ret, 0);
+ ret += strlen(tmp + ret);
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 7f0b48b36380..db5b005f4b1e 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -4551,7 +4551,7 @@ static void soc_dapm_shutdown_dapm(struct snd_soc_dapm_context *dapm)
+ continue;
+ if (w->power) {
+ dapm_seq_insert(w, &down_list, false);
+- w->power = 0;
++ w->new_power = 0;
+ powerdown = 1;
+ }
+ }
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index f7d4a77028f2..356d4e754561 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -3357,16 +3357,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
+ ssize_t offset = 0;
+
+ /* FE state */
+- offset += snprintf(buf + offset, size - offset,
++ offset += scnprintf(buf + offset, size - offset,
+ "[%s - %s]\n", fe->dai_link->name,
+ stream ? "Capture" : "Playback");
+
+- offset += snprintf(buf + offset, size - offset, "State: %s\n",
++ offset += scnprintf(buf + offset, size - offset, "State: %s\n",
+ dpcm_state_string(fe->dpcm[stream].state));
+
+ if ((fe->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
+ (fe->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
+- offset += snprintf(buf + offset, size - offset,
++ offset += scnprintf(buf + offset, size - offset,
+ "Hardware Params: "
+ "Format = %s, Channels = %d, Rate = %d\n",
+ snd_pcm_format_name(params_format(params)),
+@@ -3374,10 +3374,10 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
+ params_rate(params));
+
+ /* BEs state */
+- offset += snprintf(buf + offset, size - offset, "Backends:\n");
++ offset += scnprintf(buf + offset, size - offset, "Backends:\n");
+
+ if (list_empty(&fe->dpcm[stream].be_clients)) {
+- offset += snprintf(buf + offset, size - offset,
++ offset += scnprintf(buf + offset, size - offset,
+ " No active DSP links\n");
+ goto out;
+ }
+@@ -3386,16 +3386,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
+ struct snd_soc_pcm_runtime *be = dpcm->be;
+ params = &dpcm->hw_params;
+
+- offset += snprintf(buf + offset, size - offset,
++ offset += scnprintf(buf + offset, size - offset,
+ "- %s\n", be->dai_link->name);
+
+- offset += snprintf(buf + offset, size - offset,
++ offset += scnprintf(buf + offset, size - offset,
+ " State: %s\n",
+ dpcm_state_string(be->dpcm[stream].state));
+
+ if ((be->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
+ (be->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
+- offset += snprintf(buf + offset, size - offset,
++ offset += scnprintf(buf + offset, size - offset,
+ " Hardware Params: "
+ "Format = %s, Channels = %d, Rate = %d\n",
+ snd_pcm_format_name(params_format(params)),
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index 069f38fbf07b..30fc45aa1869 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -2154,8 +2154,11 @@ static int soc_tplg_link_elems_load(struct soc_tplg *tplg,
+ }
+
+ ret = soc_tplg_link_config(tplg, _link);
+- if (ret < 0)
++ if (ret < 0) {
++ if (!abi_match)
++ kfree(_link);
+ return ret;
++ }
+
+ /* offset by version-specific struct size and
+ * real priv data size
+@@ -2310,7 +2313,7 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
+ {
+ struct snd_soc_tplg_manifest *manifest, *_manifest;
+ bool abi_match;
+- int err;
++ int ret = 0;
+
+ if (tplg->pass != SOC_TPLG_PASS_MANIFEST)
+ return 0;
+@@ -2323,19 +2326,19 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
+ _manifest = manifest;
+ } else {
+ abi_match = false;
+- err = manifest_new_ver(tplg, manifest, &_manifest);
+- if (err < 0)
+- return err;
++ ret = manifest_new_ver(tplg, manifest, &_manifest);
++ if (ret < 0)
++ return ret;
+ }
+
+ /* pass control to component driver for optional further init */
+ if (tplg->comp && tplg->ops && tplg->ops->manifest)
+- return tplg->ops->manifest(tplg->comp, tplg->index, _manifest);
++ ret = tplg->ops->manifest(tplg->comp, tplg->index, _manifest);
+
+ if (!abi_match) /* free the duplicated one */
+ kfree(_manifest);
+
+- return 0;
++ return ret;
+ }
+
+ /* validate header magic, size and type */
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index 8b0f16409ed7..0ef203ec59fd 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -85,17 +85,20 @@ else
+ $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS))
+ endif
+
++define INSTALL_SINGLE_RULE
++ $(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH))
++ $(if $(INSTALL_LIST),@echo rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/)
++ $(if $(INSTALL_LIST),@rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/)
++endef
++
+ define INSTALL_RULE
+- @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
+- mkdir -p ${INSTALL_PATH}; \
+- echo "rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \
+- rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \
+- fi
+- @if [ "X$(TEST_GEN_PROGS)$(TEST_CUSTOM_PROGS)$(TEST_GEN_PROGS_EXTENDED)$(TEST_GEN_FILES)" != "X" ]; then \
+- mkdir -p ${INSTALL_PATH}; \
+- echo "rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/"; \
+- rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/; \
+- fi
++ $(eval INSTALL_LIST = $(TEST_PROGS)) $(INSTALL_SINGLE_RULE)
++ $(eval INSTALL_LIST = $(TEST_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE)
++ $(eval INSTALL_LIST = $(TEST_FILES)) $(INSTALL_SINGLE_RULE)
++ $(eval INSTALL_LIST = $(TEST_GEN_PROGS)) $(INSTALL_SINGLE_RULE)
++ $(eval INSTALL_LIST = $(TEST_CUSTOM_PROGS)) $(INSTALL_SINGLE_RULE)
++ $(eval INSTALL_LIST = $(TEST_GEN_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE)
++ $(eval INSTALL_LIST = $(TEST_GEN_FILES)) $(INSTALL_SINGLE_RULE)
+ endef
+
+ install: all
+diff --git a/tools/testing/selftests/net/forwarding/mirror_gre.sh b/tools/testing/selftests/net/forwarding/mirror_gre.sh
+index e6fd7a18c655..0266443601bc 100755
+--- a/tools/testing/selftests/net/forwarding/mirror_gre.sh
++++ b/tools/testing/selftests/net/forwarding/mirror_gre.sh
+@@ -63,22 +63,23 @@ test_span_gre_mac()
+ {
+ local tundev=$1; shift
+ local direction=$1; shift
+- local prot=$1; shift
+ local what=$1; shift
+
+- local swp3mac=$(mac_get $swp3)
+- local h3mac=$(mac_get $h3)
++ case "$direction" in
++ ingress) local src_mac=$(mac_get $h1); local dst_mac=$(mac_get $h2)
++ ;;
++ egress) local src_mac=$(mac_get $h2); local dst_mac=$(mac_get $h1)
++ ;;
++ esac
+
+ RET=0
+
+ mirror_install $swp1 $direction $tundev "matchall $tcflags"
+- tc filter add dev $h3 ingress pref 77 prot $prot \
+- flower ip_proto 0x2f src_mac $swp3mac dst_mac $h3mac \
+- action pass
++ icmp_capture_install h3-${tundev} "src_mac $src_mac dst_mac $dst_mac"
+
+- mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 10
++ mirror_test v$h1 192.0.2.1 192.0.2.2 h3-${tundev} 100 10
+
+- tc filter del dev $h3 ingress pref 77
++ icmp_capture_uninstall h3-${tundev}
+ mirror_uninstall $swp1 $direction
+
+ log_test "$direction $what: envelope MAC ($tcflags)"
+@@ -120,14 +121,14 @@ test_ip6gretap()
+
+ test_gretap_mac()
+ {
+- test_span_gre_mac gt4 ingress ip "mirror to gretap"
+- test_span_gre_mac gt4 egress ip "mirror to gretap"
++ test_span_gre_mac gt4 ingress "mirror to gretap"
++ test_span_gre_mac gt4 egress "mirror to gretap"
+ }
+
+ test_ip6gretap_mac()
+ {
+- test_span_gre_mac gt6 ingress ipv6 "mirror to ip6gretap"
+- test_span_gre_mac gt6 egress ipv6 "mirror to ip6gretap"
++ test_span_gre_mac gt6 ingress "mirror to ip6gretap"
++ test_span_gre_mac gt6 egress "mirror to ip6gretap"
+ }
+
+ test_all()