author     Mike Pagano <mpagano@gentoo.org>  2019-11-06 09:25:14 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2019-11-06 09:25:14 -0500
commit     4915351a6e62d9ac18b3f39c99b8a0176bb99374 (patch)
tree       44b018b591b139c45a57455cd381c88be39fbfed  /1151_linux-4.14.152.patch
parent     Linux patch 4.14.151 (diff)
download   linux-patches-4915351a6e62d9ac18b3f39c99b8a0176bb99374.tar.gz
           linux-patches-4915351a6e62d9ac18b3f39c99b8a0176bb99374.tar.bz2
           linux-patches-4915351a6e62d9ac18b3f39c99b8a0176bb99374.zip
Linux patch 4.14.152
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
Diffstat (limited to '1151_linux-4.14.152.patch')
-rw-r--r--  1151_linux-4.14.152.patch  3858
1 file changed, 3858 insertions, 0 deletions
diff --git a/1151_linux-4.14.152.patch b/1151_linux-4.14.152.patch
new file mode 100644
index 00000000..a44bd5ad
--- /dev/null
+++ b/1151_linux-4.14.152.patch
@@ -0,0 +1,3858 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index b67a6cd08ca1..671f518b09ee 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -4875,6 +4875,10 @@
+ the unplug protocol
+ never -- do not unplug even if version check succeeds
+
++ xen_legacy_crash [X86,XEN]
++ Crash from Xen panic notifier, without executing late
++ panic() code such as dumping handler.
++
+ xen_nopvspin [X86,XEN]
+ Disables the ticketlock slowpath using Xen PV
+ optimizations.
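Note on the hunk above: xen_legacy_crash is a bare flag with no value. To restore the pre-4.14.152 behaviour of crashing straight from the Xen panic notifier, it would simply be appended to the kernel command line, for example (the root= device shown is purely illustrative):

    root=/dev/xvda1 ro xen_legacy_crash

Without the flag, the notifier records SHUTDOWN_crash as the reboot reason and lets the normal late panic() handling (such as the dump handlers) run first, as the new parameter description says.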
+diff --git a/Makefile b/Makefile
+index db996459d047..1d7f47334ca2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 151
++SUBLEVEL = 152
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
+index 2db84df5eb42..20e45733afa4 100644
+--- a/arch/arm64/include/asm/pgtable-prot.h
++++ b/arch/arm64/include/asm/pgtable-prot.h
+@@ -43,11 +43,11 @@
+ #define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
+ #define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
+
+-#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+-#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
+-#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
+-#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
++#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
++#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
++#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
++#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
++#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+
+ #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+ #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+@@ -71,8 +71,9 @@
+ #define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
+
+ #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
+-#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+-#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
++/* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */
++#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
++#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
+ #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
+ #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
+ #define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
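A brief aside on the PTE_RDONLY | PTE_WRITE combination introduced above: on arm64, PTE_WRITE is a software bit while PTE_RDONLY is the bit the MMU actually honours, so a shared mapping created this way starts out write-protected ("clean") and only becomes hardware-dirty once a write fault (or hardware DBM) clears PTE_RDONLY. A minimal sketch of how that state is read back, loosely following arch/arm64/include/asm/pgtable.h of this era (the helper name here is made up; the real code uses pte_write() and pte_hw_dirty()):

/* Sketch: a pte is "hardware dirty" once it is writable and no longer RDONLY. */
static inline bool pte_hw_dirty_sketch(pte_t pte)
{
	return (pte_val(pte) & PTE_WRITE) && !(pte_val(pte) & PTE_RDONLY);
}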
+diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
+index fac79d75d1d9..6eefd5873aef 100644
+--- a/arch/arm64/kernel/ftrace.c
++++ b/arch/arm64/kernel/ftrace.c
+@@ -119,7 +119,13 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+
+ /*
+ * Ensure updated trampoline is visible to instruction
+- * fetch before we patch in the branch.
++ * fetch before we patch in the branch. Although the
++ * architecture doesn't require an IPI in this case,
++ * Neoverse-N1 erratum #1542419 does require one
++ * if the TLB maintenance in module_enable_ro() is
++ * skipped due to rodata_enabled. It doesn't seem worth
++ * it to make it conditional given that this is
++ * certainly not a fast-path.
+ */
+ flush_icache_range((unsigned long)&dst[0],
+ (unsigned long)&dst[1]);
+diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c
+index 6aa264b9856a..7c6151d412bd 100644
+--- a/arch/mips/fw/sni/sniprom.c
++++ b/arch/mips/fw/sni/sniprom.c
+@@ -42,7 +42,7 @@
+
+ /* O32 stack has to be 8-byte aligned. */
+ static u64 o32_stk[4096];
+-#define O32_STK &o32_stk[sizeof(o32_stk)]
++#define O32_STK (&o32_stk[ARRAY_SIZE(o32_stk)])
+
+ #define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
+ __asm__(#fun " = call_o32")
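The one-liner above fixes a classic sizeof-versus-element-count slip: o32_stk is an array of u64, so sizeof(o32_stk) is 4096 * 8 = 32768 and the old macro pointed 32768 elements (256 KiB) past the base instead of one-past-the-end. A standalone illustration, using a simplified ARRAY_SIZE (the in-kernel macro additionally carries a __must_be_array() check):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static unsigned long long o32_stk[4096];

int main(void)
{
	/* prints "sizeof: 32768, elements: 4096" on an LP64 host */
	printf("sizeof: %zu, elements: %zu\n",
	       sizeof(o32_stk), ARRAY_SIZE(o32_stk));
	return 0;
}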
+diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
+index 89e9fb7976fe..520ca166cbed 100644
+--- a/arch/mips/include/asm/cmpxchg.h
++++ b/arch/mips/include/asm/cmpxchg.h
+@@ -73,8 +73,8 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
+ extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
+ unsigned int size);
+
+-static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
+- int size)
++static __always_inline
++unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
+ {
+ switch (size) {
+ case 1:
+@@ -146,8 +146,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
+ extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size);
+
+-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+- unsigned long new, unsigned int size)
++static __always_inline
++unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
++ unsigned long new, unsigned int size)
+ {
+ switch (size) {
+ case 1:
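The switch to __always_inline in the two hunks above matters because __xchg()/__cmpxchg() rely on a link-time size check: __xchg_called_with_bad_pointer() is declared but never defined, and the call to it disappears only when the switch on size is folded away at compile time, which in turn requires the function to be genuinely inlined. A self-contained sketch of that idiom (the names here are hypothetical, not the MIPS code):

/* As in the kernel's compiler attributes. */
#define __always_inline inline __attribute__((__always_inline__))

extern unsigned long long __bad_size(void);	/* intentionally never defined */

static __always_inline unsigned long long fetch(const volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return *(const volatile unsigned int *)ptr;
	case 8:
		return *(const volatile unsigned long long *)ptr;
	default:
		/* survives to link time (and fails there) only if the
		 * switch could not be resolved at compile time */
		return __bad_size();
	}
}

int main(void)
{
	unsigned int x = 42;
	return (int)fetch(&x, sizeof(x));	/* links fine: size is a constant */
}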
+diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
+index c9a6d4f3403c..cfbd242c3e01 100644
+--- a/arch/powerpc/platforms/powernv/memtrace.c
++++ b/arch/powerpc/platforms/powernv/memtrace.c
+@@ -99,6 +99,7 @@ static int change_memblock_state(struct memory_block *mem, void *arg)
+ return 0;
+ }
+
++/* called with device_hotplug_lock held */
+ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
+ {
+ u64 end_pfn = start_pfn + nr_pages - 1;
+@@ -139,6 +140,7 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
+ /* Trace memory needs to be aligned to the size */
+ end_pfn = round_down(end_pfn - nr_pages, nr_pages);
+
++ lock_device_hotplug();
+ for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
+ if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) {
+ /*
+@@ -147,7 +149,6 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
+ * we never try to remove memory that spans two iomem
+ * resources.
+ */
+- lock_device_hotplug();
+ end_pfn = base_pfn + nr_pages;
+ for (pfn = base_pfn; pfn < end_pfn; pfn += bytes >> PAGE_SHIFT) {
+ remove_memory(nid, pfn << PAGE_SHIFT, bytes);
+@@ -156,6 +157,7 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
+ return base_pfn << PAGE_SHIFT;
+ }
+ }
++ unlock_device_hotplug();
+
+ return 0;
+ }
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index 689eae8d3859..bd7a19a0aecf 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -95,7 +95,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n);
+ __rc; \
+ })
+
+-static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
++static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
+ {
+ unsigned long spec = 0x810000UL;
+ int rc;
+@@ -125,7 +125,7 @@ static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
+ return rc;
+ }
+
+-static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
++static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
+ {
+ unsigned long spec = 0x81UL;
+ int rc;
+diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
+index b9d8fe45737a..8f8456816d83 100644
+--- a/arch/s390/kernel/idle.c
++++ b/arch/s390/kernel/idle.c
+@@ -69,18 +69,26 @@ DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
+ static ssize_t show_idle_time(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
++ unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
+ struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
+- unsigned long long now, idle_time, idle_enter, idle_exit;
+ unsigned int seq;
+
+ do {
+- now = get_tod_clock();
+ seq = read_seqcount_begin(&idle->seqcount);
+ idle_time = READ_ONCE(idle->idle_time);
+ idle_enter = READ_ONCE(idle->clock_idle_enter);
+ idle_exit = READ_ONCE(idle->clock_idle_exit);
+ } while (read_seqcount_retry(&idle->seqcount, seq));
+- idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
++ in_idle = 0;
++ now = get_tod_clock();
++ if (idle_enter) {
++ if (idle_exit) {
++ in_idle = idle_exit - idle_enter;
++ } else if (now > idle_enter) {
++ in_idle = now - idle_enter;
++ }
++ }
++ idle_time += in_idle;
+ return sprintf(buf, "%llu\n", idle_time >> 12);
+ }
+ DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
+@@ -88,17 +96,24 @@ DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
+ u64 arch_cpu_idle_time(int cpu)
+ {
+ struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
+- unsigned long long now, idle_enter, idle_exit;
++ unsigned long long now, idle_enter, idle_exit, in_idle;
+ unsigned int seq;
+
+ do {
+- now = get_tod_clock();
+ seq = read_seqcount_begin(&idle->seqcount);
+ idle_enter = READ_ONCE(idle->clock_idle_enter);
+ idle_exit = READ_ONCE(idle->clock_idle_exit);
+ } while (read_seqcount_retry(&idle->seqcount, seq));
+-
+- return cputime_to_nsecs(idle_enter ? ((idle_exit ?: now) - idle_enter) : 0);
++ in_idle = 0;
++ now = get_tod_clock();
++ if (idle_enter) {
++ if (idle_exit) {
++ in_idle = idle_exit - idle_enter;
++ } else if (now > idle_enter) {
++ in_idle = now - idle_enter;
++ }
++ }
++ return cputime_to_nsecs(in_idle);
+ }
+
+ void arch_cpu_idle_enter(void)
+diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
+index 829c63dbc81a..c0e96bdac80a 100644
+--- a/arch/s390/mm/cmm.c
++++ b/arch/s390/mm/cmm.c
+@@ -307,16 +307,16 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
+ }
+
+ if (write) {
+- len = *lenp;
+- if (copy_from_user(buf, buffer,
+- len > sizeof(buf) ? sizeof(buf) : len))
++ len = min(*lenp, sizeof(buf));
++ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+- buf[sizeof(buf) - 1] = '\0';
++ buf[len - 1] = '\0';
+ cmm_skip_blanks(buf, &p);
+ nr = simple_strtoul(p, &p, 0);
+ cmm_skip_blanks(p, &p);
+ seconds = simple_strtoul(p, &p, 0);
+ cmm_set_timeout(nr, seconds);
++ *ppos += *lenp;
+ } else {
+ len = sprintf(buf, "%ld %ld\n",
+ cmm_timeout_pages, cmm_timeout_seconds);
+@@ -324,9 +324,9 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
+ len = *lenp;
+ if (copy_to_user(buffer, buf, len))
+ return -EFAULT;
++ *lenp = len;
++ *ppos += len;
+ }
+- *lenp = len;
+- *ppos += len;
+ return 0;
+ }
+
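For orientation, the handler above backs the s390 CMM timeout sysctl (typically /proc/sys/vm/cmm_timeout, which takes a page count and a seconds value); a write such as the following example goes through this path (128 and 30 are arbitrary example values):

    echo 128 30 > /proc/sys/vm/cmm_timeout

The fix bounds the user copy by both *lenp and the local buffer, NUL-terminates at the length actually copied, and updates *ppos so the write is reported as fully consumed.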
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index 27ade3cb6482..defb536aebce 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -4,12 +4,14 @@
+ #include <linux/init.h>
+ #include <linux/slab.h>
+ #include <linux/delay.h>
++#include <linux/jiffies.h>
+ #include <asm/apicdef.h>
+ #include <asm/nmi.h>
+
+ #include "../perf_event.h"
+
+-static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
++static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
++static unsigned long perf_nmi_window;
+
+ static __initconst const u64 amd_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+@@ -640,11 +642,12 @@ static void amd_pmu_disable_event(struct perf_event *event)
+ * handler when multiple PMCs are active or PMC overflow while handling some
+ * other source of an NMI.
+ *
+- * Attempt to mitigate this by using the number of active PMCs to determine
+- * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
+- * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
+- * number of active PMCs or 2. The value of 2 is used in case an NMI does not
+- * arrive at the LAPIC in time to be collapsed into an already pending NMI.
++ * Attempt to mitigate this by creating an NMI window in which un-handled NMIs
++ * received during this window will be claimed. This prevents extending the
++ * window past when it is possible that latent NMIs should be received. The
++ * per-CPU perf_nmi_tstamp will be set to the window end time whenever perf has
++ * handled a counter. When an un-handled NMI is received, it will be claimed
++ * only if arriving within that window.
+ */
+ static int amd_pmu_handle_irq(struct pt_regs *regs)
+ {
+@@ -662,21 +665,19 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
+ handled = x86_pmu_handle_irq(regs);
+
+ /*
+- * If a counter was handled, record the number of possible remaining
+- * NMIs that can occur.
++ * If a counter was handled, record a timestamp such that un-handled
++ * NMIs will be claimed if arriving within that window.
+ */
+ if (handled) {
+- this_cpu_write(perf_nmi_counter,
+- min_t(unsigned int, 2, active));
++ this_cpu_write(perf_nmi_tstamp,
++ jiffies + perf_nmi_window);
+
+ return handled;
+ }
+
+- if (!this_cpu_read(perf_nmi_counter))
++ if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
+ return NMI_DONE;
+
+- this_cpu_dec(perf_nmi_counter);
+-
+ return NMI_HANDLED;
+ }
+
+@@ -908,6 +909,9 @@ static int __init amd_core_pmu_init(void)
+ if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
+ return 0;
+
++ /* Avoid calculating the value each time in the NMI handler */
++ perf_nmi_window = msecs_to_jiffies(100);
++
+ switch (boot_cpu_data.x86) {
+ case 0x15:
+ pr_cont("Fam15h ");
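The perf_nmi_tstamp change above replaces counter-based accounting of latent NMIs with a time window. Distilled into a simplified sketch (a restatement of the logic, not the driver source): after perf handles at least one counter, remember jiffies + window; an otherwise unhandled perf NMI is claimed only while that deadline has not passed.

/* Simplified restatement of the claim-window logic. */
static DEFINE_PER_CPU(unsigned long, nmi_deadline);
static unsigned long nmi_window;	/* set once, e.g. msecs_to_jiffies(100) */

static int handle_pmi(struct pt_regs *regs)
{
	int handled = x86_pmu_handle_irq(regs);

	if (handled) {
		/* open (or extend) the window for latent NMIs */
		this_cpu_write(nmi_deadline, jiffies + nmi_window);
		return handled;
	}

	if (time_after(jiffies, this_cpu_read(nmi_deadline)))
		return NMI_DONE;	/* window closed: not ours */

	return NMI_HANDLED;		/* swallow the latent NMI */
}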
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+index 038e4b63b56b..5cd7d4e1579d 100644
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -6,7 +6,7 @@
+ * "Big Core" Processors (Branded as Core, Xeon, etc...)
+ *
+ * The "_X" parts are generally the EP and EX Xeons, or the
+- * "Extreme" ones, like Broadwell-E.
++ * "Extreme" ones, like Broadwell-E, or Atom microserver.
+ *
+ * Things ending in "2" are usually because we have no better
+ * name for them. There's no processor called "SILVERMONT2".
+@@ -68,6 +68,7 @@
+ #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
+ #define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
+ #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
++#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
+
+ /* Xeon Phi */
+
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 9061babfbc83..335a62e74a2e 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -893,9 +893,6 @@ static void __init kexec_enter_virtual_mode(void)
+
+ if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
+ runtime_code_page_mkexec();
+-
+- /* clean DUMMY object */
+- efi_delete_dummy_variable();
+ #endif
+ }
+
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 515d5e4414c2..00fc683a2011 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -259,19 +259,41 @@ void xen_reboot(int reason)
+ BUG();
+ }
+
++static int reboot_reason = SHUTDOWN_reboot;
++static bool xen_legacy_crash;
+ void xen_emergency_restart(void)
+ {
+- xen_reboot(SHUTDOWN_reboot);
++ xen_reboot(reboot_reason);
+ }
+
+ static int
+ xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
+ {
+- if (!kexec_crash_loaded())
+- xen_reboot(SHUTDOWN_crash);
++ if (!kexec_crash_loaded()) {
++ if (xen_legacy_crash)
++ xen_reboot(SHUTDOWN_crash);
++
++ reboot_reason = SHUTDOWN_crash;
++
++ /*
++ * If panic_timeout==0 then we are supposed to wait forever.
++ * However, to preserve original dom0 behavior we have to drop
++ * into hypervisor. (domU behavior is controlled by its
++ * config file)
++ */
++ if (panic_timeout == 0)
++ panic_timeout = -1;
++ }
+ return NOTIFY_DONE;
+ }
+
++static int __init parse_xen_legacy_crash(char *arg)
++{
++ xen_legacy_crash = true;
++ return 0;
++}
++early_param("xen_legacy_crash", parse_xen_legacy_crash);
++
+ static struct notifier_block xen_panic_block = {
+ .notifier_call = xen_panic_event,
+ .priority = INT_MIN
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index a23460084955..f3d0bc9a9905 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -228,8 +228,8 @@ static void nbd_put(struct nbd_device *nbd)
+ if (refcount_dec_and_mutex_lock(&nbd->refs,
+ &nbd_index_mutex)) {
+ idr_remove(&nbd_index_idr, nbd->index);
+- mutex_unlock(&nbd_index_mutex);
+ nbd_dev_remove(nbd);
++ mutex_unlock(&nbd_index_mutex);
+ }
+ }
+
+@@ -912,6 +912,25 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ return ret;
+ }
+
++static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
++ int *err)
++{
++ struct socket *sock;
++
++ *err = 0;
++ sock = sockfd_lookup(fd, err);
++ if (!sock)
++ return NULL;
++
++ if (sock->ops->shutdown == sock_no_shutdown) {
++ dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
++ *err = -EINVAL;
++ return NULL;
++ }
++
++ return sock;
++}
++
+ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
+ bool netlink)
+ {
+@@ -921,7 +940,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
+ struct nbd_sock *nsock;
+ int err;
+
+- sock = sockfd_lookup(arg, &err);
++ sock = nbd_get_socket(nbd, arg, &err);
+ if (!sock)
+ return err;
+
+@@ -973,7 +992,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
+ int i;
+ int err;
+
+- sock = sockfd_lookup(arg, &err);
++ sock = nbd_get_socket(nbd, arg, &err);
+ if (!sock)
+ return err;
+
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index 133178c9b2cf..1b4e195c0d3c 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -291,13 +291,14 @@ static void reset_bdev(struct zram *zram)
+ static ssize_t backing_dev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
++ struct file *file;
+ struct zram *zram = dev_to_zram(dev);
+- struct file *file = zram->backing_dev;
+ char *p;
+ ssize_t ret;
+
+ down_read(&zram->init_lock);
+- if (!zram_wb_enabled(zram)) {
++ file = zram->backing_dev;
++ if (!file) {
+ memcpy(buf, "none\n", 5);
+ up_read(&zram->init_lock);
+ return 5;
+diff --git a/drivers/clk/imgtec/clk-boston.c b/drivers/clk/imgtec/clk-boston.c
+index f5d54a64d33c..dddda45127a8 100644
+--- a/drivers/clk/imgtec/clk-boston.c
++++ b/drivers/clk/imgtec/clk-boston.c
+@@ -73,31 +73,39 @@ static void __init clk_boston_setup(struct device_node *np)
+ hw = clk_hw_register_fixed_rate(NULL, "input", NULL, 0, in_freq);
+ if (IS_ERR(hw)) {
+ pr_err("failed to register input clock: %ld\n", PTR_ERR(hw));
+- goto error;
++ goto fail_input;
+ }
+ onecell->hws[BOSTON_CLK_INPUT] = hw;
+
+ hw = clk_hw_register_fixed_rate(NULL, "sys", "input", 0, sys_freq);
+ if (IS_ERR(hw)) {
+ pr_err("failed to register sys clock: %ld\n", PTR_ERR(hw));
+- goto error;
++ goto fail_sys;
+ }
+ onecell->hws[BOSTON_CLK_SYS] = hw;
+
+ hw = clk_hw_register_fixed_rate(NULL, "cpu", "input", 0, cpu_freq);
+ if (IS_ERR(hw)) {
+ pr_err("failed to register cpu clock: %ld\n", PTR_ERR(hw));
+- goto error;
++ goto fail_cpu;
+ }
+ onecell->hws[BOSTON_CLK_CPU] = hw;
+
+ err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, onecell);
+- if (err)
++ if (err) {
+ pr_err("failed to add DT provider: %d\n", err);
++ goto fail_clk_add;
++ }
+
+ return;
+
+-error:
++fail_clk_add:
++ clk_hw_unregister_fixed_rate(onecell->hws[BOSTON_CLK_CPU]);
++fail_cpu:
++ clk_hw_unregister_fixed_rate(onecell->hws[BOSTON_CLK_SYS]);
++fail_sys:
++ clk_hw_unregister_fixed_rate(onecell->hws[BOSTON_CLK_INPUT]);
++fail_input:
+ kfree(onecell);
+ }
+
+diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
+index ddd4a3932127..cf119a8ccdd5 100644
+--- a/drivers/dma/cppi41.c
++++ b/drivers/dma/cppi41.c
+@@ -585,9 +585,22 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
+ enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
+ {
+ struct cppi41_channel *c = to_cpp41_chan(chan);
++ struct dma_async_tx_descriptor *txd = NULL;
++ struct cppi41_dd *cdd = c->cdd;
+ struct cppi41_desc *d;
+ struct scatterlist *sg;
+ unsigned int i;
++ int error;
++
++ error = pm_runtime_get(cdd->ddev.dev);
++ if (error < 0) {
++ pm_runtime_put_noidle(cdd->ddev.dev);
++
++ return NULL;
++ }
++
++ if (cdd->is_suspended)
++ goto err_out_not_ready;
+
+ d = c->desc;
+ for_each_sg(sgl, sg, sg_len, i) {
+@@ -610,7 +623,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
+ d++;
+ }
+
+- return &c->txd;
++ txd = &c->txd;
++
++err_out_not_ready:
++ pm_runtime_mark_last_busy(cdd->ddev.dev);
++ pm_runtime_put_autosuspend(cdd->ddev.dev);
++
++ return txd;
+ }
+
+ static void cppi41_compute_td_desc(struct cppi41_desc *d)
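The cppi41 change above wraps descriptor preparation in the usual runtime-PM bracket. A generic sketch of that bracket follows (dev and do_work() are placeholders; note that the driver itself uses the asynchronous pm_runtime_get() plus its own is_suspended flag, whereas the sketch uses the synchronous variant for simplicity):

/* Generic runtime-PM bracket around a hardware access (sketch). */
static int do_hw_op(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* resume + take a reference */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* drop only the reference */
		return ret;
	}

	ret = do_work(dev);			/* placeholder for the real access */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop the ref; suspend later if idle */
	return ret;
}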
+diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
+index db404aab82b2..209dc5aefc31 100644
+--- a/drivers/firmware/efi/cper.c
++++ b/drivers/firmware/efi/cper.c
+@@ -498,7 +498,7 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
+ printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
+ pcie->device_id.vendor_id, pcie->device_id.device_id);
+ p = pcie->device_id.class_code;
+- printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
++ printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]);
+ }
+ if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
+ printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
+diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
+index 538bce4b5b42..ac6c1c0548b6 100644
+--- a/drivers/gpio/gpio-max77620.c
++++ b/drivers/gpio/gpio-max77620.c
+@@ -163,13 +163,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
+ case 0:
+ val = MAX77620_CNFG_GPIO_DBNC_None;
+ break;
+- case 1 ... 8:
++ case 1000 ... 8000:
+ val = MAX77620_CNFG_GPIO_DBNC_8ms;
+ break;
+- case 9 ... 16:
++ case 9000 ... 16000:
+ val = MAX77620_CNFG_GPIO_DBNC_16ms;
+ break;
+- case 17 ... 32:
++ case 17000 ... 32000:
+ val = MAX77620_CNFG_GPIO_DBNC_32ms;
+ break;
+ default:
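The ranges above were off by a factor of a thousand because gpiolib hands debounce times to drivers in microseconds. A consumer asking for an 8 ms debounce does so along these lines (desc is assumed to have been obtained earlier, e.g. from gpiod_get()):

/* 8000 us = 8 ms; maps to MAX77620_CNFG_GPIO_DBNC_8ms after this fix. */
err = gpiod_set_debounce(desc, 8000);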
+diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
+index a594e478a1e2..843aed4dec80 100644
+--- a/drivers/hid/hid-axff.c
++++ b/drivers/hid/hid-axff.c
+@@ -75,13 +75,20 @@ static int axff_init(struct hid_device *hid)
+ {
+ struct axff_device *axff;
+ struct hid_report *report;
+- struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
++ struct hid_input *hidinput;
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+- struct input_dev *dev = hidinput->input;
++ struct input_dev *dev;
+ int field_count = 0;
+ int i, j;
+ int error;
+
++ if (list_empty(&hid->inputs)) {
++ hid_err(hid, "no inputs found\n");
++ return -ENODEV;
++ }
++ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
++ dev = hidinput->input;
++
+ if (list_empty(report_list)) {
+ hid_err(hid, "no output reports found\n");
+ return -ENODEV;
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 7c8049a5bd99..0b0fa257299d 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -979,6 +979,7 @@ int hid_open_report(struct hid_device *device)
+ __u8 *start;
+ __u8 *buf;
+ __u8 *end;
++ __u8 *next;
+ int ret;
+ static int (*dispatch_type[])(struct hid_parser *parser,
+ struct hid_item *item) = {
+@@ -1032,7 +1033,8 @@ int hid_open_report(struct hid_device *device)
+ device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
+
+ ret = -EINVAL;
+- while ((start = fetch_item(start, end, &item)) != NULL) {
++ while ((next = fetch_item(start, end, &item)) != NULL) {
++ start = next;
+
+ if (item.format != HID_ITEM_FORMAT_SHORT) {
+ hid_err(device, "unexpected long global item\n");
+@@ -1061,7 +1063,8 @@ int hid_open_report(struct hid_device *device)
+ }
+ }
+
+- hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
++ hid_err(device, "item fetching failed at offset %u/%u\n",
++ size - (unsigned int)(end - start), size);
+ err:
+ vfree(parser);
+ hid_close_report(device);
+diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
+index 818ea7d93533..309969b8dc2e 100644
+--- a/drivers/hid/hid-dr.c
++++ b/drivers/hid/hid-dr.c
+@@ -87,13 +87,19 @@ static int drff_init(struct hid_device *hid)
+ {
+ struct drff_device *drff;
+ struct hid_report *report;
+- struct hid_input *hidinput = list_first_entry(&hid->inputs,
+- struct hid_input, list);
++ struct hid_input *hidinput;
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+- struct input_dev *dev = hidinput->input;
++ struct input_dev *dev;
+ int error;
+
++ if (list_empty(&hid->inputs)) {
++ hid_err(hid, "no inputs found\n");
++ return -ENODEV;
++ }
++ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
++ dev = hidinput->input;
++
+ if (list_empty(report_list)) {
+ hid_err(hid, "no output reports found\n");
+ return -ENODEV;
+diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c
+index d82d75bb11f7..80f9a02dfa69 100644
+--- a/drivers/hid/hid-emsff.c
++++ b/drivers/hid/hid-emsff.c
+@@ -59,13 +59,19 @@ static int emsff_init(struct hid_device *hid)
+ {
+ struct emsff_device *emsff;
+ struct hid_report *report;
+- struct hid_input *hidinput = list_first_entry(&hid->inputs,
+- struct hid_input, list);
++ struct hid_input *hidinput;
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+- struct input_dev *dev = hidinput->input;
++ struct input_dev *dev;
+ int error;
+
++ if (list_empty(&hid->inputs)) {
++ hid_err(hid, "no inputs found\n");
++ return -ENODEV;
++ }
++ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
++ dev = hidinput->input;
++
+ if (list_empty(report_list)) {
+ hid_err(hid, "no output reports found\n");
+ return -ENODEV;
+diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c
+index 2d8cead3adca..5a02c50443cb 100644
+--- a/drivers/hid/hid-gaff.c
++++ b/drivers/hid/hid-gaff.c
+@@ -77,14 +77,20 @@ static int gaff_init(struct hid_device *hid)
+ {
+ struct gaff_device *gaff;
+ struct hid_report *report;
+- struct hid_input *hidinput = list_entry(hid->inputs.next,
+- struct hid_input, list);
++ struct hid_input *hidinput;
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct list_head *report_ptr = report_list;
+- struct input_dev *dev = hidinput->input;
++ struct input_dev *dev;
+ int error;
+
++ if (list_empty(&hid->inputs)) {
++ hid_err(hid, "no inputs found\n");
++ return -ENODEV;
++ }
++ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
++ dev = hidinput->input;
++
+ if (list_empty(report_list)) {
+ hid_err(hid, "no output reports found\n");
+ return -ENODEV;
+diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c
+index 9325545fc3ae..3e84551cca9c 100644
+--- a/drivers/hid/hid-holtekff.c
++++ b/drivers/hid/hid-holtekff.c
+@@ -140,13 +140,19 @@ static int holtekff_init(struct hid_device *hid)
+ {
+ struct holtekff_device *holtekff;
+ struct hid_report *report;
+- struct hid_input *hidinput = list_entry(hid->inputs.next,
+- struct hid_input, list);
++ struct hid_input *hidinput;
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+- struct input_dev *dev = hidinput->input;
++ struct input_dev *dev;
+ int error;
+
++ if (list_empty(&hid->inputs)) {
++ hid_err(hid, "no inputs found\n");
++ return -ENODEV;
++ }
++ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
++ dev = hidinput->input;
++
+ if (list_empty(report_list)) {
+ hid_err(hid, "no output report found\n");
+ return -ENODEV;
+diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
+index 5f1de24206ab..220b3e5c9c39 100644
+--- a/drivers/hid/hid-hyperv.c
++++ b/drivers/hid/hid-hyperv.c
+@@ -322,60 +322,24 @@ static void mousevsc_on_receive(struct hv_device *device,
+
+ static void mousevsc_on_channel_callback(void *context)
+ {
+- const int packet_size = 0x100;
+- int ret;
+ struct hv_device *device = context;
+- u32 bytes_recvd;
+- u64 req_id;
+ struct vmpacket_descriptor *desc;
+- unsigned char *buffer;
+- int bufferlen = packet_size;
+-
+- buffer = kmalloc(bufferlen, GFP_ATOMIC);
+- if (!buffer)
+- return;
+-
+- do {
+- ret = vmbus_recvpacket_raw(device->channel, buffer,
+- bufferlen, &bytes_recvd, &req_id);
+-
+- switch (ret) {
+- case 0:
+- if (bytes_recvd <= 0) {
+- kfree(buffer);
+- return;
+- }
+- desc = (struct vmpacket_descriptor *)buffer;
+-
+- switch (desc->type) {
+- case VM_PKT_COMP:
+- break;
+-
+- case VM_PKT_DATA_INBAND:
+- mousevsc_on_receive(device, desc);
+- break;
+-
+- default:
+- pr_err("unhandled packet type %d, tid %llx len %d\n",
+- desc->type, req_id, bytes_recvd);
+- break;
+- }
+
++ foreach_vmbus_pkt(desc, device->channel) {
++ switch (desc->type) {
++ case VM_PKT_COMP:
+ break;
+
+- case -ENOBUFS:
+- kfree(buffer);
+- /* Handle large packet */
+- bufferlen = bytes_recvd;
+- buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
+-
+- if (!buffer)
+- return;
++ case VM_PKT_DATA_INBAND:
++ mousevsc_on_receive(device, desc);
++ break;
+
++ default:
++ pr_err("Unhandled packet type %d, tid %llx len %d\n",
++ desc->type, desc->trans_id, desc->len8 * 8);
+ break;
+ }
+- } while (1);
+-
++ }
+ }
+
+ static int mousevsc_connect_to_vsp(struct hv_device *device)
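foreach_vmbus_pkt() used above walks the channel's ring buffer in place, which is what lets the kmalloc'd bounce buffer and the -ENOBUFS reallocation loop be removed. In this kernel's include/linux/hyperv.h the macro is roughly a wrapper around the packet-iterator helpers, something like the following (reproduced from memory, so treat the exact form as approximate):

#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	     pkt = hv_pkt_iter_next(channel, pkt))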
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index d723185de3ba..9d24fb0715ba 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -328,6 +328,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL,
+ USB_DEVICE_ID_SYMBOL_SCANNER_3),
+ HID_BATTERY_QUIRK_IGNORE },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK,
++ USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD),
++ HID_BATTERY_QUIRK_IGNORE },
+ {}
+ };
+
+diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
+index 0e3fb1a7e421..6909d9c2fc67 100644
+--- a/drivers/hid/hid-lg2ff.c
++++ b/drivers/hid/hid-lg2ff.c
+@@ -62,11 +62,17 @@ int lg2ff_init(struct hid_device *hid)
+ {
+ struct lg2ff_device *lg2ff;
+ struct hid_report *report;
+- struct hid_input *hidinput = list_entry(hid->inputs.next,
+- struct hid_input, list);
+- struct input_dev *dev = hidinput->input;
++ struct hid_input *hidinput;
++ struct input_dev *dev;
+ int error;
+
++ if (list_empty(&hid->inputs)) {
++ hid_err(hid, "no inputs found\n");
++ return -ENODEV;
++ }
++ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
++ dev = hidinput->input;
++
+ /* Check that the report looks ok */
+ report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
+ if (!report)
+diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
+index 8c2da183d3bc..acf739fc4060 100644
+--- a/drivers/hid/hid-lg3ff.c
++++ b/drivers/hid/hid-lg3ff.c
+@@ -129,12 +129,19 @@ static const signed short ff3_joystick_ac[] = {
+
+ int lg3ff_init(struct hid_device *hid)
+ {
+- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+- struct input_dev *dev = hidinput->input;
++ struct hid_input *hidinput;
++ struct input_dev *dev;
+ const signed short *ff_bits = ff3_joystick_ac;
+ int error;
+ int i;
+
++ if (list_empty(&hid->inputs)) {
++ hid_err(hid, "no inputs found\n");
++ return -ENODEV;
++ }
++ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
++ dev = hidinput->input;
++
+ /* Check that the report looks ok */
+ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
+ return -ENODEV;
+diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
+index 127f1335a1da..1b109a5cf922 100644
+--- a/drivers/hid/hid-lg4ff.c
++++ b/drivers/hid/hid-lg4ff.c
+@@ -1261,8 +1261,8 @@ static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_produc
+
+ int lg4ff_init(struct hid_device *hid)
+ {
+- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+- struct input_dev *dev = hidinput->input;
++ struct hid_input *hidinput;
++ struct input_dev *dev;
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
+@@ -1274,6 +1274,13 @@ int lg4ff_init(struct hid_device *hid)
+ int mmode_ret, mmode_idx = -1;
+ u16 real_product_id;
+
++ if (list_empty(&hid->inputs)) {
++ hid_err(hid, "no inputs found\n");
++ return -ENODEV;
++ }
++ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
++ dev = hidinput->input;
++
+ /* Check that the report looks ok */
+ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
+ return -1;
+diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
+index e1394af0ae7b..1871cdcd1e0a 100644
+--- a/drivers/hid/hid-lgff.c
++++ b/drivers/hid/hid-lgff.c
+@@ -127,12 +127,19 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
+
+ int lgff_init(struct hid_device* hid)
+ {
+- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+- struct input_dev *dev = hidinput->input;
++ struct hid_input *hidinput;
++ struct input_dev *dev;
+ const signed short *ff_bits = ff_joystick;
+ int error;
+ int i;
+
++ if (list_empty(&hid->inputs)) {
++ hid_err(hid, "no inputs found\n");
++ return -ENODEV;
++ }
++ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
++ dev = hidinput->input;
++
+ /* Check that the report looks ok */
+ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
+ return -ENODEV;
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index b705cbb58ca6..4706fb852eaf 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -1867,8 +1867,8 @@ static void hidpp_ff_destroy(struct ff_device *ff)
+ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
+ {
+ struct hid_device *hid = hidpp->hid_dev;
+- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+- struct input_dev *dev = hidinput->input;
++ struct hid_input *hidinput;
++ struct input_dev *dev;
+ const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
+ const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
+ struct ff_device *ff;
+@@ -1877,6 +1877,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
+ int error, j, num_slots;
+ u8 version;
+
++ if (list_empty(&hid->inputs)) {
++ hid_err(hid, "no inputs found\n");
++ return -ENODEV;
++ }
++ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
++ dev = hidinput->input;
++
+ if (!dev) {
+ hid_err(hid, "Struct input_dev not set!\n");
+ return -EINVAL;
+diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
+index 6ce9b5e1a06f..c8b07a182c0b 100644
+--- a/drivers/hid/hid-sony.c
++++ b/drivers/hid/hid-sony.c
+@@ -2163,9 +2163,15 @@ static int sony_play_effect(struct input_dev *dev, void *data,
+
+ static int sony_init_ff(struct sony_sc *sc)
+ {
+- struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
+- struct hid_input, list);
+- struct input_dev *input_dev = hidinput->input;
++ struct hid_input *hidinput;
++ struct input_dev *input_dev;
++
++ if (list_empty(&sc->hdev->inputs)) {
++ hid_err(sc->hdev, "no inputs found\n");
++ return -ENODEV;
++ }
++ hidinput = list_entry(sc->hdev->inputs.next, struct hid_input, list);
++ input_dev = hidinput->input;
+
+ input_set_capability(input_dev, EV_FF, FF_RUMBLE);
+ return input_ff_create_memless(input_dev, NULL, sony_play_effect);
+diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
+index cfa0cb22c9b3..d98e471a5f7b 100644
+--- a/drivers/hid/hid-tmff.c
++++ b/drivers/hid/hid-tmff.c
+@@ -136,12 +136,18 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits)
+ struct tmff_device *tmff;
+ struct hid_report *report;
+ struct list_head *report_list;
+- struct hid_input *hidinput = list_entry(hid->inputs.next,
+- struct hid_input, list);
+- struct input_dev *input_dev = hidinput->input;
++ struct hid_input *hidinput;
++ struct input_dev *input_dev;
+ int error;
+ int i;
+
++ if (list_empty(&hid->inputs)) {
++ hid_err(hid, "no inputs found\n");
++ return -ENODEV;
++ }
++ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
++ input_dev = hidinput->input;
++
+ tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL);
+ if (!tmff)
+ return -ENOMEM;
+diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
+index a29756c6ca02..4e7e01be99b1 100644
+--- a/drivers/hid/hid-zpff.c
++++ b/drivers/hid/hid-zpff.c
+@@ -66,11 +66,17 @@ static int zpff_init(struct hid_device *hid)
+ {
+ struct zpff_device *zpff;
+ struct hid_report *report;
+- struct hid_input *hidinput = list_entry(hid->inputs.next,
+- struct hid_input, list);
+- struct input_dev *dev = hidinput->input;
++ struct hid_input *hidinput;
++ struct input_dev *dev;
+ int i, error;
+
++ if (list_empty(&hid->inputs)) {
++ hid_err(hid, "no inputs found\n");
++ return -ENODEV;
++ }
++ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
++ dev = hidinput->input;
++
+ for (i = 0; i < 4; i++) {
+ report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
+ if (!report)
+diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+index cac262a912c1..10af8585c820 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
++++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+@@ -322,6 +322,25 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
++ {
++ /*
++ * There are at least 2 Primebook C11B versions, the older
++ * version has a product-name of "Primebook C11B", and a
++ * bios version / release / firmware revision of:
++ * V2.1.2 / 05/03/2018 / 18.2
++ * The new version has "PRIMEBOOK C11B" as product-name and a
++ * bios version / release / firmware revision of:
++ * CFALKSW05_BIOS_V1.1.2 / 11/19/2018 / 19.2
++ * Only the older version needs this quirk, note the newer
++ * version will not match as it has a different product-name.
++ */
++ .ident = "Trekstor Primebook C11B",
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11B"),
++ },
++ .driver_data = (void *)&sipodev_desc
++ },
+ {
+ .ident = "Direkt-Tek DTLAPY116-2",
+ .matches = {
+@@ -330,6 +349,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
++ {
++ .ident = "Direkt-Tek DTLAPY133-1",
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY133-1"),
++ },
++ .driver_data = (void *)&sipodev_desc
++ },
+ {
+ .ident = "Mediacom Flexbook Edge 11",
+ .matches = {
+@@ -338,6 +365,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
++ {
++ .ident = "Odys Winbook 13",
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AXDIA International GmbH"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "WINBOOK 13"),
++ },
++ .driver_data = (void *)&sipodev_desc
++ },
+ { } /* Terminate list */
+ };
+
+diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
+index 807299dd45eb..7e86a5b7ec4e 100644
+--- a/drivers/iio/accel/bmc150-accel-core.c
++++ b/drivers/iio/accel/bmc150-accel-core.c
+@@ -125,7 +125,7 @@
+ #define BMC150_ACCEL_SLEEP_1_SEC 0x0F
+
+ #define BMC150_ACCEL_REG_TEMP 0x08
+-#define BMC150_ACCEL_TEMP_CENTER_VAL 24
++#define BMC150_ACCEL_TEMP_CENTER_VAL 23
+
+ #define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2))
+ #define BMC150_AUTO_SUSPEND_DELAY_MS 2000
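The centre-value change above lines up with the Bosch datasheet, which specifies the embedded temperature sensor as 0.5 K per LSB centred on 23 °C, i.e. T ≈ 23 °C + raw * 0.5 °C; with the old centre value of 24, every reported temperature came out 1 °C too high.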
+diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
+index 2515badf8b28..9b2121f24926 100644
+--- a/drivers/iio/adc/meson_saradc.c
++++ b/drivers/iio/adc/meson_saradc.c
+@@ -976,6 +976,11 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
++ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
++ priv->data->regmap_config);
++ if (IS_ERR(priv->regmap))
++ return PTR_ERR(priv->regmap);
++
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!irq)
+ return -EINVAL;
+@@ -985,11 +990,6 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
+ if (ret)
+ return ret;
+
+- priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+- priv->data->regmap_config);
+- if (IS_ERR(priv->regmap))
+- return PTR_ERR(priv->regmap);
+-
+ priv->clkin = devm_clk_get(&pdev->dev, "clkin");
+ if (IS_ERR(priv->clkin)) {
+ dev_err(&pdev->dev, "failed to get clkin\n");
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 7c5eca312aa8..f698c6a28c14 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -2212,9 +2212,10 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
+ conn_id->cm_id.iw = NULL;
+ cma_exch(conn_id, RDMA_CM_DESTROYING);
+ mutex_unlock(&conn_id->handler_mutex);
++ mutex_unlock(&listen_id->handler_mutex);
+ cma_deref_id(conn_id);
+ rdma_destroy_id(&conn_id->id);
+- goto out;
++ return ret;
+ }
+
+ mutex_unlock(&conn_id->handler_mutex);
+diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
+index 6781bcdb10b3..741938409f8e 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -1529,8 +1529,11 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
+ }
+
+ ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
+- if (ret < 0)
++ if (ret < 0) {
++ kfree(tmp_sdma_rht);
+ goto bail;
++ }
++
+ dd->sdma_rht = tmp_sdma_rht;
+
+ dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
+diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
+index 874841f0fc83..10532a76688e 100644
+--- a/drivers/md/dm-bio-prison-v1.c
++++ b/drivers/md/dm-bio-prison-v1.c
+@@ -33,7 +33,7 @@ static struct kmem_cache *_cell_cache;
+ */
+ struct dm_bio_prison *dm_bio_prison_create(void)
+ {
+- struct dm_bio_prison *prison = kmalloc(sizeof(*prison), GFP_KERNEL);
++ struct dm_bio_prison *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
+
+ if (!prison)
+ return NULL;
+diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
+index 8ce3a1a588cf..c34ec615420f 100644
+--- a/drivers/md/dm-bio-prison-v2.c
++++ b/drivers/md/dm-bio-prison-v2.c
+@@ -35,7 +35,7 @@ static struct kmem_cache *_cell_cache;
+ */
+ struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
+ {
+- struct dm_bio_prison_v2 *prison = kmalloc(sizeof(*prison), GFP_KERNEL);
++ struct dm_bio_prison_v2 *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
+
+ if (!prison)
+ return NULL;
+diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
+index b4357ed4d541..56e2c0e079d7 100644
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -50,7 +50,7 @@ struct dm_io_client *dm_io_client_create(void)
+ struct dm_io_client *client;
+ unsigned min_ios = dm_get_reserved_bio_based_ios();
+
+- client = kmalloc(sizeof(*client), GFP_KERNEL);
++ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
+index bd9a45b94b55..7ca2b1aaa79d 100644
+--- a/drivers/md/dm-kcopyd.c
++++ b/drivers/md/dm-kcopyd.c
+@@ -892,7 +892,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
+ int r = -ENOMEM;
+ struct dm_kcopyd_client *kc;
+
+- kc = kmalloc(sizeof(*kc), GFP_KERNEL);
++ kc = kzalloc(sizeof(*kc), GFP_KERNEL);
+ if (!kc)
+ return ERR_PTR(-ENOMEM);
+
+diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
+index 85c32b22a420..91c6f6d72eee 100644
+--- a/drivers/md/dm-region-hash.c
++++ b/drivers/md/dm-region-hash.c
+@@ -179,7 +179,7 @@ struct dm_region_hash *dm_region_hash_create(
+ ;
+ nr_buckets >>= 1;
+
+- rh = kmalloc(sizeof(*rh), GFP_KERNEL);
++ rh = kzalloc(sizeof(*rh), GFP_KERNEL);
+ if (!rh) {
+ DMERR("unable to allocate region hash memory");
+ return ERR_PTR(-ENOMEM);
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index b502debc6df3..2170f6c118b8 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -19,7 +19,6 @@
+ #include <linux/vmalloc.h>
+ #include <linux/log2.h>
+ #include <linux/dm-kcopyd.h>
+-#include <linux/semaphore.h>
+
+ #include "dm.h"
+
+@@ -48,7 +47,7 @@ struct dm_exception_table {
+ };
+
+ struct dm_snapshot {
+- struct rw_semaphore lock;
++ struct mutex lock;
+
+ struct dm_dev *origin;
+ struct dm_dev *cow;
+@@ -106,8 +105,8 @@ struct dm_snapshot {
+ /* The on disk metadata handler */
+ struct dm_exception_store *store;
+
+- /* Maximum number of in-flight COW jobs. */
+- struct semaphore cow_count;
++ unsigned in_progress;
++ struct wait_queue_head in_progress_wait;
+
+ struct dm_kcopyd_client *kcopyd_client;
+
+@@ -158,8 +157,8 @@ struct dm_snapshot {
+ */
+ #define DEFAULT_COW_THRESHOLD 2048
+
+-static int cow_threshold = DEFAULT_COW_THRESHOLD;
+-module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
++static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
++module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
+ MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
+
+ DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
+@@ -456,9 +455,9 @@ static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
+ if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
+ continue;
+
+- down_read(&s->lock);
++ mutex_lock(&s->lock);
+ active = s->active;
+- up_read(&s->lock);
++ mutex_unlock(&s->lock);
+
+ if (active) {
+ if (snap_src)
+@@ -926,7 +925,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
+ int r;
+ chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
+
+- down_write(&s->lock);
++ mutex_lock(&s->lock);
+
+ /*
+ * Process chunks (and associated exceptions) in reverse order
+@@ -941,7 +940,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
+ b = __release_queued_bios_after_merge(s);
+
+ out:
+- up_write(&s->lock);
++ mutex_unlock(&s->lock);
+ if (b)
+ flush_bios(b);
+
+@@ -1000,9 +999,9 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
+ if (linear_chunks < 0) {
+ DMERR("Read error in exception store: "
+ "shutting down merge");
+- down_write(&s->lock);
++ mutex_lock(&s->lock);
+ s->merge_failed = 1;
+- up_write(&s->lock);
++ mutex_unlock(&s->lock);
+ }
+ goto shut;
+ }
+@@ -1043,10 +1042,10 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
+ previous_count = read_pending_exceptions_done_count();
+ }
+
+- down_write(&s->lock);
++ mutex_lock(&s->lock);
+ s->first_merging_chunk = old_chunk;
+ s->num_merging_chunks = linear_chunks;
+- up_write(&s->lock);
++ mutex_unlock(&s->lock);
+
+ /* Wait until writes to all 'linear_chunks' drain */
+ for (i = 0; i < linear_chunks; i++)
+@@ -1088,10 +1087,10 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
+ return;
+
+ shut:
+- down_write(&s->lock);
++ mutex_lock(&s->lock);
+ s->merge_failed = 1;
+ b = __release_queued_bios_after_merge(s);
+- up_write(&s->lock);
++ mutex_unlock(&s->lock);
+ error_bios(b);
+
+ merge_shutdown(s);
+@@ -1137,7 +1136,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ origin_mode = FMODE_WRITE;
+ }
+
+- s = kmalloc(sizeof(*s), GFP_KERNEL);
++ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s) {
+ ti->error = "Cannot allocate private snapshot structure";
+ r = -ENOMEM;
+@@ -1190,7 +1189,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ s->exception_start_sequence = 0;
+ s->exception_complete_sequence = 0;
+ INIT_LIST_HEAD(&s->out_of_order_list);
+- init_rwsem(&s->lock);
++ mutex_init(&s->lock);
+ INIT_LIST_HEAD(&s->list);
+ spin_lock_init(&s->pe_lock);
+ s->state_bits = 0;
+@@ -1206,7 +1205,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ goto bad_hash_tables;
+ }
+
+- sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
++ init_waitqueue_head(&s->in_progress_wait);
+
+ s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
+ if (IS_ERR(s->kcopyd_client)) {
+@@ -1357,9 +1356,9 @@ static void snapshot_dtr(struct dm_target *ti)
+ /* Check whether exception handover must be cancelled */
+ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
+ if (snap_src && snap_dest && (s == snap_src)) {
+- down_write(&snap_dest->lock);
++ mutex_lock(&snap_dest->lock);
+ snap_dest->valid = 0;
+- up_write(&snap_dest->lock);
++ mutex_unlock(&snap_dest->lock);
+ DMERR("Cancelling snapshot handover.");
+ }
+ up_read(&_origins_lock);
+@@ -1390,13 +1389,62 @@ static void snapshot_dtr(struct dm_target *ti)
+
+ dm_exception_store_destroy(s->store);
+
++ mutex_destroy(&s->lock);
++
+ dm_put_device(ti, s->cow);
+
+ dm_put_device(ti, s->origin);
+
++ WARN_ON(s->in_progress);
++
+ kfree(s);
+ }
+
++static void account_start_copy(struct dm_snapshot *s)
++{
++ spin_lock(&s->in_progress_wait.lock);
++ s->in_progress++;
++ spin_unlock(&s->in_progress_wait.lock);
++}
++
++static void account_end_copy(struct dm_snapshot *s)
++{
++ spin_lock(&s->in_progress_wait.lock);
++ BUG_ON(!s->in_progress);
++ s->in_progress--;
++ if (likely(s->in_progress <= cow_threshold) &&
++ unlikely(waitqueue_active(&s->in_progress_wait)))
++ wake_up_locked(&s->in_progress_wait);
++ spin_unlock(&s->in_progress_wait.lock);
++}
++
++static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
++{
++ if (unlikely(s->in_progress > cow_threshold)) {
++ spin_lock(&s->in_progress_wait.lock);
++ if (likely(s->in_progress > cow_threshold)) {
++ /*
++ * NOTE: this throttle doesn't account for whether
++ * the caller is servicing an IO that will trigger a COW
++ * so excess throttling may result for chunks not required
++ * to be COW'd. But if cow_threshold was reached, extra
++ * throttling is unlikely to negatively impact performance.
++ */
++ DECLARE_WAITQUEUE(wait, current);
++ __add_wait_queue(&s->in_progress_wait, &wait);
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ spin_unlock(&s->in_progress_wait.lock);
++ if (unlock_origins)
++ up_read(&_origins_lock);
++ io_schedule();
++ remove_wait_queue(&s->in_progress_wait, &wait);
++ return false;
++ }
++ spin_unlock(&s->in_progress_wait.lock);
++ }
++ return true;
++}
++
+ /*
+ * Flush a list of buffers.
+ */
+@@ -1412,7 +1460,7 @@ static void flush_bios(struct bio *bio)
+ }
+ }
+
+-static int do_origin(struct dm_dev *origin, struct bio *bio);
++static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
+
+ /*
+ * Flush a list of buffers.
+@@ -1425,7 +1473,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
+ while (bio) {
+ n = bio->bi_next;
+ bio->bi_next = NULL;
+- r = do_origin(s->origin, bio);
++ r = do_origin(s->origin, bio, false);
+ if (r == DM_MAPIO_REMAPPED)
+ generic_make_request(bio);
+ bio = n;
+@@ -1477,7 +1525,7 @@ static void pending_complete(void *context, int success)
+
+ if (!success) {
+ /* Read/write error - snapshot is unusable */
+- down_write(&s->lock);
++ mutex_lock(&s->lock);
+ __invalidate_snapshot(s, -EIO);
+ error = 1;
+ goto out;
+@@ -1485,14 +1533,14 @@ static void pending_complete(void *context, int success)
+
+ e = alloc_completed_exception(GFP_NOIO);
+ if (!e) {
+- down_write(&s->lock);
++ mutex_lock(&s->lock);
+ __invalidate_snapshot(s, -ENOMEM);
+ error = 1;
+ goto out;
+ }
+ *e = pe->e;
+
+- down_write(&s->lock);
++ mutex_lock(&s->lock);
+ if (!s->valid) {
+ free_completed_exception(e);
+ error = 1;
+@@ -1517,7 +1565,7 @@ out:
+ full_bio->bi_end_io = pe->full_bio_end_io;
+ increment_pending_exceptions_done_count();
+
+- up_write(&s->lock);
++ mutex_unlock(&s->lock);
+
+ /* Submit any pending write bios */
+ if (error) {
+@@ -1579,7 +1627,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
+ }
+ list_add(&pe->out_of_order_entry, lh);
+ }
+- up(&s->cow_count);
++ account_end_copy(s);
+ }
+
+ /*
+@@ -1603,7 +1651,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
+ dest.count = src.count;
+
+ /* Hand over to kcopyd */
+- down(&s->cow_count);
++ account_start_copy(s);
+ dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
+ }
+
+@@ -1623,7 +1671,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
+ pe->full_bio = bio;
+ pe->full_bio_end_io = bio->bi_end_io;
+
+- down(&s->cow_count);
++ account_start_copy(s);
+ callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
+ copy_callback, pe);
+
+@@ -1714,9 +1762,12 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
+ if (!s->valid)
+ return DM_MAPIO_KILL;
+
+- /* FIXME: should only take write lock if we need
+- * to copy an exception */
+- down_write(&s->lock);
++ if (bio_data_dir(bio) == WRITE) {
++ while (unlikely(!wait_for_in_progress(s, false)))
++ ; /* wait_for_in_progress() has slept */
++ }
++
++ mutex_lock(&s->lock);
+
+ if (!s->valid || (unlikely(s->snapshot_overflowed) &&
+ bio_data_dir(bio) == WRITE)) {
+@@ -1739,9 +1790,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
+ if (bio_data_dir(bio) == WRITE) {
+ pe = __lookup_pending_exception(s, chunk);
+ if (!pe) {
+- up_write(&s->lock);
++ mutex_unlock(&s->lock);
+ pe = alloc_pending_exception(s);
+- down_write(&s->lock);
++ mutex_lock(&s->lock);
+
+ if (!s->valid || s->snapshot_overflowed) {
+ free_pending_exception(pe);
+@@ -1776,7 +1827,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
+ bio->bi_iter.bi_size ==
+ (s->store->chunk_size << SECTOR_SHIFT)) {
+ pe->started = 1;
+- up_write(&s->lock);
++ mutex_unlock(&s->lock);
+ start_full_bio(pe, bio);
+ goto out;
+ }
+@@ -1786,7 +1837,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
+ if (!pe->started) {
+ /* this is protected by snap->lock */
+ pe->started = 1;
+- up_write(&s->lock);
++ mutex_unlock(&s->lock);
+ start_copy(pe);
+ goto out;
+ }
+@@ -1796,7 +1847,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
+ }
+
+ out_unlock:
+- up_write(&s->lock);
++ mutex_unlock(&s->lock);
+ out:
+ return r;
+ }
+@@ -1832,7 +1883,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
+
+ chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
+
+- down_write(&s->lock);
++ mutex_lock(&s->lock);
+
+ /* Full merging snapshots are redirected to the origin */
+ if (!s->valid)
+@@ -1863,12 +1914,12 @@ redirect_to_origin:
+ bio_set_dev(bio, s->origin->bdev);
+
+ if (bio_data_dir(bio) == WRITE) {
+- up_write(&s->lock);
+- return do_origin(s->origin, bio);
++ mutex_unlock(&s->lock);
++ return do_origin(s->origin, bio, false);
+ }
+
+ out_unlock:
+- up_write(&s->lock);
++ mutex_unlock(&s->lock);
+
+ return r;
+ }
+@@ -1900,7 +1951,7 @@ static int snapshot_preresume(struct dm_target *ti)
+ down_read(&_origins_lock);
+ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
+ if (snap_src && snap_dest) {
+- down_read(&snap_src->lock);
++ mutex_lock(&snap_src->lock);
+ if (s == snap_src) {
+ DMERR("Unable to resume snapshot source until "
+ "handover completes.");
+@@ -1910,7 +1961,7 @@ static int snapshot_preresume(struct dm_target *ti)
+ "source is suspended.");
+ r = -EINVAL;
+ }
+- up_read(&snap_src->lock);
++ mutex_unlock(&snap_src->lock);
+ }
+ up_read(&_origins_lock);
+
+@@ -1956,11 +2007,11 @@ static void snapshot_resume(struct dm_target *ti)
+
+ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
+ if (snap_src && snap_dest) {
+- down_write(&snap_src->lock);
+- down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
++ mutex_lock(&snap_src->lock);
++ mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+ __handover_exceptions(snap_src, snap_dest);
+- up_write(&snap_dest->lock);
+- up_write(&snap_src->lock);
++ mutex_unlock(&snap_dest->lock);
++ mutex_unlock(&snap_src->lock);
+ }
+
+ up_read(&_origins_lock);
+@@ -1975,9 +2026,9 @@ static void snapshot_resume(struct dm_target *ti)
+ /* Now we have correct chunk size, reregister */
+ reregister_snapshot(s);
+
+- down_write(&s->lock);
++ mutex_lock(&s->lock);
+ s->active = 1;
+- up_write(&s->lock);
++ mutex_unlock(&s->lock);
+ }
+
+ static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
+@@ -2017,7 +2068,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
+ switch (type) {
+ case STATUSTYPE_INFO:
+
+- down_write(&snap->lock);
++ mutex_lock(&snap->lock);
+
+ if (!snap->valid)
+ DMEMIT("Invalid");
+@@ -2042,7 +2093,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
+ DMEMIT("Unknown");
+ }
+
+- up_write(&snap->lock);
++ mutex_unlock(&snap->lock);
+
+ break;
+
+@@ -2108,7 +2159,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
+ if (dm_target_is_snapshot_merge(snap->ti))
+ continue;
+
+- down_write(&snap->lock);
++ mutex_lock(&snap->lock);
+
+ /* Only deal with valid and active snapshots */
+ if (!snap->valid || !snap->active)
+@@ -2135,9 +2186,9 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
+
+ pe = __lookup_pending_exception(snap, chunk);
+ if (!pe) {
+- up_write(&snap->lock);
++ mutex_unlock(&snap->lock);
+ pe = alloc_pending_exception(snap);
+- down_write(&snap->lock);
++ mutex_lock(&snap->lock);
+
+ if (!snap->valid) {
+ free_pending_exception(pe);
+@@ -2180,7 +2231,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
+ }
+
+ next_snapshot:
+- up_write(&snap->lock);
++ mutex_unlock(&snap->lock);
+
+ if (pe_to_start_now) {
+ start_copy(pe_to_start_now);
+@@ -2201,15 +2252,24 @@ next_snapshot:
+ /*
+ * Called on a write from the origin driver.
+ */
+-static int do_origin(struct dm_dev *origin, struct bio *bio)
++static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
+ {
+ struct origin *o;
+ int r = DM_MAPIO_REMAPPED;
+
++again:
+ down_read(&_origins_lock);
+ o = __lookup_origin(origin->bdev);
+- if (o)
++ if (o) {
++ if (limit) {
++ struct dm_snapshot *s;
++ list_for_each_entry(s, &o->snapshots, list)
++ if (unlikely(!wait_for_in_progress(s, true)))
++ goto again;
++ }
++
+ r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
++ }
+ up_read(&_origins_lock);
+
+ return r;
+@@ -2322,7 +2382,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
+ dm_accept_partial_bio(bio, available_sectors);
+
+ /* Only tell snapshots if this is a write */
+- return do_origin(o->dev, bio);
++ return do_origin(o->dev, bio, true);
+ }
+
+ static long origin_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
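The dm-snapshot hunks above replace the old semaphore operations on s->cow_count with account_start_copy()/account_end_copy(), and make the write paths call wait_for_in_progress() before taking the now mutex-based snapshot lock; the helpers themselves are defined earlier in the patch and are not visible here. As a rough userspace analogue of the in-flight accounting they appear to implement, assuming a fixed cap on concurrent COW copies and using pthreads in place of the kernel primitives (all names below are illustrative, not the driver's):

#include <pthread.h>

/* Illustrative stand-in for the snapshot's in-flight copy accounting. */
struct copy_throttle {
        pthread_mutex_t lock;
        pthread_cond_t  room;
        unsigned int    in_progress;
        unsigned int    limit;          /* assumed cap on concurrent copies */
};

static void throttle_init(struct copy_throttle *t, unsigned int limit)
{
        pthread_mutex_init(&t->lock, NULL);
        pthread_cond_init(&t->room, NULL);
        t->in_progress = 0;
        t->limit = limit;
}

/* Start of a copy: wait until there is headroom, then count ourselves in. */
static void copy_start(struct copy_throttle *t)
{
        pthread_mutex_lock(&t->lock);
        while (t->in_progress >= t->limit)
                pthread_cond_wait(&t->room, &t->lock);
        t->in_progress++;
        pthread_mutex_unlock(&t->lock);
}

/* End of a copy (the copy_callback() side): make room for waiters. */
static void copy_end(struct copy_throttle *t)
{
        pthread_mutex_lock(&t->lock);
        t->in_progress--;
        pthread_cond_broadcast(&t->room);
        pthread_mutex_unlock(&t->lock);
}

The property the hunks preserve is that a writer waits for headroom before taking the snapshot mutex, and do_origin() retries its origin lookup ("goto again") whenever wait_for_in_progress() had to sleep.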
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index aa7795990989..0ee5eae71690 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2962,7 +2962,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
+ return (struct pool *)pmd;
+ }
+
+- pool = kmalloc(sizeof(*pool), GFP_KERNEL);
++ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool) {
+ *error = "Error allocating memory for pool";
+ err_p = ERR_PTR(-ENOMEM);
+diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
+index 70cee5c0c89a..29a16f8a4123 100644
+--- a/drivers/media/platform/vimc/vimc-sensor.c
++++ b/drivers/media/platform/vimc/vimc-sensor.c
+@@ -200,13 +200,6 @@ static void *vimc_sen_process_frame(struct vimc_ent_device *ved,
+ {
+ struct vimc_sen_device *vsen = container_of(ved, struct vimc_sen_device,
+ ved);
+- const struct vimc_pix_map *vpix;
+- unsigned int frame_size;
+-
+- /* Calculate the frame size */
+- vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
+- frame_size = vsen->mbus_format.width * vpix->bpp *
+- vsen->mbus_format.height;
+
+ tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
+ return vsen->frame;
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index c1eeba1906fd..1cc4c99aa504 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3992,7 +3992,7 @@ out:
+ * this to-be-skipped slave to send a packet out.
+ */
+ old_arr = rtnl_dereference(bond->slave_arr);
+- for (idx = 0; idx < old_arr->count; idx++) {
++ for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
+ if (skipslave == old_arr->arr[idx]) {
+ old_arr->arr[idx] =
+ old_arr->arr[old_arr->count-1];
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 5c74787f903b..a909aa315a92 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -4077,9 +4077,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
+ if (err)
+ goto err_col_port_add;
+- err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
+- if (err)
+- goto err_col_port_enable;
+
+ mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
+ mlxsw_sp_port->local_port);
+@@ -4094,8 +4091,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
+
+ return 0;
+
+-err_col_port_enable:
+- mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
+ err_col_port_add:
+ if (!lag->ref_count)
+ mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+@@ -4114,7 +4109,6 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
+ WARN_ON(lag->ref_count == 0);
+
+- mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
+ mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
+
+ /* Any VLANs configured on the port are no longer valid */
+@@ -4159,21 +4153,56 @@ static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+ }
+
+-static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
+- bool lag_tx_enabled)
++static int
++mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
+ {
+- if (lag_tx_enabled)
+- return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
+- mlxsw_sp_port->lag_id);
+- else
+- return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
+- mlxsw_sp_port->lag_id);
++ int err;
++
++ err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
++ mlxsw_sp_port->lag_id);
++ if (err)
++ return err;
++
++ err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
++ if (err)
++ goto err_dist_port_add;
++
++ return 0;
++
++err_dist_port_add:
++ mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
++ return err;
++}
++
++static int
++mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
++{
++ int err;
++
++ err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
++ mlxsw_sp_port->lag_id);
++ if (err)
++ return err;
++
++ err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
++ mlxsw_sp_port->lag_id);
++ if (err)
++ goto err_col_port_disable;
++
++ return 0;
++
++err_col_port_disable:
++ mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
++ return err;
+ }
+
+ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netdev_lag_lower_state_info *info)
+ {
+- return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
++ if (info->tx_enabled)
++ return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
++ else
++ return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
+ }
+
+ static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+@@ -4309,8 +4338,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
+ err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
+ upper_dev);
+ } else {
+- mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port,
+- false);
++ mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
+ mlxsw_sp_port_lag_leave(mlxsw_sp_port,
+ upper_dev);
+ }
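The new mlxsw_sp_port_lag_col_dist_enable()/_disable() helpers above pair the collector and distributor operations and roll the first step back when the second one fails, so the port is never left half-configured. Stripped of the mlxsw specifics, that error-unwinding shape is the usual goto pattern; a self-contained sketch with placeholder steps (step_a/step_b/undo_a are illustrative, not driver calls):

#include <errno.h>

static int step_a(void)  { return 0; }
static int step_b(void)  { return -EIO; }       /* pretend the second step fails */
static void undo_a(void) { }

/* Apply both steps, or leave the state exactly as it was found. */
static int enable_both(void)
{
        int err;

        err = step_a();
        if (err)
                return err;

        err = step_b();
        if (err)
                goto err_step_b;

        return 0;

err_step_b:
        undo_a();               /* roll back the first step before failing */
        return err;
}

The disable helper in the hunk is the mirror image: it removes the distributor first and re-adds it if disabling the collector fails.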
+diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
+index 35f39f23d881..8f8c9ede88c2 100644
+--- a/drivers/net/usb/sr9800.c
++++ b/drivers/net/usb/sr9800.c
+@@ -336,7 +336,7 @@ static void sr_set_multicast(struct net_device *net)
+ static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
+ {
+ struct usbnet *dev = netdev_priv(net);
+- __le16 res;
++ __le16 res = 0;
+
+ mutex_lock(&dev->phy_mutex);
+ sr_set_sw_mii(dev);
+diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
+index 4defb7a0330f..53b66e9434c9 100644
+--- a/drivers/net/wireless/ath/ath6kl/usb.c
++++ b/drivers/net/wireless/ath/ath6kl/usb.c
+@@ -132,6 +132,10 @@ ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe)
+ struct ath6kl_urb_context *urb_context = NULL;
+ unsigned long flags;
+
++ /* bail if this pipe is not initialized */
++ if (!pipe->ar_usb)
++ return NULL;
++
+ spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
+ if (!list_empty(&pipe->urb_list_head)) {
+ urb_context =
+@@ -150,6 +154,10 @@ static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe,
+ {
+ unsigned long flags;
+
++ /* bail if this pipe is not initialized */
++ if (!pipe->ar_usb)
++ return;
++
+ spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
+ pipe->urb_cnt++;
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
+index f6d00613c53d..e1297809535f 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
++++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
+@@ -774,6 +774,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
+ return;
+ } else {
+ noa_num = (noa_len - 2) / 13;
++ if (noa_num > P2P_MAX_NOA_NUM)
++ noa_num = P2P_MAX_NOA_NUM;
++
+ }
+ noa_index = ie[3];
+ if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
+@@ -868,6 +871,9 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
+ return;
+ } else {
+ noa_num = (noa_len - 2) / 13;
++ if (noa_num > P2P_MAX_NOA_NUM)
++ noa_num = P2P_MAX_NOA_NUM;
++
+ }
+ noa_index = ie[3];
+ if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
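Both rtlwifi hunks clamp noa_num, which is derived from a peer-supplied IE length, to P2P_MAX_NOA_NUM before the NoA descriptors are copied into fixed-size arrays. The check itself is plain bounds arithmetic; restated as a standalone helper (MAX_NOA_DESC is a hypothetical stand-in for P2P_MAX_NOA_NUM, whose value is defined elsewhere; 13 is the per-descriptor size used in the hunk):

#include <stddef.h>

#define MAX_NOA_DESC    2       /* hypothetical array bound */
#define NOA_DESC_LEN    13      /* bytes per NoA descriptor, as in (noa_len - 2) / 13 */

/* How many descriptors may safely be parsed from an attribute of noa_len bytes. */
static size_t noa_desc_count(size_t noa_len)
{
        size_t n;

        if (noa_len < 2)
                return 0;

        n = (noa_len - 2) / NOA_DESC_LEN;
        if (n > MAX_NOA_DESC)
                n = MAX_NOA_DESC;       /* never exceed the destination array */

        return n;
}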
+diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
+index 5d823e965883..fcb57d64d97e 100644
+--- a/drivers/nfc/pn533/usb.c
++++ b/drivers/nfc/pn533/usb.c
+@@ -559,18 +559,25 @@ static int pn533_usb_probe(struct usb_interface *interface,
+
+ rc = pn533_finalize_setup(priv);
+ if (rc)
+- goto error;
++ goto err_deregister;
+
+ usb_set_intfdata(interface, phy);
+
+ return 0;
+
++err_deregister:
++ pn533_unregister_device(phy->priv);
+ error:
++ usb_kill_urb(phy->in_urb);
++ usb_kill_urb(phy->out_urb);
++ usb_kill_urb(phy->ack_urb);
++
+ usb_free_urb(phy->in_urb);
+ usb_free_urb(phy->out_urb);
+ usb_free_urb(phy->ack_urb);
+ usb_put_dev(phy->udev);
+ kfree(in_buf);
++ kfree(phy->ack_buffer);
+
+ return rc;
+ }
+diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
+index c2e6e3d1073f..5500660bbb10 100644
+--- a/drivers/pci/pcie/pme.c
++++ b/drivers/pci/pcie/pme.c
+@@ -441,6 +441,7 @@ static void pcie_pme_remove(struct pcie_device *srv)
+
+ pcie_pme_disable_interrupt(srv->port, data);
+ free_irq(srv->irq, srv);
++ cancel_work_sync(&data->work);
+ kfree(data);
+ }
+
+diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c
+index d19307f791c6..9e6472834e37 100644
+--- a/drivers/power/supply/max14656_charger_detector.c
++++ b/drivers/power/supply/max14656_charger_detector.c
+@@ -240,6 +240,14 @@ static enum power_supply_property max14656_battery_props[] = {
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ };
+
++static void stop_irq_work(void *data)
++{
++ struct max14656_chip *chip = data;
++
++ cancel_delayed_work_sync(&chip->irq_work);
++}
++
++
+ static int max14656_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+ {
+@@ -278,8 +286,6 @@ static int max14656_probe(struct i2c_client *client,
+ if (ret)
+ return -ENODEV;
+
+- INIT_DELAYED_WORK(&chip->irq_work, max14656_irq_worker);
+-
+ chip->detect_psy = devm_power_supply_register(dev,
+ &chip->psy_desc, &psy_cfg);
+ if (IS_ERR(chip->detect_psy)) {
+@@ -287,6 +293,13 @@ static int max14656_probe(struct i2c_client *client,
+ return -EINVAL;
+ }
+
++ INIT_DELAYED_WORK(&chip->irq_work, max14656_irq_worker);
++ ret = devm_add_action(dev, stop_irq_work, chip);
++ if (ret) {
++ dev_err(dev, "devm_add_action %d failed\n", ret);
++ return ret;
++ }
++
+ ret = devm_request_irq(dev, chip->irq, max14656_irq,
+ IRQF_TRIGGER_FALLING,
+ MAX14656_NAME, chip);
+diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
+index 3c8c6f942e67..a06792966ea9 100644
+--- a/drivers/rtc/rtc-pcf8523.c
++++ b/drivers/rtc/rtc-pcf8523.c
+@@ -94,8 +94,9 @@ static int pcf8523_voltage_low(struct i2c_client *client)
+ return !!(value & REG_CONTROL3_BLF);
+ }
+
+-static int pcf8523_select_capacitance(struct i2c_client *client, bool high)
++static int pcf8523_load_capacitance(struct i2c_client *client)
+ {
++ u32 load;
+ u8 value;
+ int err;
+
+@@ -103,14 +104,24 @@ static int pcf8523_select_capacitance(struct i2c_client *client, bool high)
+ if (err < 0)
+ return err;
+
+- if (!high)
+- value &= ~REG_CONTROL1_CAP_SEL;
+- else
++ load = 12500;
++ of_property_read_u32(client->dev.of_node, "quartz-load-femtofarads",
++ &load);
++
++ switch (load) {
++ default:
++ dev_warn(&client->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 12500",
++ load);
++ /* fall through */
++ case 12500:
+ value |= REG_CONTROL1_CAP_SEL;
++ break;
++ case 7000:
++ value &= ~REG_CONTROL1_CAP_SEL;
++ break;
++ }
+
+ err = pcf8523_write(client, REG_CONTROL1, value);
+- if (err < 0)
+- return err;
+
+ return err;
+ }
+@@ -307,9 +318,10 @@ static int pcf8523_probe(struct i2c_client *client,
+ if (!pcf)
+ return -ENOMEM;
+
+- err = pcf8523_select_capacitance(client, true);
++ err = pcf8523_load_capacitance(client);
+ if (err < 0)
+- return err;
++ dev_warn(&client->dev, "failed to set xtal load capacitance: %d",
++ err);
+
+ err = pcf8523_set_pm(client, 0);
+ if (err < 0)
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index 4ade13d72deb..07cb671bb855 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -4152,7 +4152,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
+ /* If pCmd was set to NULL from abort path, do not call scsi_done */
+ if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+- "0711 FCP cmd already NULL, sid: 0x%06x, "
++ "5688 FCP cmd already NULL, sid: 0x%06x, "
+ "did: 0x%06x, oxid: 0x%04x\n",
+ vport->fc_myDID,
+ (pnode) ? pnode->nlp_DID : 0,
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index 2fc7056cbff7..77c339a93525 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -357,8 +357,10 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
+ }
+
+ padapter->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
+- if (!padapter->HalData)
+- DBG_88E("cant not alloc memory for HAL DATA\n");
++ if (!padapter->HalData) {
++ DBG_88E("Failed to allocate memory for HAL data\n");
++ goto free_adapter;
++ }
+
+ /* step read_chip_version */
+ rtw_hal_read_chip_version(padapter);
+diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+index 0c00bb27c9c5..c764b292f6ba 100644
+--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
++++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+@@ -1767,7 +1767,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
+
+ while (credits) {
+ struct sk_buff *p = cxgbit_sock_peek_wr(csk);
+- const u32 csum = (__force u32)p->csum;
++ u32 csum;
+
+ if (unlikely(!p)) {
+ pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
+@@ -1776,6 +1776,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
+ break;
+ }
+
++ csum = (__force u32)p->csum;
+ if (unlikely(credits < csum)) {
+ pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
+ csk, csk->tid,
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index 16c607075ede..af44e6e6b3bf 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -134,9 +134,20 @@ static void __iomem *ring_options_base(struct tb_ring *ring)
+ return io;
+ }
+
+-static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
++static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
+ {
+- iowrite16(value, ring_desc_base(ring) + offset);
++ /*
++ * The other 16-bits in the register is read-only and writes to it
++ * are ignored by the hardware so we can save one ioread32() by
++ * filling the read-only bits with zeroes.
++ */
++ iowrite32(cons, ring_desc_base(ring) + 8);
++}
++
++static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
++{
++ /* See ring_iowrite_cons() above for explanation */
++ iowrite32(prod << 16, ring_desc_base(ring) + 8);
+ }
+
+ static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
+@@ -188,7 +199,10 @@ static void ring_write_descriptors(struct tb_ring *ring)
+ descriptor->sof = frame->sof;
+ }
+ ring->head = (ring->head + 1) % ring->size;
+- ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
++ if (ring->is_tx)
++ ring_iowrite_prod(ring, ring->head);
++ else
++ ring_iowrite_cons(ring, ring->head);
+ }
+ }
+
+@@ -461,7 +475,7 @@ void ring_stop(struct tb_ring *ring)
+
+ ring_iowrite32options(ring, 0, 0);
+ ring_iowrite64desc(ring, 0, 0);
+- ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
++ ring_iowrite32desc(ring, 0, 8);
+ ring_iowrite32desc(ring, 0, 12);
+ ring->head = 0;
+ ring->tail = 0;
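The thunderbolt change replaces a 16-bit MMIO write with a 32-bit one: the consumer index lives in the low half and the producer index in the high half of the ring register at offset 8, and the half a given ring direction does not own is read-only, so it can simply be written as zero. The packing is easy to state on the host side (pure arithmetic, no MMIO here):

#include <stdint.h>

/* Build the 32-bit image written at offset 8; the half the caller does not
 * own is filled with zeroes, which the hardware ignores as read-only.
 */
static uint32_t ring_reg_cons(uint16_t cons) { return (uint32_t)cons; }
static uint32_t ring_reg_prod(uint16_t prod) { return (uint32_t)prod << 16; }

For example ring_reg_prod(5) yields 0x00050000 while ring_reg_cons(5) yields 0x00000005, matching the two helpers added above; ring_stop() can then clear both halves with a single 32-bit write of zero.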
+diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
+index 08bd6b965847..e83dea8d6633 100644
+--- a/drivers/tty/n_hdlc.c
++++ b/drivers/tty/n_hdlc.c
+@@ -969,6 +969,11 @@ static int __init n_hdlc_init(void)
+
+ } /* end of init_module() */
+
++#ifdef CONFIG_SPARC
++#undef __exitdata
++#define __exitdata
++#endif
++
+ static const char hdlc_unregister_ok[] __exitdata =
+ KERN_INFO "N_HDLC: line discipline unregistered\n";
+ static const char hdlc_unregister_fail[] __exitdata =
+diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
+index b9c859365334..d774f6013d7b 100644
+--- a/drivers/tty/serial/owl-uart.c
++++ b/drivers/tty/serial/owl-uart.c
+@@ -754,7 +754,7 @@ static int __init owl_uart_init(void)
+ return ret;
+ }
+
+-static void __init owl_uart_exit(void)
++static void __exit owl_uart_exit(void)
+ {
+ platform_driver_unregister(&owl_uart_platform_driver);
+ uart_unregister_driver(&owl_uart_driver);
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index e48523da47ac..c1655aba131f 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -333,6 +333,7 @@ struct sc16is7xx_port {
+ struct kthread_worker kworker;
+ struct task_struct *kworker_task;
+ struct kthread_work irq_work;
++ struct mutex efr_lock;
+ struct sc16is7xx_one p[0];
+ };
+
+@@ -504,6 +505,21 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ div /= 4;
+ }
+
++ /* In an amazing feat of design, the Enhanced Features Register shares
++ * the address of the Interrupt Identification Register, and is
++ * switched in by writing a magic value (0xbf) to the Line Control
++ * Register. Any interrupt firing during this time will see the EFR
++ * where it expects the IIR to be, leading to "Unexpected interrupt"
++ * messages.
++ *
++ * Prevent this possibility by claiming a mutex while accessing the
++ * EFR, and claiming the same mutex from within the interrupt handler.
++ * This is similar to disabling the interrupt, but that doesn't work
++ * because the bulk of the interrupt processing is run as a workqueue
++ * job in thread context.
++ */
++ mutex_lock(&s->efr_lock);
++
+ lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
+
+ /* Open the LCR divisors for configuration */
+@@ -519,6 +535,8 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ /* Put LCR back to the normal mode */
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+
++ mutex_unlock(&s->efr_lock);
++
+ sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
+ SC16IS7XX_MCR_CLKSEL_BIT,
+ prescaler);
+@@ -701,6 +719,8 @@ static void sc16is7xx_ist(struct kthread_work *ws)
+ {
+ struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
+
++ mutex_lock(&s->efr_lock);
++
+ while (1) {
+ bool keep_polling = false;
+ int i;
+@@ -710,6 +730,8 @@ static void sc16is7xx_ist(struct kthread_work *ws)
+ if (!keep_polling)
+ break;
+ }
++
++ mutex_unlock(&s->efr_lock);
+ }
+
+ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
+@@ -904,6 +926,9 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ if (!(termios->c_cflag & CREAD))
+ port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK;
+
++ /* As above, claim the mutex while accessing the EFR. */
++ mutex_lock(&s->efr_lock);
++
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
+ SC16IS7XX_LCR_CONF_MODE_B);
+
+@@ -925,6 +950,8 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ /* Update LCR register */
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+
++ mutex_unlock(&s->efr_lock);
++
+ /* Get baud rate generator configuration */
+ baud = uart_get_baud_rate(port, termios, old,
+ port->uartclk / 16 / 4 / 0xffff,
+@@ -1187,6 +1214,7 @@ static int sc16is7xx_probe(struct device *dev,
+ s->regmap = regmap;
+ s->devtype = devtype;
+ dev_set_drvdata(dev, s);
++ mutex_init(&s->efr_lock);
+
+ kthread_init_worker(&s->kworker);
+ kthread_init_work(&s->irq_work, sc16is7xx_ist);
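The sc16is7xx hunks serialize the baud-rate and termios paths against the threaded interrupt worker with the new efr_lock, because switching the LCR into the 0xbf configuration mode makes the EFR appear at the IIR address and a concurrent interrupt would read the wrong register. A userspace sketch of the same discipline, with pthreads standing in for the kernel mutex and a flag standing in for the LCR mode (names here are illustrative):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t efr_lock = PTHREAD_MUTEX_INITIALIZER;
static bool lcr_config_mode;            /* placeholder for LCR == 0xbf */

/* Configuration path: switch the register bank, program it, switch back,
 * all under the lock so the interrupt thread never observes the switch.
 */
static void set_baud(void)
{
        pthread_mutex_lock(&efr_lock);
        lcr_config_mode = true;         /* "write 0xbf to LCR" */
        /* ... program EFR and divisor registers ... */
        lcr_config_mode = false;        /* restore the normal LCR value */
        pthread_mutex_unlock(&efr_lock);
}

/* Threaded interrupt handler: take the same lock before reading the IIR. */
static void irq_thread_fn(void)
{
        pthread_mutex_lock(&efr_lock);
        /* ... read IIR and service events ... */
        pthread_mutex_unlock(&efr_lock);
}

A mutex, rather than disabling the interrupt, fits here because, as the added comment notes, the bulk of the interrupt handling already runs in thread context.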
+diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
+index 42e42e3e7a6e..388f71046849 100644
+--- a/drivers/tty/serial/serial_mctrl_gpio.c
++++ b/drivers/tty/serial/serial_mctrl_gpio.c
+@@ -69,6 +69,9 @@ EXPORT_SYMBOL_GPL(mctrl_gpio_set);
+ struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
+ enum mctrl_gpio_idx gidx)
+ {
++ if (gpios == NULL)
++ return NULL;
++
+ return gpios->gpio[gidx];
+ }
+ EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index b543a4730ef2..bb20aa433e98 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -104,6 +104,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
+ static void hub_release(struct kref *kref);
+ static int usb_reset_and_verify_device(struct usb_device *udev);
+ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
++static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,
++ u16 portstatus);
+
+ static inline char *portspeed(struct usb_hub *hub, int portstatus)
+ {
+@@ -1110,6 +1112,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ USB_PORT_FEAT_ENABLE);
+ }
+
++ /* Make sure a warm-reset request is handled by port_event */
++ if (type == HUB_RESUME &&
++ hub_port_warm_reset_required(hub, port1, portstatus))
++ set_bit(port1, hub->event_bits);
++
+ /*
+ * Add debounce if USB3 link is in polling/link training state.
+ * Link will automatically transition to Enabled state after
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index ad315c4c6f35..4c6d612990ba 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -107,6 +107,17 @@ int usb_ep_enable(struct usb_ep *ep)
+ if (ep->enabled)
+ goto out;
+
++ /* UDC drivers can't handle endpoints with maxpacket size 0 */
++ if (usb_endpoint_maxp(ep->desc) == 0) {
++ /*
++ * We should log an error message here, but we can't call
++ * dev_err() because there's no way to find the gadget
++ * given only ep.
++ */
++ ret = -EINVAL;
++ goto out;
++ }
++
+ ret = ep->ops->enable(ep, ep->desc);
+ if (ret)
+ goto out;
+diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
+index cd92ae1231bc..6387545b17ea 100644
+--- a/drivers/usb/misc/ldusb.c
++++ b/drivers/usb/misc/ldusb.c
+@@ -498,11 +498,11 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
+ retval = -EFAULT;
+ goto unlock_exit;
+ }
+- dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size;
+-
+ retval = bytes_to_read;
+
+ spin_lock_irq(&dev->rbsl);
++ dev->ring_tail = (dev->ring_tail + 1) % ring_buffer_size;
++
+ if (dev->buffer_overflow) {
+ dev->buffer_overflow = 0;
+ spin_unlock_irq(&dev->rbsl);
+@@ -583,7 +583,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
+ 1 << 8, 0,
+ dev->interrupt_out_buffer,
+ bytes_to_write,
+- USB_CTRL_SET_TIMEOUT * HZ);
++ USB_CTRL_SET_TIMEOUT);
+ if (retval < 0)
+ dev_err(&dev->intf->dev,
+ "Couldn't submit HID_REQ_SET_REPORT %d\n",
+diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
+index 378a565ec989..a1ed6be87471 100644
+--- a/drivers/usb/misc/legousbtower.c
++++ b/drivers/usb/misc/legousbtower.c
+@@ -881,7 +881,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
+ get_version_reply,
+ sizeof(*get_version_reply),
+ 1000);
+- if (result < sizeof(*get_version_reply)) {
++ if (result != sizeof(*get_version_reply)) {
+ if (result >= 0)
+ result = -EIO;
+ dev_err(idev, "get version request failed: %d\n", result);
+diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
+index 55cebc1e6fec..163ede42af20 100644
+--- a/drivers/usb/serial/whiteheat.c
++++ b/drivers/usb/serial/whiteheat.c
+@@ -575,6 +575,10 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command,
+
+ command_port = port->serial->port[COMMAND_PORT];
+ command_info = usb_get_serial_port_data(command_port);
++
++ if (command_port->bulk_out_size < datasize + 1)
++ return -EIO;
++
+ mutex_lock(&command_info->mutex);
+ command_info->command_finished = false;
+
+@@ -648,6 +652,7 @@ static void firm_setup_port(struct tty_struct *tty)
+ struct device *dev = &port->dev;
+ struct whiteheat_port_settings port_settings;
+ unsigned int cflag = tty->termios.c_cflag;
++ speed_t baud;
+
+ port_settings.port = port->port_number + 1;
+
+@@ -708,11 +713,13 @@ static void firm_setup_port(struct tty_struct *tty)
+ dev_dbg(dev, "%s - XON = %2x, XOFF = %2x\n", __func__, port_settings.xon, port_settings.xoff);
+
+ /* get the baud rate wanted */
+- port_settings.baud = tty_get_baud_rate(tty);
+- dev_dbg(dev, "%s - baud rate = %d\n", __func__, port_settings.baud);
++ baud = tty_get_baud_rate(tty);
++ port_settings.baud = cpu_to_le32(baud);
++ dev_dbg(dev, "%s - baud rate = %u\n", __func__, baud);
+
+ /* fixme: should set validated settings */
+- tty_encode_baud_rate(tty, port_settings.baud, port_settings.baud);
++ tty_encode_baud_rate(tty, baud, baud);
++
+ /* handle any settings that aren't specified in the tty structure */
+ port_settings.lloop = 0;
+
+diff --git a/drivers/usb/serial/whiteheat.h b/drivers/usb/serial/whiteheat.h
+index 38065df4d2d8..30169c859a74 100644
+--- a/drivers/usb/serial/whiteheat.h
++++ b/drivers/usb/serial/whiteheat.h
+@@ -91,7 +91,7 @@ struct whiteheat_simple {
+
+ struct whiteheat_port_settings {
+ __u8 port; /* port number (1 to N) */
+- __u32 baud; /* any value 7 - 460800, firmware calculates
++ __le32 baud; /* any value 7 - 460800, firmware calculates
+ best fit; arrives little endian */
+ __u8 bits; /* 5, 6, 7, or 8 */
+ __u8 stop; /* 1 or 2, default 1 (2 = 1.5 if bits = 5) */
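The whiteheat change fixes an endianness bug: the firmware expects the baud field little-endian, so the struct member becomes __le32 and the host-order value is kept in a separate speed_t for tty_encode_baud_rate(). A standalone equivalent of the conversion (a hand-rolled serializer in place of cpu_to_le32()):

#include <stdint.h>

/* Serialize a host-order 32-bit value into little-endian bytes, regardless
 * of the host's own byte order (cpu_to_le32() analogue).
 */
static void put_le32(uint8_t out[4], uint32_t v)
{
        out[0] = (uint8_t)v;
        out[1] = (uint8_t)(v >> 8);
        out[2] = (uint8_t)(v >> 16);
        out[3] = (uint8_t)(v >> 24);
}

On a little-endian host the old code happened to work; on a big-endian host the unconverted value would ask the firmware for a wildly wrong rate, which is why the wire-format field is now typed __le32 so sparse can flag such mixes.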
+diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
+index afb4b0bf47b3..fd5398efce41 100644
+--- a/drivers/usb/storage/scsiglue.c
++++ b/drivers/usb/storage/scsiglue.c
+@@ -81,7 +81,6 @@ static const char* host_info(struct Scsi_Host *host)
+ static int slave_alloc (struct scsi_device *sdev)
+ {
+ struct us_data *us = host_to_us(sdev->host);
+- int maxp;
+
+ /*
+ * Set the INQUIRY transfer length to 36. We don't use any of
+@@ -90,15 +89,6 @@ static int slave_alloc (struct scsi_device *sdev)
+ */
+ sdev->inquiry_len = 36;
+
+- /*
+- * USB has unusual scatter-gather requirements: the length of each
+- * scatterlist element except the last must be divisible by the
+- * Bulk maxpacket value. Fortunately this value is always a
+- * power of 2. Inform the block layer about this requirement.
+- */
+- maxp = usb_maxpacket(us->pusb_dev, us->recv_bulk_pipe, 0);
+- blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
+-
+ /*
+ * Some host controllers may have alignment requirements.
+ * We'll play it safe by requiring 512-byte alignment always.
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 21c8925a4116..1e62f2134b3a 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -796,29 +796,9 @@ static int uas_slave_alloc(struct scsi_device *sdev)
+ {
+ struct uas_dev_info *devinfo =
+ (struct uas_dev_info *)sdev->host->hostdata;
+- int maxp;
+
+ sdev->hostdata = devinfo;
+
+- /*
+- * We have two requirements here. We must satisfy the requirements
+- * of the physical HC and the demands of the protocol, as we
+- * definitely want no additional memory allocation in this path
+- * ruling out using bounce buffers.
+- *
+- * For a transmission on USB to continue we must never send
+- * a package that is smaller than maxpacket. Hence the length of each
+- * scatterlist element except the last must be divisible by the
+- * Bulk maxpacket value.
+- * If the HC does not ensure that through SG,
+- * the upper layer must do that. We must assume nothing
+- * about the capabilities off the HC, so we use the most
+- * pessimistic requirement.
+- */
+-
+- maxp = usb_maxpacket(devinfo->udev, devinfo->data_in_pipe, 0);
+- blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
+-
+ /*
+ * The protocol has no requirements on alignment in the strict sense.
+ * Controllers may or may not have alignment restrictions.
+diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
+index 7cde3f46ad26..e996174cbfc0 100644
+--- a/fs/binfmt_script.c
++++ b/fs/binfmt_script.c
+@@ -14,13 +14,30 @@
+ #include <linux/err.h>
+ #include <linux/fs.h>
+
++static inline bool spacetab(char c) { return c == ' ' || c == '\t'; }
++static inline char *next_non_spacetab(char *first, const char *last)
++{
++ for (; first <= last; first++)
++ if (!spacetab(*first))
++ return first;
++ return NULL;
++}
++static inline char *next_terminator(char *first, const char *last)
++{
++ for (; first <= last; first++)
++ if (spacetab(*first) || !*first)
++ return first;
++ return NULL;
++}
++
+ static int load_script(struct linux_binprm *bprm)
+ {
+ const char *i_arg, *i_name;
+- char *cp;
++ char *cp, *buf_end;
+ struct file *file;
+ int retval;
+
++ /* Not ours to exec if we don't start with "#!". */
+ if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
+ return -ENOEXEC;
+
+@@ -33,18 +50,40 @@ static int load_script(struct linux_binprm *bprm)
+ if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
+ return -ENOENT;
+
+- /*
+- * This section does the #! interpretation.
+- * Sorta complicated, but hopefully it will work. -TYT
+- */
+-
++ /* Release since we are not mapping a binary into memory. */
+ allow_write_access(bprm->file);
+ fput(bprm->file);
+ bprm->file = NULL;
+
+- bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
+- if ((cp = strchr(bprm->buf, '\n')) == NULL)
+- cp = bprm->buf+BINPRM_BUF_SIZE-1;
++ /*
++ * This section handles parsing the #! line into separate
++ * interpreter path and argument strings. We must be careful
++ * because bprm->buf is not yet guaranteed to be NUL-terminated
++ * (though the buffer will have trailing NUL padding when the
++ * file size was smaller than the buffer size).
++ *
++ * We do not want to exec a truncated interpreter path, so either
++ * we find a newline (which indicates nothing is truncated), or
++ * we find a space/tab/NUL after the interpreter path (which
++ * itself may be preceded by spaces/tabs). Truncating the
++ * arguments is fine: the interpreter can re-read the script to
++ * parse them on its own.
++ */
++ buf_end = bprm->buf + sizeof(bprm->buf) - 1;
++ cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
++ if (!cp) {
++ cp = next_non_spacetab(bprm->buf + 2, buf_end);
++ if (!cp)
++ return -ENOEXEC; /* Entire buf is spaces/tabs */
++ /*
++ * If there is no later space/tab/NUL we must assume the
++ * interpreter path is truncated.
++ */
++ if (!next_terminator(cp, buf_end))
++ return -ENOEXEC;
++ cp = buf_end;
++ }
++ /* NUL-terminate the buffer and any trailing spaces/tabs. */
+ *cp = '\0';
+ while (cp > bprm->buf) {
+ cp--;
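The reworked load_script() accepts a "#!" line only when the interpreter path is provably complete: either a newline fits inside the buffer, or the path (after optional leading blanks) is followed by a space, tab or NUL within the buffer. The same decision can be restated as a small standalone predicate over a fixed-size buffer (BUF_SIZE stands in for sizeof(bprm->buf)):

#include <stdbool.h>
#include <string.h>

#define BUF_SIZE 128                    /* stand-in for sizeof(bprm->buf) */

static bool spacetab(char c) { return c == ' ' || c == '\t'; }

/* Return true when the "#!" buffer cannot contain a truncated interpreter path. */
static bool interp_path_complete(const char buf[BUF_SIZE])
{
        const char *end = buf + BUF_SIZE - 1;
        const char *cp;

        if (memchr(buf, '\n', BUF_SIZE))
                return true;            /* whole line fits, nothing truncated */

        for (cp = buf + 2; cp <= end && spacetab(*cp); cp++)
                ;                       /* skip blanks after "#!" */
        if (cp > end)
                return false;           /* buffer is nothing but blanks */

        for (; cp <= end; cp++)
                if (spacetab(*cp) || !*cp)
                        return true;    /* the path is terminated in-buffer */

        return false;                   /* path may be cut off: refuse to exec */
}

Truncated arguments, by contrast, are tolerated, since the interpreter re-reads the script and can parse them itself, exactly as the added comment explains.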
+diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
+index cc88f4f0325e..bed973330227 100644
+--- a/fs/cifs/netmisc.c
++++ b/fs/cifs/netmisc.c
+@@ -130,10 +130,6 @@ static const struct smb_to_posix_error mapping_table_ERRSRV[] = {
+ {0, 0}
+ };
+
+-static const struct smb_to_posix_error mapping_table_ERRHRD[] = {
+- {0, 0}
+-};
+-
+ /*
+ * Convert a string containing text IPv4 or IPv6 address to binary form.
+ *
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index e70975ca723b..0f3209b23c94 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1523,6 +1523,12 @@ void f2fs_quota_off_umount(struct super_block *sb)
+ set_sbi_flag(F2FS_SB(sb), SBI_NEED_FSCK);
+ }
+ }
++ /*
++ * In case of checkpoint=disable, we must flush quota blocks.
++ * This can cause NULL exception for node_inode in end_io, since
++ * put_super already dropped it.
++ */
++ sync_filesystem(sb);
+ }
+
+ int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index d933ecb7a08c..b79bba77652a 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1650,6 +1650,19 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
+ if (attr->ia_valid & ATTR_SIZE)
+ is_truncate = true;
+
++ /* Flush dirty data/metadata before non-truncate SETATTR */
++ if (is_wb && S_ISREG(inode->i_mode) &&
++ attr->ia_valid &
++ (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
++ ATTR_TIMES_SET)) {
++ err = write_inode_now(inode, true);
++ if (err)
++ return err;
++
++ fuse_set_nowrite(inode);
++ fuse_release_nowrite(inode);
++ }
++
+ if (is_truncate) {
+ fuse_set_nowrite(inode);
+ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 34cbec8e6850..969584c99c54 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -201,7 +201,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
+ {
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ int err;
+- bool lock_inode = (file->f_flags & O_TRUNC) &&
++ bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
+ fc->atomic_o_trunc &&
+ fc->writeback_cache;
+
+@@ -209,16 +209,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
+ if (err)
+ return err;
+
+- if (lock_inode)
++ if (is_wb_truncate) {
+ inode_lock(inode);
++ fuse_set_nowrite(inode);
++ }
+
+ err = fuse_do_open(fc, get_node_id(inode), file, isdir);
+
+ if (!err)
+ fuse_finish_open(inode, file);
+
+- if (lock_inode)
++ if (is_wb_truncate) {
++ fuse_release_nowrite(inode);
+ inode_unlock(inode);
++ }
+
+ return err;
+ }
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 6409ff4876cb..af062e9f4580 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5655,6 +5655,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
+ }
+ status = task->tk_status;
+ if (setclientid.sc_cred) {
++ kfree(clp->cl_acceptor);
+ clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
+ put_rpccred(setclientid.sc_cred);
+ }
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 3c1e46f4bce3..01b9d9341b54 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -783,7 +783,6 @@ static void nfs_inode_remove_request(struct nfs_page *req)
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs_page *head;
+
+- atomic_long_dec(&nfsi->nrequests);
+ if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
+ head = req->wb_head;
+
+@@ -796,8 +795,10 @@ static void nfs_inode_remove_request(struct nfs_page *req)
+ spin_unlock(&mapping->private_lock);
+ }
+
+- if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
++ if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
+ nfs_release_request(req);
++ atomic_long_dec(&nfsi->nrequests);
++ }
+ }
+
+ static void
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index 99550f4bd159..7de0c9562b70 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -2054,7 +2054,8 @@ out_write_size:
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
+ di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+- ocfs2_update_inode_fsync_trans(handle, inode, 1);
++ if (handle)
++ ocfs2_update_inode_fsync_trans(handle, inode, 1);
+ }
+ if (handle)
+ ocfs2_journal_dirty(handle, wc->w_di_bh);
+@@ -2151,13 +2152,30 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
+ struct ocfs2_dio_write_ctxt *dwc = NULL;
+ struct buffer_head *di_bh = NULL;
+ u64 p_blkno;
+- loff_t pos = iblock << inode->i_sb->s_blocksize_bits;
++ unsigned int i_blkbits = inode->i_sb->s_blocksize_bits;
++ loff_t pos = iblock << i_blkbits;
++ sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits;
+ unsigned len, total_len = bh_result->b_size;
+ int ret = 0, first_get_block = 0;
+
+ len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
+ len = min(total_len, len);
+
++ /*
++ * bh_result->b_size is count in get_more_blocks according to write
++ * "pos" and "end", we need map twice to return different buffer state:
++ * 1. area in file size, not set NEW;
++ * 2. area out file size, set NEW.
++ *
++ * iblock endblk
++ * |--------|---------|---------|---------
++ * |<-------area in file------->|
++ */
++
++ if ((iblock <= endblk) &&
++ ((iblock + ((len - 1) >> i_blkbits)) > endblk))
++ len = (endblk - iblock + 1) << i_blkbits;
++
+ mlog(0, "get block of %lu at %llu:%u req %u\n",
+ inode->i_ino, pos, len, total_len);
+
+@@ -2241,6 +2259,9 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
+ if (desc->c_needs_zero)
+ set_buffer_new(bh_result);
+
++ if (iblock > endblk)
++ set_buffer_new(bh_result);
++
+ /* May sleep in end_io. It should not happen in a irq context. So defer
+ * it to dio work queue. */
+ set_buffer_defer_completion(bh_result);
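The ocfs2_dio_wr_get_block() hunk splits a mapping request that straddles i_size: blocks inside the file are mapped as-is, while blocks beyond the end are mapped by a later call and marked new. The clamp itself is plain block arithmetic, restated here with a worked value (assumes i_size > 0, as the caller guarantees):

#include <stdint.h>

/* Clamp a request of 'len' bytes starting at block 'iblock' so it does not
 * cross endblk, the last block covered by i_size.
 */
static uint64_t clamp_len_to_eof(uint64_t iblock, uint64_t len,
                                 uint64_t i_size, unsigned int blkbits)
{
        uint64_t endblk = (i_size - 1) >> blkbits;

        if (iblock <= endblk &&
            iblock + ((len - 1) >> blkbits) > endblk)
                len = (endblk - iblock + 1) << blkbits;

        return len;
}

With 4 KiB blocks (blkbits = 12), i_size = 10000 bytes gives endblk = 2, so a 16 KiB request at iblock 0 is clamped to 12 KiB; block 3 is then mapped by the next call, which sees iblock > endblk and sets the new-buffer flag.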
+diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
+index ab30c005cc4b..9fa98abecfc6 100644
+--- a/fs/ocfs2/ioctl.c
++++ b/fs/ocfs2/ioctl.c
+@@ -290,7 +290,7 @@ static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
+ if (inode_alloc)
+ inode_lock(inode_alloc);
+
+- if (o2info_coherent(&fi->ifi_req)) {
++ if (inode_alloc && o2info_coherent(&fi->ifi_req)) {
+ status = ocfs2_inode_lock(inode_alloc, &bh, 0);
+ if (status < 0) {
+ mlog_errno(status);
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
+index 77740ef5a8e8..eca49da6d7e0 100644
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -1497,18 +1497,6 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
+ return loc->xl_ops->xlo_check_space(loc, xi);
+ }
+
+-static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
+-{
+- loc->xl_ops->xlo_add_entry(loc, name_hash);
+- loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
+- /*
+- * We can't leave the new entry's xe_name_offset at zero or
+- * add_namevalue() will go nuts. We set it to the size of our
+- * storage so that it can never be less than any other entry.
+- */
+- loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
+-}
+-
+ static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+ {
+@@ -2140,29 +2128,31 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
+ if (rc)
+ goto out;
+
+- if (loc->xl_entry) {
+- if (ocfs2_xa_can_reuse_entry(loc, xi)) {
+- orig_value_size = loc->xl_entry->xe_value_size;
+- rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
+- if (rc)
+- goto out;
+- goto alloc_value;
+- }
++ if (!loc->xl_entry) {
++ rc = -EINVAL;
++ goto out;
++ }
+
+- if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+- orig_clusters = ocfs2_xa_value_clusters(loc);
+- rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+- if (rc) {
+- mlog_errno(rc);
+- ocfs2_xa_cleanup_value_truncate(loc,
+- "overwriting",
+- orig_clusters);
+- goto out;
+- }
++ if (ocfs2_xa_can_reuse_entry(loc, xi)) {
++ orig_value_size = loc->xl_entry->xe_value_size;
++ rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
++ if (rc)
++ goto out;
++ goto alloc_value;
++ }
++
++ if (!ocfs2_xattr_is_local(loc->xl_entry)) {
++ orig_clusters = ocfs2_xa_value_clusters(loc);
++ rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
++ if (rc) {
++ mlog_errno(rc);
++ ocfs2_xa_cleanup_value_truncate(loc,
++ "overwriting",
++ orig_clusters);
++ goto out;
+ }
+- ocfs2_xa_wipe_namevalue(loc);
+- } else
+- ocfs2_xa_add_entry(loc, name_hash);
++ }
++ ocfs2_xa_wipe_namevalue(loc);
+
+ /*
+ * If we get here, we have a blank entry. Fill it. We grow our
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index 16f93d7356b7..e4a623956df5 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -1702,7 +1702,7 @@ xfs_buftarg_isolate(
+ * zero. If the value is already zero, we need to reclaim the
+ * buffer, otherwise it gets another trip through the LRU.
+ */
+- if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
++ if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
+ spin_unlock(&bp->b_lock);
+ return LRU_ROTATE;
+ }
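The one-character xfs_buf fix hinges on the return value of atomic_add_unless(): it returns non-zero when it actually performed the addition, i.e. when b_lru_ref was not already zero, and in that case the buffer has earned another trip around the LRU. The old negation rotated exactly the buffers that were already at zero and should have been reclaimed. A minimal C11 model of the helper:

#include <stdatomic.h>
#include <stdbool.h>

/* Model of atomic_add_unless(v, a, u): add 'a' to *v unless *v equals 'u'.
 * Returns true only when the addition was carried out.
 */
static bool add_unless(atomic_int *v, int a, int u)
{
        int old = atomic_load(v);

        do {
                if (old == u)
                        return false;   /* already at the excluded value */
        } while (!atomic_compare_exchange_weak(v, &old, old + a));

        return true;
}

So "if (add_unless(&lru_ref, -1, 0)) rotate; else reclaim;" is the behaviour the comment above the hunk describes.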
+diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
+index df528a623548..ea985aa7a6c5 100644
+--- a/include/net/llc_conn.h
++++ b/include/net/llc_conn.h
+@@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk);
+
+ /* Access to a connection */
+ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
+-int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
++void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+ void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
+ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
+ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index f59acacaa265..37876d842f2e 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -305,6 +305,11 @@ static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
+ return q;
+ }
+
++static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
++{
++ return rcu_dereference_bh(qdisc->dev_queue->qdisc);
++}
++
+ static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
+ {
+ return qdisc->dev_queue->qdisc_sleeping;
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index 749a42882437..c713bd62428f 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -103,6 +103,8 @@ void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
+ /*
+ * sctp/socket.c
+ */
++int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
++ int addr_len, int flags);
+ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+ int sctp_inet_listen(struct socket *sock, int backlog);
+ void sctp_write_space(struct sock *sk);
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 14d2dbf97c53..45c2cd37fe6b 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -738,7 +738,7 @@ void vtime_account_system(struct task_struct *tsk)
+
+ write_seqcount_begin(&vtime->seqcount);
+ /* We might have scheduled out from guest path */
+- if (current->flags & PF_VCPU)
++ if (tsk->flags & PF_VCPU)
+ vtime_account_guest(tsk, vtime);
+ else
+ __vtime_account_system(tsk, vtime);
+@@ -781,7 +781,7 @@ void vtime_guest_enter(struct task_struct *tsk)
+ */
+ write_seqcount_begin(&vtime->seqcount);
+ __vtime_account_system(tsk, vtime);
+- current->flags |= PF_VCPU;
++ tsk->flags |= PF_VCPU;
+ write_seqcount_end(&vtime->seqcount);
+ }
+ EXPORT_SYMBOL_GPL(vtime_guest_enter);
+@@ -792,7 +792,7 @@ void vtime_guest_exit(struct task_struct *tsk)
+
+ write_seqcount_begin(&vtime->seqcount);
+ vtime_account_guest(tsk, vtime);
+- current->flags &= ~PF_VCPU;
++ tsk->flags &= ~PF_VCPU;
+ write_seqcount_end(&vtime->seqcount);
+ }
+ EXPORT_SYMBOL_GPL(vtime_guest_exit);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index bbe5a857c082..286bbad7681b 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5764,6 +5764,7 @@ waitagain:
+ sizeof(struct trace_iterator) -
+ offsetof(struct trace_iterator, seq));
+ cpumask_clear(iter->started);
++ trace_seq_init(&iter->seq);
+ iter->pos = -1;
+
+ trace_event_read_lock();
+diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
+index 4b60f68cb492..8354ae40ec85 100644
+--- a/net/llc/llc_c_ac.c
++++ b/net/llc/llc_c_ac.c
+@@ -372,6 +372,7 @@ int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
+ llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR);
+ rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
+ if (likely(!rc)) {
++ skb_get(skb);
+ llc_conn_send_pdu(sk, skb);
+ llc_conn_ac_inc_vs_by_1(sk, skb);
+ }
+@@ -389,7 +390,8 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb)
+ llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
+ rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
+ if (likely(!rc)) {
+- rc = llc_conn_send_pdu(sk, skb);
++ skb_get(skb);
++ llc_conn_send_pdu(sk, skb);
+ llc_conn_ac_inc_vs_by_1(sk, skb);
+ }
+ return rc;
+@@ -406,6 +408,7 @@ int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
+ llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
+ rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
+ if (likely(!rc)) {
++ skb_get(skb);
+ llc_conn_send_pdu(sk, skb);
+ llc_conn_ac_inc_vs_by_1(sk, skb);
+ }
+@@ -916,7 +919,8 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
+ llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
+ rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
+ if (likely(!rc)) {
+- rc = llc_conn_send_pdu(sk, skb);
++ skb_get(skb);
++ llc_conn_send_pdu(sk, skb);
+ llc_conn_ac_inc_vs_by_1(sk, skb);
+ }
+ return rc;
+diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
+index 56c3fb5cc805..444c13e752a0 100644
+--- a/net/llc/llc_conn.c
++++ b/net/llc/llc_conn.c
+@@ -30,7 +30,7 @@
+ #endif
+
+ static int llc_find_offset(int state, int ev_type);
+-static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb);
++static void llc_conn_send_pdus(struct sock *sk);
+ static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
+ static int llc_exec_conn_trans_actions(struct sock *sk,
+ struct llc_conn_state_trans *trans,
+@@ -193,11 +193,11 @@ out_skb_put:
+ return rc;
+ }
+
+-int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
++void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
+ {
+ /* queue PDU to send to MAC layer */
+ skb_queue_tail(&sk->sk_write_queue, skb);
+- return llc_conn_send_pdus(sk, skb);
++ llc_conn_send_pdus(sk);
+ }
+
+ /**
+@@ -255,7 +255,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
+ if (howmany_resend > 0)
+ llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
+ /* any PDUs to re-send are queued up; start sending to MAC */
+- llc_conn_send_pdus(sk, NULL);
++ llc_conn_send_pdus(sk);
+ out:;
+ }
+
+@@ -296,7 +296,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
+ if (howmany_resend > 0)
+ llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
+ /* any PDUs to re-send are queued up; start sending to MAC */
+- llc_conn_send_pdus(sk, NULL);
++ llc_conn_send_pdus(sk);
+ out:;
+ }
+
+@@ -340,16 +340,12 @@ out:
+ /**
+ * llc_conn_send_pdus - Sends queued PDUs
+ * @sk: active connection
+- * @hold_skb: the skb held by caller, or NULL if does not care
+ *
+- * Sends queued pdus to MAC layer for transmission. When @hold_skb is
+- * NULL, always return 0. Otherwise, return 0 if @hold_skb is sent
+- * successfully, or 1 for failure.
++ * Sends queued pdus to MAC layer for transmission.
+ */
+-static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
++static void llc_conn_send_pdus(struct sock *sk)
+ {
+ struct sk_buff *skb;
+- int ret = 0;
+
+ while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
+ struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
+@@ -361,20 +357,10 @@ static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
+ skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
+ if (!skb2)
+ break;
+- dev_queue_xmit(skb2);
+- } else {
+- bool is_target = skb == hold_skb;
+- int rc;
+-
+- if (is_target)
+- skb_get(skb);
+- rc = dev_queue_xmit(skb);
+- if (is_target)
+- ret = rc;
++ skb = skb2;
+ }
++ dev_queue_xmit(skb);
+ }
+-
+- return ret;
+ }
+
+ /**
+diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
+index a94bd56bcac6..7ae4cc684d3a 100644
+--- a/net/llc/llc_s_ac.c
++++ b/net/llc/llc_s_ac.c
+@@ -58,8 +58,10 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
+ ev->daddr.lsap, LLC_PDU_CMD);
+ llc_pdu_init_as_ui_cmd(skb);
+ rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
+- if (likely(!rc))
++ if (likely(!rc)) {
++ skb_get(skb);
+ rc = dev_queue_xmit(skb);
++ }
+ return rc;
+ }
+
+@@ -81,8 +83,10 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
+ ev->daddr.lsap, LLC_PDU_CMD);
+ llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
+ rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
+- if (likely(!rc))
++ if (likely(!rc)) {
++ skb_get(skb);
+ rc = dev_queue_xmit(skb);
++ }
+ return rc;
+ }
+
+@@ -135,8 +139,10 @@ int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb)
+ ev->daddr.lsap, LLC_PDU_CMD);
+ llc_pdu_init_as_test_cmd(skb);
+ rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
+- if (likely(!rc))
++ if (likely(!rc)) {
++ skb_get(skb);
+ rc = dev_queue_xmit(skb);
++ }
+ return rc;
+ }
+
+diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
+index d90928f50226..a7534950e60a 100644
+--- a/net/llc/llc_sap.c
++++ b/net/llc/llc_sap.c
+@@ -197,29 +197,22 @@ out:
+ * After executing actions of the event, upper layer will be indicated
+ * if needed(on receiving an UI frame). sk can be null for the
+ * datalink_proto case.
++ *
++ * This function always consumes a reference to the skb.
+ */
+ static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
+ {
+ struct llc_sap_state_ev *ev = llc_sap_ev(skb);
+
+- /*
+- * We have to hold the skb, because llc_sap_next_state
+- * will kfree it in the sending path and we need to
+- * look at the skb->cb, where we encode llc_sap_state_ev.
+- */
+- skb_get(skb);
+ ev->ind_cfm_flag = 0;
+ llc_sap_next_state(sap, skb);
+- if (ev->ind_cfm_flag == LLC_IND) {
+- if (skb->sk->sk_state == TCP_LISTEN)
+- kfree_skb(skb);
+- else {
+- llc_save_primitive(skb->sk, skb, ev->prim);
+
+- /* queue skb to the user. */
+- if (sock_queue_rcv_skb(skb->sk, skb))
+- kfree_skb(skb);
+- }
++ if (ev->ind_cfm_flag == LLC_IND && skb->sk->sk_state != TCP_LISTEN) {
++ llc_save_primitive(skb->sk, skb, ev->prim);
++
++ /* queue skb to the user. */
++ if (sock_queue_rcv_skb(skb->sk, skb) == 0)
++ return;
+ }
+ kfree_skb(skb);
+ }
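The pattern across the llc hunks is that llc_conn_send_pdu() and the SAP transmit actions now consume the skb they are handed, so any caller that still needs the skb afterwards (to bump sequence counters, or because the state machine frees the event skb itself) takes its own reference with skb_get() first. A userspace sketch of that ownership rule with a refcounted buffer in place of an skb (names are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct buf {
        atomic_int refs;
        /* payload omitted */
};

static struct buf *buf_get(struct buf *b)       /* skb_get() analogue */
{
        atomic_fetch_add(&b->refs, 1);
        return b;
}

static void buf_put(struct buf *b)              /* kfree_skb() analogue */
{
        if (atomic_fetch_sub(&b->refs, 1) == 1)
                free(b);
}

/* Transmit path in the style of the reworked llc_conn_send_pdu():
 * it always drops exactly one reference before returning.
 */
static void send_pdu(struct buf *b)
{
        /* ... queue to the device ... */
        buf_put(b);
}

/* A caller that must keep using the buffer afterwards takes an extra
 * reference up front, like the skb_get() calls added above.
 */
static void send_and_update_counters(struct buf *b)
{
        buf_get(b);
        send_pdu(b);
        /* b is still valid here; the caller's original reference remains */
}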
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 016e293681b8..a980b49d7a4f 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -586,6 +586,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+ case RXRPC_CALL_SERVER_PREALLOC:
+ case RXRPC_CALL_SERVER_SECURING:
+ case RXRPC_CALL_SERVER_ACCEPTING:
++ rxrpc_put_call(call, rxrpc_call_put);
+ ret = -EBUSY;
+ goto error_release_sock;
+ default:
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 637949b576c6..296e95f72eb1 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1695,6 +1695,8 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
+ cl = cops->find(q, portid);
+ if (!cl)
+ return;
++ if (!cops->tcf_block)
++ return;
+ block = cops->tcf_block(q, cl);
+ if (!block)
+ return;
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 787aa52e5991..6266121a03f9 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -469,7 +469,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ * skb will be queued.
+ */
+ if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
+- struct Qdisc *rootq = qdisc_root(sch);
++ struct Qdisc *rootq = qdisc_root_bh(sch);
+ u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
+
+ q->duplicate = 0;
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 7eb06fa75730..53a66ee1331f 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -974,7 +974,7 @@ static const struct proto_ops inet6_seqpacket_ops = {
+ .owner = THIS_MODULE,
+ .release = inet6_release,
+ .bind = inet6_bind,
+- .connect = inet_dgram_connect,
++ .connect = sctp_inet_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = inet_accept,
+ .getname = sctp_getname,
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 6af871b1c297..01f88e9abbc6 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -1019,7 +1019,7 @@ static const struct proto_ops inet_seqpacket_ops = {
+ .owner = THIS_MODULE,
+ .release = inet_release, /* Needs to be wrapped... */
+ .bind = inet_bind,
+- .connect = inet_dgram_connect,
++ .connect = sctp_inet_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = inet_accept,
+ .getname = inet_getname, /* Semantics are different. */
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index a18e9be77216..4045d203b7d4 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1076,7 +1076,7 @@ out:
+ */
+ static int __sctp_connect(struct sock *sk,
+ struct sockaddr *kaddrs,
+- int addrs_size,
++ int addrs_size, int flags,
+ sctp_assoc_t *assoc_id)
+ {
+ struct net *net = sock_net(sk);
+@@ -1094,7 +1094,6 @@ static int __sctp_connect(struct sock *sk,
+ union sctp_addr *sa_addr = NULL;
+ void *addr_buf;
+ unsigned short port;
+- unsigned int f_flags = 0;
+
+ sp = sctp_sk(sk);
+ ep = sp->ep;
+@@ -1244,13 +1243,7 @@ static int __sctp_connect(struct sock *sk,
+ sp->pf->to_sk_daddr(sa_addr, sk);
+ sk->sk_err = 0;
+
+- /* in-kernel sockets don't generally have a file allocated to them
+- * if all they do is call sock_create_kern().
+- */
+- if (sk->sk_socket->file)
+- f_flags = sk->sk_socket->file->f_flags;
+-
+- timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
++ timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
+
+ if (assoc_id)
+ *assoc_id = asoc->assoc_id;
+@@ -1345,7 +1338,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
+ {
+ struct sockaddr *kaddrs;
+ gfp_t gfp = GFP_KERNEL;
+- int err = 0;
++ int err = 0, flags = 0;
+
+ pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
+ __func__, sk, addrs, addrs_size);
+@@ -1365,11 +1358,18 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
+ return -ENOMEM;
+
+ if (__copy_from_user(kaddrs, addrs, addrs_size)) {
+- err = -EFAULT;
+- } else {
+- err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
++ kfree(kaddrs);
++ return -EFAULT;
+ }
+
++ /* in-kernel sockets don't generally have a file allocated to them
++ * if all they do is call sock_create_kern().
++ */
++ if (sk->sk_socket->file)
++ flags = sk->sk_socket->file->f_flags;
++
++ err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
++
+ kfree(kaddrs);
+
+ return err;
+@@ -4166,31 +4166,36 @@ out_nounlock:
+ * len: the size of the address.
+ */
+ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
+- int addr_len)
++ int addr_len, int flags)
+ {
+- int err = 0;
+ struct sctp_af *af;
++ int err = -EINVAL;
+
+ lock_sock(sk);
+-
+ pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
+ addr, addr_len);
+
+ /* Validate addr_len before calling common connect/connectx routine. */
+ af = sctp_get_af_specific(addr->sa_family);
+- if (!af || addr_len < af->sockaddr_len) {
+- err = -EINVAL;
+- } else {
+- /* Pass correct addr len to common routine (so it knows there
+- * is only one address being passed.
+- */
+- err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
+- }
++ if (af && addr_len >= af->sockaddr_len)
++ err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
+
+ release_sock(sk);
+ return err;
+ }
+
++int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
++ int addr_len, int flags)
++{
++ if (addr_len < sizeof(uaddr->sa_family))
++ return -EINVAL;
++
++ if (uaddr->sa_family == AF_UNSPEC)
++ return -EOPNOTSUPP;
++
++ return sctp_connect(sock->sk, uaddr, addr_len, flags);
++}
++
+ /* FIXME: Write comments. */
+ static int sctp_disconnect(struct sock *sk, int flags)
+ {
+@@ -8298,7 +8303,6 @@ struct proto sctp_prot = {
+ .name = "SCTP",
+ .owner = THIS_MODULE,
+ .close = sctp_close,
+- .connect = sctp_connect,
+ .disconnect = sctp_disconnect,
+ .accept = sctp_accept,
+ .ioctl = sctp_ioctl,
+@@ -8337,7 +8341,6 @@ struct proto sctpv6_prot = {
+ .name = "SCTPv6",
+ .owner = THIS_MODULE,
+ .close = sctp_close,
+- .connect = sctp_connect,
+ .disconnect = sctp_disconnect,
+ .accept = sctp_accept,
+ .ioctl = sctp_ioctl,
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index ff31feeee8e3..9627c52c3f93 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -283,7 +283,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ [NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ },
+ [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_MESH_ID_LEN },
+- [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
++ [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_BINARY,
++ .len = ETH_ALEN },
+
+ [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
+ [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
+diff --git a/scripts/setlocalversion b/scripts/setlocalversion
+index 71f39410691b..365b3c2b8f43 100755
+--- a/scripts/setlocalversion
++++ b/scripts/setlocalversion
+@@ -73,8 +73,16 @@ scm_version()
+ printf -- '-svn%s' "`git svn find-rev $head`"
+ fi
+
+- # Check for uncommitted changes
+- if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
++ # Check for uncommitted changes.
++ # First, with git-status, but --no-optional-locks is only
++ # supported in git >= 2.14, so fall back to git-diff-index if
++ # it fails. Note that git-diff-index does not refresh the
++ # index, so it may give misleading results. See
++ # git-update-index(1), git-diff-index(1), and git-status(1).
++ if {
++ git --no-optional-locks status -uno --porcelain 2>/dev/null ||
++ git diff-index --name-only HEAD
++ } | grep -qvE '^(.. )?scripts/package'; then
+ printf '%s' -dirty
+ fi
+
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 2c0f292226d7..161ab19cb722 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -240,7 +240,8 @@ static int snd_timer_check_master(struct snd_timer_instance *master)
+ return 0;
+ }
+
+-static int snd_timer_close_locked(struct snd_timer_instance *timeri);
++static int snd_timer_close_locked(struct snd_timer_instance *timeri,
++ struct device **card_devp_to_put);
+
+ /*
+ * open a timer instance
+@@ -252,21 +253,23 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ {
+ struct snd_timer *timer;
+ struct snd_timer_instance *timeri = NULL;
++ struct device *card_dev_to_put = NULL;
+ int err;
+
++ mutex_lock(&register_mutex);
+ if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
+ /* open a slave instance */
+ if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE ||
+ tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) {
+ pr_debug("ALSA: timer: invalid slave class %i\n",
+ tid->dev_sclass);
+- return -EINVAL;
++ err = -EINVAL;
++ goto unlock;
+ }
+- mutex_lock(&register_mutex);
+ timeri = snd_timer_instance_new(owner, NULL);
+ if (!timeri) {
+- mutex_unlock(&register_mutex);
+- return -ENOMEM;
++ err = -ENOMEM;
++ goto unlock;
+ }
+ timeri->slave_class = tid->dev_sclass;
+ timeri->slave_id = tid->device;
+@@ -274,16 +277,13 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ list_add_tail(&timeri->open_list, &snd_timer_slave_list);
+ err = snd_timer_check_slave(timeri);
+ if (err < 0) {
+- snd_timer_close_locked(timeri);
++ snd_timer_close_locked(timeri, &card_dev_to_put);
+ timeri = NULL;
+ }
+- mutex_unlock(&register_mutex);
+- *ti = timeri;
+- return err;
++ goto unlock;
+ }
+
+ /* open a master instance */
+- mutex_lock(&register_mutex);
+ timer = snd_timer_find(tid);
+ #ifdef CONFIG_MODULES
+ if (!timer) {
+@@ -294,25 +294,26 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ }
+ #endif
+ if (!timer) {
+- mutex_unlock(&register_mutex);
+- return -ENODEV;
++ err = -ENODEV;
++ goto unlock;
+ }
+ if (!list_empty(&timer->open_list_head)) {
+ timeri = list_entry(timer->open_list_head.next,
+ struct snd_timer_instance, open_list);
+ if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
+- mutex_unlock(&register_mutex);
+- return -EBUSY;
++ err = -EBUSY;
++ timeri = NULL;
++ goto unlock;
+ }
+ }
+ if (timer->num_instances >= timer->max_instances) {
+- mutex_unlock(&register_mutex);
+- return -EBUSY;
++ err = -EBUSY;
++ goto unlock;
+ }
+ timeri = snd_timer_instance_new(owner, timer);
+ if (!timeri) {
+- mutex_unlock(&register_mutex);
+- return -ENOMEM;
++ err = -ENOMEM;
++ goto unlock;
+ }
+ /* take a card refcount for safe disconnection */
+ if (timer->card)
+@@ -321,16 +322,16 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ timeri->slave_id = slave_id;
+
+ if (list_empty(&timer->open_list_head) && timer->hw.open) {
+- int err = timer->hw.open(timer);
++ err = timer->hw.open(timer);
+ if (err) {
+ kfree(timeri->owner);
+ kfree(timeri);
++ timeri = NULL;
+
+ if (timer->card)
+- put_device(&timer->card->card_dev);
++ card_dev_to_put = &timer->card->card_dev;
+ module_put(timer->module);
+- mutex_unlock(&register_mutex);
+- return err;
++ goto unlock;
+ }
+ }
+
+@@ -338,10 +339,15 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ timer->num_instances++;
+ err = snd_timer_check_master(timeri);
+ if (err < 0) {
+- snd_timer_close_locked(timeri);
++ snd_timer_close_locked(timeri, &card_dev_to_put);
+ timeri = NULL;
+ }
++
++ unlock:
+ mutex_unlock(&register_mutex);
++ /* put_device() is called after unlock for avoiding deadlock */
++ if (card_dev_to_put)
++ put_device(card_dev_to_put);
+ *ti = timeri;
+ return err;
+ }
+@@ -351,7 +357,8 @@ EXPORT_SYMBOL(snd_timer_open);
+ * close a timer instance
+ * call this with register_mutex down.
+ */
+-static int snd_timer_close_locked(struct snd_timer_instance *timeri)
++static int snd_timer_close_locked(struct snd_timer_instance *timeri,
++ struct device **card_devp_to_put)
+ {
+ struct snd_timer *timer = NULL;
+ struct snd_timer_instance *slave, *tmp;
+@@ -403,7 +410,7 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri)
+ timer->hw.close(timer);
+ /* release a card refcount for safe disconnection */
+ if (timer->card)
+- put_device(&timer->card->card_dev);
++ *card_devp_to_put = &timer->card->card_dev;
+ module_put(timer->module);
+ }
+
+@@ -415,14 +422,18 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri)
+ */
+ int snd_timer_close(struct snd_timer_instance *timeri)
+ {
++ struct device *card_dev_to_put = NULL;
+ int err;
+
+ if (snd_BUG_ON(!timeri))
+ return -ENXIO;
+
+ mutex_lock(&register_mutex);
+- err = snd_timer_close_locked(timeri);
++ err = snd_timer_close_locked(timeri, &card_dev_to_put);
+ mutex_unlock(&register_mutex);
++ /* put_device() is called after unlock for avoiding deadlock */
++ if (card_dev_to_put)
++ put_device(card_dev_to_put);
+ return err;
+ }
+ EXPORT_SYMBOL(snd_timer_close);
+diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
+index 4d3034a68bdf..be2c056eb62d 100644
+--- a/sound/firewire/bebob/bebob_stream.c
++++ b/sound/firewire/bebob/bebob_stream.c
+@@ -253,8 +253,7 @@ end:
+ return err;
+ }
+
+-static unsigned int
+-map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s)
++static int map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s)
+ {
+ unsigned int sec, sections, ch, channels;
+ unsigned int pcm, midi, location;
+diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
+index 3377f0bc2828..778b42ba90b8 100644
+--- a/sound/hda/hdac_controller.c
++++ b/sound/hda/hdac_controller.c
+@@ -442,8 +442,6 @@ static void azx_int_disable(struct hdac_bus *bus)
+ list_for_each_entry(azx_dev, &bus->stream_list, list)
+ snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);
+
+- synchronize_irq(bus->irq);
+-
+ /* disable SIE for all streams */
+ snd_hdac_chip_writeb(bus, INTCTL, 0);
+
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index b42ab80ee607..96e9b3944b92 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1413,9 +1413,9 @@ static int azx_free(struct azx *chip)
+ }
+
+ if (bus->chip_init) {
+- azx_stop_chip(chip);
+ azx_clear_irq_pending(chip);
+ azx_stop_all_streams(chip);
++ azx_stop_chip(chip);
+ }
+
+ if (bus->irq >= 0)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 5412952557f7..404c50ab28fa 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -375,6 +375,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ case 0x10ec0672:
+ alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
+ break;
++ case 0x10ec0623:
++ alc_update_coef_idx(codec, 0x19, 1<<13, 0);
++ break;
+ case 0x10ec0668:
+ alc_update_coef_idx(codec, 0x7, 3<<13, 0);
+ break;
+@@ -2757,6 +2760,7 @@ enum {
+ ALC269_TYPE_ALC225,
+ ALC269_TYPE_ALC294,
+ ALC269_TYPE_ALC300,
++ ALC269_TYPE_ALC623,
+ ALC269_TYPE_ALC700,
+ };
+
+@@ -2792,6 +2796,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
+ case ALC269_TYPE_ALC225:
+ case ALC269_TYPE_ALC294:
+ case ALC269_TYPE_ALC300:
++ case ALC269_TYPE_ALC623:
+ case ALC269_TYPE_ALC700:
+ ssids = alc269_ssids;
+ break;
+@@ -3246,7 +3251,9 @@ static void alc294_init(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+
+- if (!spec->done_hp_init) {
++ /* required only at boot or S4 resume time */
++ if (!spec->done_hp_init ||
++ codec->core.dev.power.power_state.event == PM_EVENT_RESTORE) {
+ alc294_hp_init(codec);
+ spec->done_hp_init = true;
+ }
+@@ -6612,6 +6619,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
++ SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
++ SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+@@ -7270,6 +7279,9 @@ static int patch_alc269(struct hda_codec *codec)
+ spec->codec_variant = ALC269_TYPE_ALC300;
+ spec->gen.mixer_nid = 0; /* no loopback on ALC300 */
+ break;
++ case 0x10ec0623:
++ spec->codec_variant = ALC269_TYPE_ALC623;
++ break;
+ case 0x10ec0700:
+ case 0x10ec0701:
+ case 0x10ec0703:
+@@ -8350,6 +8362,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
+ HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0300, "ALC300", patch_alc269),
++ HDA_CODEC_ENTRY(0x10ec0623, "ALC623", patch_alc269),
+ HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
+ HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
+ HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861),
+diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
+index ed61fb3a46c0..5b2cd5e58df0 100644
+--- a/tools/lib/subcmd/Makefile
++++ b/tools/lib/subcmd/Makefile
+@@ -20,7 +20,13 @@ MAKEFLAGS += --no-print-directory
+ LIBFILE = $(OUTPUT)libsubcmd.a
+
+ CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+-CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
++CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -fPIC
++
++ifeq ($(DEBUG),0)
++ ifeq ($(feature-fortify-source), 1)
++ CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2
++ endif
++endif
+
+ ifeq ($(CC_NO_CLANG), 0)
+ CFLAGS += -O3
+diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
+index 94a7cabe9b82..6f9f247b4516 100644
+--- a/tools/perf/pmu-events/jevents.c
++++ b/tools/perf/pmu-events/jevents.c
+@@ -342,12 +342,12 @@ static struct fixed {
+ const char *name;
+ const char *event;
+ } fixed[] = {
+- { "inst_retired.any", "event=0xc0" },
+- { "inst_retired.any_p", "event=0xc0" },
+- { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
+- { "cpu_clk_unhalted.thread", "event=0x3c" },
+- { "cpu_clk_unhalted.core", "event=0x3c" },
+- { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
++ { "inst_retired.any", "event=0xc0,period=2000003" },
++ { "inst_retired.any_p", "event=0xc0,period=2000003" },
++ { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03,period=2000003" },
++ { "cpu_clk_unhalted.thread", "event=0x3c,period=2000003" },
++ { "cpu_clk_unhalted.core", "event=0x3c,period=2000003" },
++ { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1,period=2000003" },
+ { NULL, NULL},
+ };
+
+diff --git a/tools/perf/tests/perf-hooks.c b/tools/perf/tests/perf-hooks.c
+index a693bcf017ea..44c16fd11bf6 100644
+--- a/tools/perf/tests/perf-hooks.c
++++ b/tools/perf/tests/perf-hooks.c
+@@ -20,12 +20,11 @@ static void sigsegv_handler(int sig __maybe_unused)
+ static void the_hook(void *_hook_flags)
+ {
+ int *hook_flags = _hook_flags;
+- int *p = NULL;
+
+ *hook_flags = 1234;
+
+ /* Generate a segfault, test perf_hooks__recover */
+- *p = 0;
++ raise(SIGSEGV);
+ }
+
+ int test__perf_hooks(struct test *test __maybe_unused, int subtest __maybe_unused)
+diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
+index 4e7bd2750122..63db9872c880 100644
+--- a/tools/perf/util/map.c
++++ b/tools/perf/util/map.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include "symbol.h"
++#include <assert.h>
+ #include <errno.h>
+ #include <inttypes.h>
+ #include <limits.h>
+@@ -737,6 +738,8 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
+ }
+
+ after->start = map->end;
++ after->pgoff += map->end - pos->start;
++ assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
+ __map_groups__insert(pos->groups, after);
+ if (verbose >= 2 && !use_browser)
+ map__fprintf(after, fp);