-rw-r--r--  0000_README               |    4
-rw-r--r--  1009_linux-4.10.10.patch  | 4168
2 files changed, 4172 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 5f8d5b02..abc6f43c 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch: 1008_linux-4.10.9.patch
From: http://www.kernel.org
Desc: Linux 4.10.9
+Patch: 1009_linux-4.10.10.patch
+From: http://www.kernel.org
+Desc: Linux 4.10.10
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1009_linux-4.10.10.patch b/1009_linux-4.10.10.patch
new file mode 100644
index 00000000..8380fc62
--- /dev/null
+++ b/1009_linux-4.10.10.patch
@@ -0,0 +1,4168 @@
+diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
+index 0b7d8576001c..2d80b60eeabe 100644
+--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
++++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
+@@ -27,6 +27,7 @@ Required properties:
+ Optional properties:
+ - clocks: reference to a clock
+ - usb3-lpm-capable: determines if platform is USB3 LPM capable
++ - quirk-broken-port-ped: set if the controller has broken port disable mechanism
+
+ Example:
+ usb@f0931000 {
+diff --git a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt b/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
+index 8f3d96af81d7..1f6e101e299a 100644
+--- a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
++++ b/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
+@@ -6,10 +6,11 @@ occurred.
+
+ Required properties:
+ - compatible : should be one among the following
+- (a) "samsung,s3c2410-wdt" for Exynos4 and previous SoCs
+- (b) "samsung,exynos5250-wdt" for Exynos5250
+- (c) "samsung,exynos5420-wdt" for Exynos5420
+- (c) "samsung,exynos7-wdt" for Exynos7
++ - "samsung,s3c2410-wdt" for S3C2410
++ - "samsung,s3c6410-wdt" for S3C6410, S5PV210 and Exynos4
++ - "samsung,exynos5250-wdt" for Exynos5250
++ - "samsung,exynos5420-wdt" for Exynos5420
++ - "samsung,exynos7-wdt" for Exynos7
+
+ - reg : base physical address of the controller and length of memory mapped
+ region.
+diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
+index 11ec2d93a5e0..61e9c78bd6d1 100644
+--- a/Documentation/process/stable-kernel-rules.rst
++++ b/Documentation/process/stable-kernel-rules.rst
+@@ -124,7 +124,7 @@ specified in the following format in the sign-off area:
+
+ .. code-block:: none
+
+- Cc: <stable@vger.kernel.org> # 3.3.x-
++ Cc: <stable@vger.kernel.org> # 3.3.x
+
+ The tag has the meaning of:
+
+diff --git a/Makefile b/Makefile
+index 4ebd511dee58..52858726495b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 10
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+@@ -370,7 +370,7 @@ LDFLAGS_MODULE =
+ CFLAGS_KERNEL =
+ AFLAGS_KERNEL =
+ LDFLAGS_vmlinux =
+-CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
++CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
+ CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
+
+
+@@ -651,6 +651,12 @@ KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
+ # Tell gcc to never replace conditional load with a non-conditional one
+ KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
+
++# check for 'asm goto'
++ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
++ KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
++ KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
++endif
++
+ include scripts/Makefile.gcc-plugins
+
+ ifdef CONFIG_READABLE_ASM
+@@ -796,12 +802,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
+ # use the deterministic mode of AR if available
+ KBUILD_ARFLAGS := $(call ar-option,D)
+
+-# check for 'asm goto'
+-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+- KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+- KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
+-endif
+-
+ include scripts/Makefile.kasan
+ include scripts/Makefile.extrawarn
+ include scripts/Makefile.ubsan
+diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
+index 7e45f69a0ddc..8e8d20cdbce7 100644
+--- a/arch/arm/kernel/armksyms.c
++++ b/arch/arm/kernel/armksyms.c
+@@ -178,6 +178,6 @@ EXPORT_SYMBOL(__pv_offset);
+ #endif
+
+ #ifdef CONFIG_HAVE_ARM_SMCCC
+-EXPORT_SYMBOL(arm_smccc_smc);
+-EXPORT_SYMBOL(arm_smccc_hvc);
++EXPORT_SYMBOL(__arm_smccc_smc);
++EXPORT_SYMBOL(__arm_smccc_hvc);
+ #endif
+diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
+index 2e48b674aab1..e5d43066b889 100644
+--- a/arch/arm/kernel/smccc-call.S
++++ b/arch/arm/kernel/smccc-call.S
+@@ -46,17 +46,19 @@ UNWIND( .fnend)
+ /*
+ * void smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
+ * unsigned long a3, unsigned long a4, unsigned long a5,
+- * unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
++ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
++ * struct arm_smccc_quirk *quirk)
+ */
+-ENTRY(arm_smccc_smc)
++ENTRY(__arm_smccc_smc)
+ SMCCC SMCCC_SMC
+-ENDPROC(arm_smccc_smc)
++ENDPROC(__arm_smccc_smc)
+
+ /*
+ * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
+ * unsigned long a3, unsigned long a4, unsigned long a5,
+- * unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
++ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
++ * struct arm_smccc_quirk *quirk)
+ */
+-ENTRY(arm_smccc_hvc)
++ENTRY(__arm_smccc_hvc)
+ SMCCC SMCCC_HVC
+-ENDPROC(arm_smccc_hvc)
++ENDPROC(__arm_smccc_hvc)
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index a5265edbeeab..2fd5c135e8a4 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+ phys_addr_t addr = start, end = start + size;
+ phys_addr_t next;
+
++ assert_spin_locked(&kvm->mmu_lock);
+ pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+ do {
+ next = stage2_pgd_addr_end(addr, end);
+ if (!stage2_pgd_none(*pgd))
+ unmap_stage2_puds(kvm, pgd, addr, next);
++ /*
++ * If the range is too large, release the kvm->mmu_lock
++ * to prevent starvation and lockup detector warnings.
++ */
++ if (next != end)
++ cond_resched_lock(&kvm->mmu_lock);
+ } while (pgd++, addr = next, addr != end);
+ }
+
+@@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm)
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
++ down_read(&current->mm->mmap_sem);
+ spin_lock(&kvm->mmu_lock);
+
+ slots = kvm_memslots(kvm);
+@@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm)
+ stage2_unmap_memslot(kvm, memslot);
+
+ spin_unlock(&kvm->mmu_lock);
++ up_read(&current->mm->mmap_sem);
+ srcu_read_unlock(&kvm->srcu, idx);
+ }
+
+@@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
+ if (kvm->arch.pgd == NULL)
+ return;
+
++ spin_lock(&kvm->mmu_lock);
+ unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
++ spin_unlock(&kvm->mmu_lock);
++
+ /* Free the HW pgd, one page at a time */
+ free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
+ kvm->arch.pgd = NULL;
+@@ -1804,6 +1816,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ (KVM_PHYS_SIZE >> PAGE_SHIFT))
+ return -EFAULT;
+
++ down_read(&current->mm->mmap_sem);
+ /*
+ * A memory region could potentially cover multiple VMAs, and any holes
+ * between them, so iterate over all of them to find out if we can map
+@@ -1847,8 +1860,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ pa += vm_start - vma->vm_start;
+
+ /* IO region dirty page logging not allowed */
+- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
+- return -EINVAL;
++ if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+ ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
+ vm_end - vm_start,
+@@ -1860,7 +1875,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ } while (hva < reg_end);
+
+ if (change == KVM_MR_FLAGS_ONLY)
+- return ret;
++ goto out;
+
+ spin_lock(&kvm->mmu_lock);
+ if (ret)
+@@ -1868,6 +1883,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ else
+ stage2_flush_memslot(kvm, memslot);
+ spin_unlock(&kvm->mmu_lock);
++out:
++ up_read(&current->mm->mmap_sem);
+ return ret;
+ }
+
+diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
+index 78f368039c79..e9c4dc9e0ada 100644
+--- a/arch/arm64/kernel/arm64ksyms.c
++++ b/arch/arm64/kernel/arm64ksyms.c
+@@ -73,5 +73,5 @@ NOKPROBE_SYMBOL(_mcount);
+ #endif
+
+ /* arm-smccc */
+-EXPORT_SYMBOL(arm_smccc_smc);
+-EXPORT_SYMBOL(arm_smccc_hvc);
++EXPORT_SYMBOL(__arm_smccc_smc);
++EXPORT_SYMBOL(__arm_smccc_hvc);
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
+index bc049afc73a7..b3bb7ef97bc8 100644
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -143,8 +143,11 @@ int main(void)
+ DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS, offsetof(struct sleep_stack_data, system_regs));
+ DEFINE(SLEEP_STACK_DATA_CALLEE_REGS, offsetof(struct sleep_stack_data, callee_saved_regs));
+ #endif
+- DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0));
+- DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
++ DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0));
++ DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
++ DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
++ DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
++
+ BLANK();
+ DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
+ DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
+diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
+index ae0496fa4235..62522342e1e4 100644
+--- a/arch/arm64/kernel/smccc-call.S
++++ b/arch/arm64/kernel/smccc-call.S
+@@ -12,6 +12,7 @@
+ *
+ */
+ #include <linux/linkage.h>
++#include <linux/arm-smccc.h>
+ #include <asm/asm-offsets.h>
+
+ .macro SMCCC instr
+@@ -20,24 +21,32 @@
+ ldr x4, [sp]
+ stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
+ stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
+- ret
++ ldr x4, [sp, #8]
++ cbz x4, 1f /* no quirk structure */
++ ldr x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS]
++ cmp x9, #ARM_SMCCC_QUIRK_QCOM_A6
++ b.ne 1f
++ str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS]
++1: ret
+ .cfi_endproc
+ .endm
+
+ /*
+ * void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
+ * unsigned long a3, unsigned long a4, unsigned long a5,
+- * unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
++ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
++ * struct arm_smccc_quirk *quirk)
+ */
+-ENTRY(arm_smccc_smc)
++ENTRY(__arm_smccc_smc)
+ SMCCC smc
+-ENDPROC(arm_smccc_smc)
++ENDPROC(__arm_smccc_smc)
+
+ /*
+ * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
+ * unsigned long a3, unsigned long a4, unsigned long a5,
+- * unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
++ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
++ * struct arm_smccc_quirk *quirk)
+ */
+-ENTRY(arm_smccc_hvc)
++ENTRY(__arm_smccc_hvc)
+ SMCCC hvc
+-ENDPROC(arm_smccc_hvc)
++ENDPROC(__arm_smccc_hvc)
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 156169c6981b..ed0f50b565c3 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -41,7 +41,20 @@
+ #include <asm/pgtable.h>
+ #include <asm/tlbflush.h>
+
+-static const char *fault_name(unsigned int esr);
++struct fault_info {
++ int (*fn)(unsigned long addr, unsigned int esr,
++ struct pt_regs *regs);
++ int sig;
++ int code;
++ const char *name;
++};
++
++static const struct fault_info fault_info[];
++
++static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
++{
++ return fault_info + (esr & 63);
++}
+
+ #ifdef CONFIG_KPROBES
+ static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
+@@ -196,10 +209,12 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
+ struct pt_regs *regs)
+ {
+ struct siginfo si;
++ const struct fault_info *inf;
+
+ if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
++ inf = esr_to_fault_info(esr);
+ pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
+- tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
++ tsk->comm, task_pid_nr(tsk), inf->name, sig,
+ addr, esr);
+ show_pte(tsk->mm, addr);
+ show_regs(regs);
+@@ -218,14 +233,16 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
+ {
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->active_mm;
++ const struct fault_info *inf;
+
+ /*
+ * If we are in kernel mode at this point, we have no context to
+ * handle this fault with.
+ */
+- if (user_mode(regs))
+- __do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
+- else
++ if (user_mode(regs)) {
++ inf = esr_to_fault_info(esr);
++ __do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
++ } else
+ __do_kernel_fault(mm, addr, esr, regs);
+ }
+
+@@ -487,12 +504,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+ return 1;
+ }
+
+-static const struct fault_info {
+- int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
+- int sig;
+- int code;
+- const char *name;
+-} fault_info[] = {
++static const struct fault_info fault_info[] = {
+ { do_bad, SIGBUS, 0, "ttbr address size fault" },
+ { do_bad, SIGBUS, 0, "level 1 address size fault" },
+ { do_bad, SIGBUS, 0, "level 2 address size fault" },
+@@ -559,19 +571,13 @@ static const struct fault_info {
+ { do_bad, SIGBUS, 0, "unknown 63" },
+ };
+
+-static const char *fault_name(unsigned int esr)
+-{
+- const struct fault_info *inf = fault_info + (esr & 63);
+- return inf->name;
+-}
+-
+ /*
+ * Dispatch a data abort to the relevant handler.
+ */
+ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
+ {
+- const struct fault_info *inf = fault_info + (esr & 63);
++ const struct fault_info *inf = esr_to_fault_info(esr);
+ struct siginfo info;
+
+ if (!inf->fn(addr, esr, regs))
+diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
+index 273e61225c27..07238b39638c 100644
+--- a/arch/metag/include/asm/uaccess.h
++++ b/arch/metag/include/asm/uaccess.h
+@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
+
+ #define strlen_user(str) strnlen_user(str, 32767)
+
+-extern unsigned long __must_check __copy_user_zeroing(void *to,
+- const void __user *from,
+- unsigned long n);
++extern unsigned long raw_copy_from_user(void *to, const void __user *from,
++ unsigned long n);
+
+ static inline unsigned long
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ unsigned long res = n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+- return __copy_user_zeroing(to, from, n);
+- memset(to, 0, n);
+- return n;
++ res = raw_copy_from_user(to, from, n);
++ if (unlikely(res))
++ memset(to + (n - res), 0, res);
++ return res;
+ }
+
+-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
++#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
+ #define __copy_from_user_inatomic __copy_from_user
+
+ extern unsigned long __must_check __copy_user(void __user *to,
+diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
+index b3ebfe9c8e88..2792fc621088 100644
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -29,7 +29,6 @@
+ COPY \
+ "1:\n" \
+ " .section .fixup,\"ax\"\n" \
+- " MOV D1Ar1,#0\n" \
+ FIXUP \
+ " MOVT D1Ar1,#HI(1b)\n" \
+ " JUMP D1Ar1,#LO(1b)\n" \
+@@ -260,27 +259,31 @@
+ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "22:\n" \
+ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+- "SUB %3, %3, #32\n" \
+ "23:\n" \
+- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
++ "SUB %3, %3, #32\n" \
+ "24:\n" \
++ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
++ "25:\n" \
+ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "26:\n" \
+ "SUB %3, %3, #32\n" \
+ "DCACHE [%1+#-64], D0Ar6\n" \
+ "BR $Lloop"id"\n" \
+ \
+ "MOV RAPF, %1\n" \
+- "25:\n" \
++ "27:\n" \
+ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "26:\n" \
++ "28:\n" \
+ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "29:\n" \
+ "SUB %3, %3, #32\n" \
+- "27:\n" \
++ "30:\n" \
+ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "28:\n" \
++ "31:\n" \
+ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "32:\n" \
+ "SUB %0, %0, #8\n" \
+- "29:\n" \
++ "33:\n" \
+ "SETL [%0++], D0.7, D1.7\n" \
+ "SUB %3, %3, #32\n" \
+ "1:" \
+@@ -312,11 +315,15 @@
+ " .long 26b,3b\n" \
+ " .long 27b,3b\n" \
+ " .long 28b,3b\n" \
+- " .long 29b,4b\n" \
++ " .long 29b,3b\n" \
++ " .long 30b,3b\n" \
++ " .long 31b,3b\n" \
++ " .long 32b,3b\n" \
++ " .long 33b,4b\n" \
+ " .previous\n" \
+ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
+ : "0" (to), "1" (from), "2" (ret), "3" (n) \
+- : "D1Ar1", "D0Ar2", "memory")
++ : "D1Ar1", "D0Ar2", "cc", "memory")
+
+ /* rewind 'to' and 'from' pointers when a fault occurs
+ *
+@@ -342,7 +349,7 @@
+ #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
+ __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
+ "LSR D0Ar2, D0Ar2, #8\n" \
+- "AND D0Ar2, D0Ar2, #0x7\n" \
++ "ANDS D0Ar2, D0Ar2, #0x7\n" \
+ "ADDZ D0Ar2, D0Ar2, #4\n" \
+ "SUB D0Ar2, D0Ar2, #1\n" \
+ "MOV D1Ar1, #4\n" \
+@@ -403,47 +410,55 @@
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "22:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+- "SUB %3, %3, #16\n" \
+ "23:\n" \
+- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "24:\n" \
+- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "SUB %3, %3, #16\n" \
+- "25:\n" \
++ "24:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "26:\n" \
++ "25:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "26:\n" \
+ "SUB %3, %3, #16\n" \
+ "27:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "28:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "29:\n" \
++ "SUB %3, %3, #16\n" \
++ "30:\n" \
++ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
++ "31:\n" \
++ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "32:\n" \
+ "SUB %3, %3, #16\n" \
+ "DCACHE [%1+#-64], D0Ar6\n" \
+ "BR $Lloop"id"\n" \
+ \
+ "MOV RAPF, %1\n" \
+- "29:\n" \
++ "33:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "30:\n" \
++ "34:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "35:\n" \
+ "SUB %3, %3, #16\n" \
+- "31:\n" \
++ "36:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "32:\n" \
++ "37:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "38:\n" \
+ "SUB %3, %3, #16\n" \
+- "33:\n" \
++ "39:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "34:\n" \
++ "40:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "41:\n" \
+ "SUB %3, %3, #16\n" \
+- "35:\n" \
++ "42:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "36:\n" \
++ "43:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "44:\n" \
+ "SUB %0, %0, #4\n" \
+- "37:\n" \
++ "45:\n" \
+ "SETD [%0++], D0.7\n" \
+ "SUB %3, %3, #16\n" \
+ "1:" \
+@@ -483,11 +498,19 @@
+ " .long 34b,3b\n" \
+ " .long 35b,3b\n" \
+ " .long 36b,3b\n" \
+- " .long 37b,4b\n" \
++ " .long 37b,3b\n" \
++ " .long 38b,3b\n" \
++ " .long 39b,3b\n" \
++ " .long 40b,3b\n" \
++ " .long 41b,3b\n" \
++ " .long 42b,3b\n" \
++ " .long 43b,3b\n" \
++ " .long 44b,3b\n" \
++ " .long 45b,4b\n" \
+ " .previous\n" \
+ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
+ : "0" (to), "1" (from), "2" (ret), "3" (n) \
+- : "D1Ar1", "D0Ar2", "memory")
++ : "D1Ar1", "D0Ar2", "cc", "memory")
+
+ /* rewind 'to' and 'from' pointers when a fault occurs
+ *
+@@ -513,7 +536,7 @@
+ #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
+ __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
+ "LSR D0Ar2, D0Ar2, #8\n" \
+- "AND D0Ar2, D0Ar2, #0x7\n" \
++ "ANDS D0Ar2, D0Ar2, #0x7\n" \
+ "ADDZ D0Ar2, D0Ar2, #4\n" \
+ "SUB D0Ar2, D0Ar2, #1\n" \
+ "MOV D1Ar1, #4\n" \
+@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+ if ((unsigned long) src & 1) {
+ __asm_copy_to_user_1(dst, src, retn);
+ n--;
++ if (retn)
++ return retn + n;
+ }
+ if ((unsigned long) dst & 1) {
+ /* Worst case - byte copy */
+ while (n > 0) {
+ __asm_copy_to_user_1(dst, src, retn);
+ n--;
++ if (retn)
++ return retn + n;
+ }
+ }
+ if (((unsigned long) src & 2) && n >= 2) {
+ __asm_copy_to_user_2(dst, src, retn);
+ n -= 2;
++ if (retn)
++ return retn + n;
+ }
+ if ((unsigned long) dst & 2) {
+ /* Second worst case - word copy */
+ while (n >= 2) {
+ __asm_copy_to_user_2(dst, src, retn);
+ n -= 2;
++ if (retn)
++ return retn + n;
+ }
+ }
+
+@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+ while (n >= 8) {
+ __asm_copy_to_user_8x64(dst, src, retn);
+ n -= 8;
++ if (retn)
++ return retn + n;
+ }
+ }
+ if (n >= RAPF_MIN_BUF_SIZE) {
+@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+ while (n >= 8) {
+ __asm_copy_to_user_8x64(dst, src, retn);
+ n -= 8;
++ if (retn)
++ return retn + n;
+ }
+ }
+ #endif
+@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+ while (n >= 16) {
+ __asm_copy_to_user_16(dst, src, retn);
+ n -= 16;
++ if (retn)
++ return retn + n;
+ }
+
+ while (n >= 4) {
+ __asm_copy_to_user_4(dst, src, retn);
+ n -= 4;
++ if (retn)
++ return retn + n;
+ }
+
+ switch (n) {
+@@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+ break;
+ }
+
++ /*
++ * If we get here, retn correctly reflects the number of failing
++ * bytes.
++ */
+ return retn;
+ }
+ EXPORT_SYMBOL(__copy_user);
+@@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
+ __asm_copy_user_cont(to, from, ret, \
+ " GETB D1Ar1,[%1++]\n" \
+ "2: SETB [%0++],D1Ar1\n", \
+- "3: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
++ "3: ADD %2,%2,#1\n", \
+ " .long 2b,3b\n")
+
+ #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+ __asm_copy_user_cont(to, from, ret, \
+ " GETW D1Ar1,[%1++]\n" \
+ "2: SETW [%0++],D1Ar1\n" COPY, \
+- "3: ADD %2,%2,#2\n" \
+- " SETW [%0++],D1Ar1\n" FIXUP, \
++ "3: ADD %2,%2,#2\n" FIXUP, \
+ " .long 2b,3b\n" TENTRY)
+
+ #define __asm_copy_from_user_2(to, from, ret) \
+@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
+ __asm_copy_from_user_2x_cont(to, from, ret, \
+ " GETB D1Ar1,[%1++]\n" \
+ "4: SETB [%0++],D1Ar1\n", \
+- "5: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
++ "5: ADD %2,%2,#1\n", \
+ " .long 4b,5b\n")
+
+ #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+ __asm_copy_user_cont(to, from, ret, \
+ " GETD D1Ar1,[%1++]\n" \
+ "2: SETD [%0++],D1Ar1\n" COPY, \
+- "3: ADD %2,%2,#4\n" \
+- " SETD [%0++],D1Ar1\n" FIXUP, \
++ "3: ADD %2,%2,#4\n" FIXUP, \
+ " .long 2b,3b\n" TENTRY)
+
+ #define __asm_copy_from_user_4(to, from, ret) \
+ __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
+
+-#define __asm_copy_from_user_5(to, from, ret) \
+- __asm_copy_from_user_4x_cont(to, from, ret, \
+- " GETB D1Ar1,[%1++]\n" \
+- "4: SETB [%0++],D1Ar1\n", \
+- "5: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
+- " .long 4b,5b\n")
+-
+-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+- __asm_copy_from_user_4x_cont(to, from, ret, \
+- " GETW D1Ar1,[%1++]\n" \
+- "4: SETW [%0++],D1Ar1\n" COPY, \
+- "5: ADD %2,%2,#2\n" \
+- " SETW [%0++],D1Ar1\n" FIXUP, \
+- " .long 4b,5b\n" TENTRY)
+-
+-#define __asm_copy_from_user_6(to, from, ret) \
+- __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_7(to, from, ret) \
+- __asm_copy_from_user_6x_cont(to, from, ret, \
+- " GETB D1Ar1,[%1++]\n" \
+- "6: SETB [%0++],D1Ar1\n", \
+- "7: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
+- " .long 6b,7b\n")
+-
+-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+- __asm_copy_from_user_4x_cont(to, from, ret, \
+- " GETD D1Ar1,[%1++]\n" \
+- "4: SETD [%0++],D1Ar1\n" COPY, \
+- "5: ADD %2,%2,#4\n" \
+- " SETD [%0++],D1Ar1\n" FIXUP, \
+- " .long 4b,5b\n" TENTRY)
+-
+-#define __asm_copy_from_user_8(to, from, ret) \
+- __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_9(to, from, ret) \
+- __asm_copy_from_user_8x_cont(to, from, ret, \
+- " GETB D1Ar1,[%1++]\n" \
+- "6: SETB [%0++],D1Ar1\n", \
+- "7: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
+- " .long 6b,7b\n")
+-
+-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+- __asm_copy_from_user_8x_cont(to, from, ret, \
+- " GETW D1Ar1,[%1++]\n" \
+- "6: SETW [%0++],D1Ar1\n" COPY, \
+- "7: ADD %2,%2,#2\n" \
+- " SETW [%0++],D1Ar1\n" FIXUP, \
+- " .long 6b,7b\n" TENTRY)
+-
+-#define __asm_copy_from_user_10(to, from, ret) \
+- __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_11(to, from, ret) \
+- __asm_copy_from_user_10x_cont(to, from, ret, \
+- " GETB D1Ar1,[%1++]\n" \
+- "8: SETB [%0++],D1Ar1\n", \
+- "9: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
+- " .long 8b,9b\n")
+-
+-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+- __asm_copy_from_user_8x_cont(to, from, ret, \
+- " GETD D1Ar1,[%1++]\n" \
+- "6: SETD [%0++],D1Ar1\n" COPY, \
+- "7: ADD %2,%2,#4\n" \
+- " SETD [%0++],D1Ar1\n" FIXUP, \
+- " .long 6b,7b\n" TENTRY)
+-
+-#define __asm_copy_from_user_12(to, from, ret) \
+- __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_13(to, from, ret) \
+- __asm_copy_from_user_12x_cont(to, from, ret, \
+- " GETB D1Ar1,[%1++]\n" \
+- "8: SETB [%0++],D1Ar1\n", \
+- "9: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
+- " .long 8b,9b\n")
+-
+-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+- __asm_copy_from_user_12x_cont(to, from, ret, \
+- " GETW D1Ar1,[%1++]\n" \
+- "8: SETW [%0++],D1Ar1\n" COPY, \
+- "9: ADD %2,%2,#2\n" \
+- " SETW [%0++],D1Ar1\n" FIXUP, \
+- " .long 8b,9b\n" TENTRY)
+-
+-#define __asm_copy_from_user_14(to, from, ret) \
+- __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_15(to, from, ret) \
+- __asm_copy_from_user_14x_cont(to, from, ret, \
+- " GETB D1Ar1,[%1++]\n" \
+- "10: SETB [%0++],D1Ar1\n", \
+- "11: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
+- " .long 10b,11b\n")
+-
+-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+- __asm_copy_from_user_12x_cont(to, from, ret, \
+- " GETD D1Ar1,[%1++]\n" \
+- "8: SETD [%0++],D1Ar1\n" COPY, \
+- "9: ADD %2,%2,#4\n" \
+- " SETD [%0++],D1Ar1\n" FIXUP, \
+- " .long 8b,9b\n" TENTRY)
+-
+-#define __asm_copy_from_user_16(to, from, ret) \
+- __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
+-
+ #define __asm_copy_from_user_8x64(to, from, ret) \
+ asm volatile ( \
+ " GETL D0Ar2,D1Ar1,[%1++]\n" \
+ "2: SETL [%0++],D0Ar2,D1Ar1\n" \
+ "1:\n" \
+ " .section .fixup,\"ax\"\n" \
+- " MOV D1Ar1,#0\n" \
+- " MOV D0Ar2,#0\n" \
+ "3: ADD %2,%2,#8\n" \
+- " SETL [%0++],D0Ar2,D1Ar1\n" \
+ " MOVT D0Ar2,#HI(1b)\n" \
+ " JUMP D0Ar2,#LO(1b)\n" \
+ " .previous\n" \
+@@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user);
+ *
+ * Rationale:
+ * A fault occurs while reading from user buffer, which is the
+- * source. Since the fault is at a single address, we only
+- * need to rewind by 8 bytes.
++ * source.
+ * Since we don't write to kernel buffer until we read first,
+ * the kernel buffer is at the right state and needn't be
+- * corrected.
++ * corrected, but the source must be rewound to the beginning of
++ * the block, which is LSM_STEP*8 bytes.
++ * LSM_STEP is bits 10:8 in TXSTATUS which is already read
++ * and stored in D0Ar2
++ *
++ * NOTE: If a fault occurs at the last operation in M{G,S}ETL
++ * LSM_STEP will be 0. ie: we do 4 writes in our case, if
++ * a fault happens at the 4th write, LSM_STEP will be 0
++ * instead of 4. The code copes with that.
+ */
+ #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
+ __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
+- "SUB %1, %1, #8\n")
++ "LSR D0Ar2, D0Ar2, #5\n" \
++ "ANDS D0Ar2, D0Ar2, #0x38\n" \
++ "ADDZ D0Ar2, D0Ar2, #32\n" \
++ "SUB %1, %1, D0Ar2\n")
+
+ /* rewind 'from' pointer when a fault occurs
+ *
+ * Rationale:
+ * A fault occurs while reading from user buffer, which is the
+- * source. Since the fault is at a single address, we only
+- * need to rewind by 4 bytes.
++ * source.
+ * Since we don't write to kernel buffer until we read first,
+ * the kernel buffer is at the right state and needn't be
+- * corrected.
++ * corrected, but the source must be rewound to the beginning of
++ * the block, which is LSM_STEP*4 bytes.
++ * LSM_STEP is bits 10:8 in TXSTATUS which is already read
++ * and stored in D0Ar2
++ *
++ * NOTE: If a fault occurs at the last operation in M{G,S}ETL
++ * LSM_STEP will be 0. ie: we do 4 writes in our case, if
++ * a fault happens at the 4th write, LSM_STEP will be 0
++ * instead of 4. The code copes with that.
+ */
+ #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
+ __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
+- "SUB %1, %1, #4\n")
++ "LSR D0Ar2, D0Ar2, #6\n" \
++ "ANDS D0Ar2, D0Ar2, #0x1c\n" \
++ "ADDZ D0Ar2, D0Ar2, #16\n" \
++ "SUB %1, %1, D0Ar2\n")
+
+
+-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
+- userland. The return-value is the number of bytes that were
+- inaccessible. */
+-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+- unsigned long n)
++/*
++ * Copy from user to kernel. The return-value is the number of bytes that were
++ * inaccessible.
++ */
++unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
++ unsigned long n)
+ {
+ register char *dst asm ("A0.2") = pdst;
+ register const char __user *src asm ("A1.2") = psrc;
+@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ if ((unsigned long) src & 1) {
+ __asm_copy_from_user_1(dst, src, retn);
+ n--;
++ if (retn)
++ return retn + n;
+ }
+ if ((unsigned long) dst & 1) {
+ /* Worst case - byte copy */
+@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ __asm_copy_from_user_1(dst, src, retn);
+ n--;
+ if (retn)
+- goto copy_exception_bytes;
++ return retn + n;
+ }
+ }
+ if (((unsigned long) src & 2) && n >= 2) {
+ __asm_copy_from_user_2(dst, src, retn);
+ n -= 2;
++ if (retn)
++ return retn + n;
+ }
+ if ((unsigned long) dst & 2) {
+ /* Second worst case - word copy */
+@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ __asm_copy_from_user_2(dst, src, retn);
+ n -= 2;
+ if (retn)
+- goto copy_exception_bytes;
++ return retn + n;
+ }
+ }
+
+- /* We only need one check after the unalignment-adjustments,
+- because if both adjustments were done, either both or
+- neither reference had an exception. */
+- if (retn != 0)
+- goto copy_exception_bytes;
+-
+ #ifdef USE_RAPF
+ /* 64 bit copy loop */
+ if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
+@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ __asm_copy_from_user_8x64(dst, src, retn);
+ n -= 8;
+ if (retn)
+- goto copy_exception_bytes;
++ return retn + n;
+ }
+ }
+
+@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ __asm_copy_from_user_8x64(dst, src, retn);
+ n -= 8;
+ if (retn)
+- goto copy_exception_bytes;
++ return retn + n;
+ }
+ }
+ #endif
+@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ n -= 4;
+
+ if (retn)
+- goto copy_exception_bytes;
++ return retn + n;
+ }
+
+ /* If we get here, there were no memory read faults. */
+@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+ /* If we get here, retn correctly reflects the number of failing
+ bytes. */
+ return retn;
+-
+- copy_exception_bytes:
+- /* We already have "retn" bytes cleared, and need to clear the
+- remaining "n" bytes. A non-optimized simple byte-for-byte in-line
+- memset is preferred here, since this isn't speed-critical code and
+- we'd rather have this a leaf-function than calling memset. */
+- {
+- char *endp;
+- for (endp = dst + n; dst < endp; dst++)
+- *dst = 0;
+- }
+-
+- return retn + n;
+ }
+-EXPORT_SYMBOL(__copy_user_zeroing);
++EXPORT_SYMBOL(raw_copy_from_user);
+
+ #define __asm_clear_8x64(to, ret) \
+ asm volatile ( \
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index b3c5bde43d34..9a6e11b6f457 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1526,7 +1526,7 @@ config CPU_MIPS64_R6
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_MSA
+ select GENERIC_CSUM
+- select MIPS_O32_FP64_SUPPORT if MIPS32_O32
++ select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
+ select HAVE_KVM
+ help
+ Choose this option to build a kernel for release 6 or later of the
+diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
+index f485afe51514..a8df44d60607 100644
+--- a/arch/mips/include/asm/spinlock.h
++++ b/arch/mips/include/asm/spinlock.h
+@@ -127,7 +127,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
+ " andi %[ticket], %[ticket], 0xffff \n"
+ " bne %[ticket], %[my_ticket], 4f \n"
+ " subu %[ticket], %[my_ticket], %[ticket] \n"
+- "2: \n"
++ "2: .insn \n"
+ " .subsection 2 \n"
+ "4: andi %[ticket], %[ticket], 0xffff \n"
+ " sll %[ticket], 5 \n"
+@@ -202,7 +202,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
+ " sc %[ticket], %[ticket_ptr] \n"
+ " beqz %[ticket], 1b \n"
+ " li %[ticket], 1 \n"
+- "2: \n"
++ "2: .insn \n"
+ " .subsection 2 \n"
+ "3: b 2b \n"
+ " li %[ticket], 0 \n"
+@@ -382,7 +382,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
+ " .set reorder \n"
+ __WEAK_LLSC_MB
+ " li %2, 1 \n"
+- "2: \n"
++ "2: .insn \n"
+ : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+ : GCC_OFF_SMALL_ASM() (rw->lock)
+ : "memory");
+@@ -422,7 +422,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
+ " lui %1, 0x8000 \n"
+ " sc %1, %0 \n"
+ " li %2, 1 \n"
+- "2: \n"
++ "2: .insn \n"
+ : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
+ "=&r" (ret)
+ : GCC_OFF_SMALL_ASM() (rw->lock)
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 07718bb5fc9d..12422fd4af23 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1824,7 +1824,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ }
+
+ decode_configs(c);
+- c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
++ c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ break;
+ default:
+diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
+index dc0b29612891..52a4fdfc8513 100644
+--- a/arch/mips/kernel/genex.S
++++ b/arch/mips/kernel/genex.S
+@@ -448,7 +448,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
+ BUILD_HANDLER reserved reserved sti verbose /* others */
+
+ .align 5
+- LEAF(handle_ri_rdhwr_vivt)
++ LEAF(handle_ri_rdhwr_tlbp)
+ .set push
+ .set noat
+ .set noreorder
+@@ -467,7 +467,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
+ .set pop
+ bltz k1, handle_ri /* slow path */
+ /* fall thru */
+- END(handle_ri_rdhwr_vivt)
++ END(handle_ri_rdhwr_tlbp)
+
+ LEAF(handle_ri_rdhwr)
+ .set push
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 6c7f9d7e92b3..6e2487d59fee 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -81,7 +81,7 @@ extern asmlinkage void handle_dbe(void);
+ extern asmlinkage void handle_sys(void);
+ extern asmlinkage void handle_bp(void);
+ extern asmlinkage void handle_ri(void);
+-extern asmlinkage void handle_ri_rdhwr_vivt(void);
++extern asmlinkage void handle_ri_rdhwr_tlbp(void);
+ extern asmlinkage void handle_ri_rdhwr(void);
+ extern asmlinkage void handle_cpu(void);
+ extern asmlinkage void handle_ov(void);
+@@ -2352,9 +2352,18 @@ void __init trap_init(void)
+
+ set_except_vector(EXCCODE_SYS, handle_sys);
+ set_except_vector(EXCCODE_BP, handle_bp);
+- set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
+- (cpu_has_vtag_icache ?
+- handle_ri_rdhwr_vivt : handle_ri_rdhwr));
++
++ if (rdhwr_noopt)
++ set_except_vector(EXCCODE_RI, handle_ri);
++ else {
++ if (cpu_has_vtag_icache)
++ set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
++ else if (current_cpu_type() == CPU_LOONGSON3)
++ set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
++ else
++ set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
++ }
++
+ set_except_vector(EXCCODE_CPU, handle_cpu);
+ set_except_vector(EXCCODE_OV, handle_ov);
+ set_except_vector(EXCCODE_TR, handle_tr);
+diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
+index 9a61671c00a7..90565477dfbd 100644
+--- a/arch/mips/lantiq/xway/sysctrl.c
++++ b/arch/mips/lantiq/xway/sysctrl.c
+@@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
+
+ if (!np_xbar)
+ panic("Failed to load xbar nodes from devicetree");
+- if (of_address_to_resource(np_pmu, 0, &res_xbar))
++ if (of_address_to_resource(np_xbar, 0, &res_xbar))
+ panic("Failed to get xbar resources");
+ if (request_mem_region(res_xbar.start, resource_size(&res_xbar),
+ res_xbar.name) < 0)
+diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
+index 88cfaf81c958..9d0107fbb169 100644
+--- a/arch/mips/mm/c-r4k.c
++++ b/arch/mips/mm/c-r4k.c
+@@ -1558,6 +1558,7 @@ static void probe_vcache(void)
+ vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
+
+ c->vcache.waybit = 0;
++ c->vcache.waysize = vcache_size / c->vcache.ways;
+
+ pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
+ vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
+@@ -1660,6 +1661,7 @@ static void __init loongson3_sc_init(void)
+ /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
+ scache_size *= 4;
+ c->scache.waybit = 0;
++ c->scache.waysize = scache_size / c->scache.ways;
+ pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
+ scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
+ if (scache_size)
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 55ce39606cb8..2da5649fc545 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -762,7 +762,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
+ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
+ struct uasm_label **l,
+ unsigned int pte,
+- unsigned int ptr)
++ unsigned int ptr,
++ unsigned int flush)
+ {
+ #ifdef CONFIG_SMP
+ UASM_i_SC(p, pte, 0, ptr);
+@@ -771,6 +772,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
+ #else
+ UASM_i_SW(p, pte, 0, ptr);
+ #endif
++ if (cpu_has_ftlb && flush) {
++ BUG_ON(!cpu_has_tlbinv);
++
++ UASM_i_MFC0(p, ptr, C0_ENTRYHI);
++ uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
++ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
++ build_tlb_write_entry(p, l, r, tlb_indexed);
++
++ uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
++ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
++ build_huge_update_entries(p, pte, ptr);
++ build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
++
++ return;
++ }
++
+ build_huge_update_entries(p, pte, ptr);
+ build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
+ }
+@@ -2197,7 +2214,7 @@ static void build_r4000_tlb_load_handler(void)
+ uasm_l_tlbl_goaround2(&l, p);
+ }
+ uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
+- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
++ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
+ #endif
+
+ uasm_l_nopage_tlbl(&l, p);
+@@ -2252,7 +2269,7 @@ static void build_r4000_tlb_store_handler(void)
+ build_tlb_probe_entry(&p);
+ uasm_i_ori(&p, wr.r1, wr.r1,
+ _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
++ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
+ #endif
+
+ uasm_l_nopage_tlbs(&l, p);
+@@ -2308,7 +2325,7 @@ static void build_r4000_tlb_modify_handler(void)
+ build_tlb_probe_entry(&p);
+ uasm_i_ori(&p, wr.r1, wr.r1,
+ _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
++ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
+ #endif
+
+ uasm_l_nopage_tlbm(&l, p);
+diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
+index 3e0aa09c6b55..9e4631acfcb5 100644
+--- a/arch/mips/ralink/rt3883.c
++++ b/arch/mips/ralink/rt3883.c
+@@ -36,7 +36,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
+ static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+ static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+ static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
+-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
++static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
+ static struct rt2880_pmx_func pci_func[] = {
+ FUNC("pci-dev", 0, 40, 32),
+ FUNC("pci-host2", 1, 40, 32),
+@@ -44,7 +44,7 @@ static struct rt2880_pmx_func pci_func[] = {
+ FUNC("pci-fnc", 3, 40, 32)
+ };
+ static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
+-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
++static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
+
+ static struct rt2880_pmx_group rt3883_pinmux_data[] = {
+ GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
+diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
+index 367c5426157b..3901b80d4420 100644
+--- a/arch/nios2/kernel/prom.c
++++ b/arch/nios2/kernel/prom.c
+@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+ return alloc_bootmem_align(size, align);
+ }
+
++int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
++ bool nomap)
++{
++ reserve_bootmem(base, size, BOOTMEM_DEFAULT);
++ return 0;
++}
++
+ void __init early_init_devtree(void *params)
+ {
+ __be32 *dtb = (u32 *)__dtb_start;
+diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
+index a3fa80d1aacc..72ef4077bf2b 100644
+--- a/arch/nios2/kernel/setup.c
++++ b/arch/nios2/kernel/setup.c
+@@ -200,6 +200,9 @@ void __init setup_arch(char **cmdline_p)
+ }
+ #endif /* CONFIG_BLK_DEV_INITRD */
+
++ early_init_fdt_reserve_self();
++ early_init_fdt_scan_reserved_mem();
++
+ unflatten_and_copy_device_tree();
+
+ setup_cpuinfo();
+diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+index 411994551afc..f058e0c3e4d4 100644
+--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
++++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+@@ -33,10 +33,13 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
+ }
+
+ if (len & ~VMX_ALIGN_MASK) {
++ preempt_disable();
+ pagefault_disable();
+ enable_kernel_altivec();
+ crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
++ disable_kernel_altivec();
+ pagefault_enable();
++ preempt_enable();
+ }
+
+ tail = len & VMX_ALIGN_MASK;
+diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
+index 8d58c61908f7..df88d2067348 100644
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs)
+ nb = aligninfo[instr].len;
+ flags = aligninfo[instr].flags;
+
+- /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
+- if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
+- nb = 8;
+- flags = LD+SW;
+- } else if (IS_XFORM(instruction) &&
+- ((instruction >> 1) & 0x3ff) == 660) {
+- nb = 8;
+- flags = ST+SW;
++ /*
++ * Handle some cases which give overlaps in the DSISR values.
++ */
++ if (IS_XFORM(instruction)) {
++ switch (get_xop(instruction)) {
++ case 532: /* ldbrx */
++ nb = 8;
++ flags = LD+SW;
++ break;
++ case 660: /* stdbrx */
++ nb = 8;
++ flags = ST+SW;
++ break;
++ case 20: /* lwarx */
++ case 84: /* ldarx */
++ case 116: /* lharx */
++ case 276: /* lqarx */
++ return 0; /* not emulated ever */
++ }
+ }
+
+ /* Byteswap little endian loads and stores */
+diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
+index 32be2a844947..d5f2431daa5e 100644
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
+@@ -67,7 +67,7 @@ PPC64_CACHES:
+ * flush all bytes from start through stop-1 inclusive
+ */
+
+-_GLOBAL(flush_icache_range)
++_GLOBAL_TOC(flush_icache_range)
+ BEGIN_FTR_SECTION
+ PURGE_PREFETCHED_INS
+ blr
+@@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range)
+ *
+ * flush all bytes from start to stop-1 inclusive
+ */
+-_GLOBAL(flush_dcache_range)
++_GLOBAL_TOC(flush_dcache_range)
+
+ /*
+ * Flush the data cache to memory
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 6824157e4d2e..18a0946837d4 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -245,6 +245,15 @@ static void cpu_ready_for_interrupts(void)
+ mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+ }
+
++ /*
++ * Fixup HFSCR:TM based on CPU features. The bit is set by our
++ * early asm init because at that point we haven't updated our
++ * CPU features from firmware and device-tree. Here we have,
++ * so let's do it.
++ */
++ if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
++ mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
++
+ /* Set IR and DR in PACA MSR */
+ get_paca()->kernel_msr = MSR_KERNEL;
+ }
+diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
+index cc332608e656..65bb8f33b399 100644
+--- a/arch/powerpc/mm/hash_native_64.c
++++ b/arch/powerpc/mm/hash_native_64.c
+@@ -638,6 +638,10 @@ static void native_flush_hash_range(unsigned long number, int local)
+ unsigned long psize = batch->psize;
+ int ssize = batch->ssize;
+ int i;
++ unsigned int use_local;
++
++ use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
++ mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
+
+ local_irq_save(flags);
+
+@@ -667,8 +671,7 @@ static void native_flush_hash_range(unsigned long number, int local)
+ } pte_iterate_hashed_end();
+ }
+
+- if (mmu_has_feature(MMU_FTR_TLBIEL) &&
+- mmu_psize_defs[psize].tlbiel && local) {
++ if (use_local) {
+ asm volatile("ptesync":::"memory");
+ for (i = 0; i < number; i++) {
+ vpn = batch->vpn[i];
+diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
+index 8515dd5a5663..bd90448347eb 100644
+--- a/arch/s390/boot/compressed/misc.c
++++ b/arch/s390/boot/compressed/misc.c
+@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
+
+ unsigned long decompress_kernel(void)
+ {
+- unsigned long output_addr;
+- unsigned char *output;
++ void *output, *kernel_end;
+
+- output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
+- check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
+- memset(&_bss, 0, &_ebss - &_bss);
+- free_mem_ptr = (unsigned long)&_end;
+- free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+- output = (unsigned char *) output_addr;
++ output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
++ kernel_end = output + SZ__bss_start;
++ check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ /*
+ * Move the initrd right behind the end of the decompressed
+- * kernel image.
++ * kernel image. This also prevents initrd corruption caused by
++ * bss clearing since kernel_end will always be located behind the
++ * current bss section..
+ */
+- if (INITRD_START && INITRD_SIZE &&
+- INITRD_START < (unsigned long) output + SZ__bss_start) {
+- check_ipl_parmblock(output + SZ__bss_start,
+- INITRD_START + INITRD_SIZE);
+- memmove(output + SZ__bss_start,
+- (void *) INITRD_START, INITRD_SIZE);
+- INITRD_START = (unsigned long) output + SZ__bss_start;
++ if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
++ check_ipl_parmblock(kernel_end, INITRD_SIZE);
++ memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
++ INITRD_START = (unsigned long) kernel_end;
+ }
+ #endif
+
++ /*
++ * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
++ * initialized afterwards since they reside in bss.
++ */
++ memset(&_bss, 0, &_ebss - &_bss);
++ free_mem_ptr = (unsigned long) &_end;
++ free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
++
+ puts("Uncompressing Linux... ");
+ __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
+ puts("Ok, booting the kernel.\n");
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index f82b04e85a21..7e99fb34ff23 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -144,7 +144,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
+ " jg 2b\n" \
+ ".popsection\n" \
+ EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
+- : "=d" (__rc), "=Q" (*(to)) \
++ : "=d" (__rc), "+Q" (*(to)) \
+ : "d" (size), "Q" (*(from)), \
+ "d" (__reg0), "K" (-EFAULT) \
+ : "cc"); \
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 537c6647d84c..036fc03aefbd 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -54,6 +54,8 @@
+
+ static DEFINE_MUTEX(mce_chrdev_read_mutex);
+
++static int mce_chrdev_open_count; /* #times opened */
++
+ #define mce_log_get_idx_check(p) \
+ ({ \
+ RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+@@ -601,6 +603,10 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
+ if (atomic_read(&num_notifiers) > 2)
+ return NOTIFY_DONE;
+
++ /* Don't print when mcelog is running */
++ if (mce_chrdev_open_count > 0)
++ return NOTIFY_DONE;
++
+ __print_mce(m);
+
+ return NOTIFY_DONE;
+@@ -1871,7 +1877,6 @@ void mcheck_cpu_clear(struct cpuinfo_x86 *c)
+ */
+
+ static DEFINE_SPINLOCK(mce_chrdev_state_lock);
+-static int mce_chrdev_open_count; /* #times opened */
+ static int mce_chrdev_open_exclu; /* already open exclusive? */
+
+ static int mce_chrdev_open(struct inode *inode, struct file *file)
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index e244c19a2451..067f9813fd2c 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -223,6 +223,22 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
+ },
+ },
++ { /* Handle problems with rebooting on ASUS EeeBook X205TA */
++ .callback = set_acpi_reboot,
++ .ident = "ASUS EeeBook X205TA",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X205TA"),
++ },
++ },
++ { /* Handle problems with rebooting on ASUS EeeBook X205TAW */
++ .callback = set_acpi_reboot,
++ .ident = "ASUS EeeBook X205TAW",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X205TAW"),
++ },
++ },
+
+ /* Certec */
+ { /* Handle problems with rebooting on Certec BPC600 */
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 9764463ce833..cce7d2e3be15 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7086,13 +7086,18 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
+ }
+
+ page = nested_get_page(vcpu, vmptr);
+- if (page == NULL ||
+- *(u32 *)kmap(page) != VMCS12_REVISION) {
++ if (page == NULL) {
+ nested_vmx_failInvalid(vcpu);
++ return kvm_skip_emulated_instruction(vcpu);
++ }
++ if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+ kunmap(page);
++ nested_release_page_clean(page);
++ nested_vmx_failInvalid(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+ kunmap(page);
++ nested_release_page_clean(page);
+ vmx->nested.vmxon_ptr = vmptr;
+ break;
+ case EXIT_REASON_VMCLEAR:
+diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
+index 976b1d70edbc..4ddbfd57a7c8 100644
+--- a/arch/xtensa/include/asm/page.h
++++ b/arch/xtensa/include/asm/page.h
+@@ -164,8 +164,21 @@ void copy_user_highpage(struct page *to, struct page *from,
+
+ #define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+
++#ifdef CONFIG_MMU
++static inline unsigned long ___pa(unsigned long va)
++{
++ unsigned long off = va - PAGE_OFFSET;
++
++ if (off >= XCHAL_KSEG_SIZE)
++ off -= XCHAL_KSEG_SIZE;
++
++ return off + PHYS_OFFSET;
++}
++#define __pa(x) ___pa((unsigned long)(x))
++#else
+ #define __pa(x) \
+ ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
++#endif
+ #define __va(x) \
+ ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
+ #define pfn_valid(pfn) \
+diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
+index e19f530f1083..6d5a8c1d3132 100644
+--- a/drivers/acpi/button.c
++++ b/drivers/acpi/button.c
+@@ -113,7 +113,7 @@ struct acpi_button {
+
+ static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
+ static struct acpi_device *lid_device;
+-static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
++static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
+
+ static unsigned long lid_report_interval __read_mostly = 500;
+ module_param(lid_report_interval, ulong, 0644);
+diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
+index fb19e1cdb641..edc8663b5db3 100644
+--- a/drivers/acpi/glue.c
++++ b/drivers/acpi/glue.c
+@@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
+ return -ENODEV;
+
+ /*
+- * If the device has a _HID (or _CID) returning a valid ACPI/PNP
+- * device ID, it is better to make it look less attractive here, so that
+- * the other device with the same _ADR value (that may not have a valid
+- * device ID) can be matched going forward. [This means a second spec
+- * violation in a row, so whatever we do here is best effort anyway.]
++ * If the device has a _HID returning a valid ACPI/PNP device ID, it is
++ * better to make it look less attractive here, so that the other device
++ * with the same _ADR value (that may not have a valid device ID) can be
++ * matched going forward. [This means a second spec violation in a row,
++ * so whatever we do here is best effort anyway.]
+ */
+- return sta_present && list_empty(&adev->pnp.ids) ?
++ return sta_present && !adev->pnp.type.platform_id ?
+ FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
+ }
+
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 54abb26b7366..a4327af676fe 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -130,6 +130,12 @@ void __init acpi_nvs_nosave_s3(void)
+ nvs_nosave_s3 = true;
+ }
+
++static int __init init_nvs_save_s3(const struct dmi_system_id *d)
++{
++ nvs_nosave_s3 = false;
++ return 0;
++}
++
+ /*
+ * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
+ * user to request that behavior by using the 'acpi_old_suspend_ordering'
+@@ -324,6 +330,19 @@ static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
+ },
+ },
++ /*
++ * https://bugzilla.kernel.org/show_bug.cgi?id=189431
++ * Lenovo G50-45 is a platform later than 2012, but needs nvs memory
++ * saving during S3.
++ */
++ {
++ .callback = init_nvs_save_s3,
++ .ident = "Lenovo G50-45",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
++ },
++ },
+ {},
+ };
+
+diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
+index 267a3d3e79f4..52f2674d5e89 100644
+--- a/drivers/ata/ahci_da850.c
++++ b/drivers/ata/ahci_da850.c
+@@ -54,11 +54,42 @@ static void da850_sata_init(struct device *dev, void __iomem *pwrdn_reg,
+ writel(val, ahci_base + SATA_P0PHYCR_REG);
+ }
+
++static int ahci_da850_softreset(struct ata_link *link,
++ unsigned int *class, unsigned long deadline)
++{
++ int pmp, ret;
++
++ pmp = sata_srst_pmp(link);
++
++ /*
++ * There's an issue with the SATA controller on da850 SoCs: if we
++ * enable Port Multiplier support, but the drive is connected directly
++ * to the board, it can't be detected. As a workaround: if PMP is
++ * enabled, we first call ahci_do_softreset() and pass it the result of
++ * sata_srst_pmp(). If this call fails, we retry with pmp = 0.
++ */
++ ret = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
++ if (pmp && ret == -EBUSY)
++ return ahci_do_softreset(link, class, 0,
++ deadline, ahci_check_ready);
++
++ return ret;
++}
++
++static struct ata_port_operations ahci_da850_port_ops = {
++ .inherits = &ahci_platform_ops,
++ .softreset = ahci_da850_softreset,
++ /*
++ * No need to override .pmp_softreset - it's only used for actual
++ * PMP-enabled ports.
++ */
++};
++
+ static const struct ata_port_info ahci_da850_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+- .port_ops = &ahci_platform_ops,
++ .port_ops = &ahci_da850_port_ops,
+ };
+
+ static struct scsi_host_template ahci_platform_sht = {
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 1ef26403bcc8..433facfd6cb8 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -2042,63 +2042,65 @@ struct ctl_table random_table[] = {
+ };
+ #endif /* CONFIG_SYSCTL */
+
+-static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+-
+-int random_int_secret_init(void)
+-{
+- get_random_bytes(random_int_secret, sizeof(random_int_secret));
+- return 0;
+-}
+-
+-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
+- __aligned(sizeof(unsigned long));
++struct batched_entropy {
++ union {
++ unsigned long entropy_long[CHACHA20_BLOCK_SIZE / sizeof(unsigned long)];
++ unsigned int entropy_int[CHACHA20_BLOCK_SIZE / sizeof(unsigned int)];
++ };
++ unsigned int position;
++};
+
+ /*
+- * Get a random word for internal kernel use only. Similar to urandom but
+- * with the goal of minimal entropy pool depletion. As a result, the random
+- * value is not cryptographically secure but for several uses the cost of
+- * depleting entropy is too high
++ * Get a random word for internal kernel use only. The quality of the random
++ * number is either as good as RDRAND or as good as /dev/urandom, with the
++ * goal of being quite fast and not depleting entropy.
+ */
+-unsigned int get_random_int(void)
++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
++unsigned long get_random_long(void)
+ {
+- __u32 *hash;
+- unsigned int ret;
++ unsigned long ret;
++ struct batched_entropy *batch;
+
+- if (arch_get_random_int(&ret))
++ if (arch_get_random_long(&ret))
+ return ret;
+
+- hash = get_cpu_var(get_random_int_hash);
+-
+- hash[0] += current->pid + jiffies + random_get_entropy();
+- md5_transform(hash, random_int_secret);
+- ret = hash[0];
+- put_cpu_var(get_random_int_hash);
+-
++ batch = &get_cpu_var(batched_entropy_long);
++ if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
++ extract_crng((u8 *)batch->entropy_long);
++ batch->position = 0;
++ }
++ ret = batch->entropy_long[batch->position++];
++ put_cpu_var(batched_entropy_long);
+ return ret;
+ }
+-EXPORT_SYMBOL(get_random_int);
++EXPORT_SYMBOL(get_random_long);
+
+-/*
+- * Same as get_random_int(), but returns unsigned long.
+- */
+-unsigned long get_random_long(void)
++#if BITS_PER_LONG == 32
++unsigned int get_random_int(void)
+ {
+- __u32 *hash;
+- unsigned long ret;
++ return get_random_long();
++}
++#else
++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_int);
++unsigned int get_random_int(void)
++{
++ unsigned int ret;
++ struct batched_entropy *batch;
+
+- if (arch_get_random_long(&ret))
++ if (arch_get_random_int(&ret))
+ return ret;
+
+- hash = get_cpu_var(get_random_int_hash);
+-
+- hash[0] += current->pid + jiffies + random_get_entropy();
+- md5_transform(hash, random_int_secret);
+- ret = *(unsigned long *)hash;
+- put_cpu_var(get_random_int_hash);
+-
++ batch = &get_cpu_var(batched_entropy_int);
++ if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
++ extract_crng((u8 *)batch->entropy_int);
++ batch->position = 0;
++ }
++ ret = batch->entropy_int[batch->position++];
++ put_cpu_var(batched_entropy_int);
+ return ret;
+ }
+-EXPORT_SYMBOL(get_random_long);
++#endif
++EXPORT_SYMBOL(get_random_int);
+
+ /**
+ * randomize_page - Generate a random, page aligned address
+diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
+index 4a0f5ead4fb5..1e2e5198db53 100644
+--- a/drivers/firmware/qcom_scm-64.c
++++ b/drivers/firmware/qcom_scm-64.c
+@@ -91,6 +91,7 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ dma_addr_t args_phys = 0;
+ void *args_virt = NULL;
+ size_t alloc_len;
++ struct arm_smccc_quirk quirk = {.id = ARM_SMCCC_QUIRK_QCOM_A6};
+
+ if (unlikely(arglen > N_REGISTER_ARGS)) {
+ alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
+@@ -131,10 +132,16 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ qcom_smccc_convention,
+ ARM_SMCCC_OWNER_SIP, fn_id);
+
++ quirk.state.a6 = 0;
++
+ do {
+- arm_smccc_smc(cmd, desc->arginfo, desc->args[0],
+- desc->args[1], desc->args[2], x5, 0, 0,
+- res);
++ arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
++ desc->args[1], desc->args[2], x5,
++ quirk.state.a6, 0, res, &quirk);
++
++ if (res->a0 == QCOM_SCM_INTERRUPTED)
++ cmd = res->a0;
++
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+
+ mutex_unlock(&qcom_scm_lock);
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index a3faefa44f68..d3f9f028a37b 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -572,8 +572,10 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
+ }
+
+ desc = acpi_get_gpiod_by_index(adev, propname, idx, &info);
+- if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
++ if (!IS_ERR(desc))
+ break;
++ if (PTR_ERR(desc) == -EPROBE_DEFER)
++ return ERR_CAST(desc);
+ }
+
+ /* Then from plain _CRS GPIOs */
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index ec6474b01dbc..7cce86933000 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -90,7 +90,7 @@ struct detailed_mode_closure {
+ #define LEVEL_GTF2 2
+ #define LEVEL_CVT 3
+
+-static struct edid_quirk {
++static const struct edid_quirk {
+ char vendor[4];
+ int product_id;
+ u32 quirks;
+@@ -1480,7 +1480,7 @@ EXPORT_SYMBOL(drm_edid_duplicate);
+ *
+ * Returns true if @vendor is in @edid, false otherwise
+ */
+-static bool edid_vendor(struct edid *edid, char *vendor)
++static bool edid_vendor(struct edid *edid, const char *vendor)
+ {
+ char edid_vendor[3];
+
+@@ -1500,7 +1500,7 @@ static bool edid_vendor(struct edid *edid, char *vendor)
+ */
+ static u32 edid_get_quirks(struct edid *edid)
+ {
+- struct edid_quirk *quirk;
++ const struct edid_quirk *quirk;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
+index 325cb9b55989..5f30a0716531 100644
+--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
++++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
+@@ -1422,7 +1422,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
+ {
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
+- int ret;
++ int idx, ret;
+ bool kthread = current->mm == NULL;
+
+ if (!handle_valid(handle))
+@@ -1434,8 +1434,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
+ if (kthread)
+ use_mm(kvm->mm);
+
++ idx = srcu_read_lock(&kvm->srcu);
+ ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
+ kvm_read_guest(kvm, gpa, buf, len);
++ srcu_read_unlock(&kvm->srcu, idx);
+
+ if (kthread)
+ unuse_mm(kvm->mm);
+diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
+index 678b0be85376..3635dbe328ef 100644
+--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
++++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
+@@ -101,7 +101,7 @@ struct tbs_sched_data {
+ struct list_head runq_head;
+ };
+
+-#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
++#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
+
+ static void tbs_sched_func(struct work_struct *work)
+ {
+@@ -224,7 +224,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
+ return;
+
+ list_add_tail(&vgpu_data->list, &sched_data->runq_head);
+- schedule_delayed_work(&sched_data->work, sched_data->period);
++ schedule_delayed_work(&sched_data->work, 0);
+ }
+
+ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
+diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
+index fce8e198bc76..08e274e16165 100644
+--- a/drivers/gpu/drm/i915/i915_pci.c
++++ b/drivers/gpu/drm/i915/i915_pci.c
+@@ -421,6 +421,7 @@ static const struct pci_device_id pciidlist[] = {
+ INTEL_VLV_IDS(&intel_valleyview_info),
+ INTEL_BDW_GT12_IDS(&intel_broadwell_info),
+ INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
++ INTEL_BDW_RSVD_IDS(&intel_broadwell_info),
+ INTEL_CHV_IDS(&intel_cherryview_info),
+ INTEL_SKL_GT1_IDS(&intel_skylake_info),
+ INTEL_SKL_GT2_IDS(&intel_skylake_info),
+diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
+index 1f2f9ca25901..4556e2b13ac5 100644
+--- a/drivers/gpu/drm/mga/mga_dma.c
++++ b/drivers/gpu/drm/mga/mga_dma.c
+@@ -392,6 +392,24 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
+ drm_mga_private_t *dev_priv;
+ int ret;
+
++ /* There are PCI versions of the G450. These cards have the
++ * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
++ * bridge chip. We detect these cards, which are not currently
++ * supported by this driver, by looking at the device ID of the
++ * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the
++ * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
++ * device.
++ */
++ if ((dev->pdev->device == 0x0525) && dev->pdev->bus->self
++ && (dev->pdev->bus->self->vendor == 0x3388)
++ && (dev->pdev->bus->self->device == 0x0021)
++ && dev->agp) {
++ /* FIXME: This should be quirked in the pci core, but oh well
++ * the hw probably stopped existing. */
++ arch_phys_wc_del(dev->agp->agp_mtrr);
++ kfree(dev->agp);
++ dev->agp = NULL;
++ }
+ dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL);
+ if (!dev_priv)
+ return -ENOMEM;
+@@ -698,7 +716,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
+ static int mga_do_dma_bootstrap(struct drm_device *dev,
+ drm_mga_dma_bootstrap_t *dma_bs)
+ {
+- const int is_agp = (dma_bs->agp_mode != 0) && drm_pci_device_is_agp(dev);
++ const int is_agp = (dma_bs->agp_mode != 0) && dev->agp;
+ int err;
+ drm_mga_private_t *const dev_priv =
+ (drm_mga_private_t *) dev->dev_private;
+diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
+index 25b2a1a424e6..63ba0699d107 100644
+--- a/drivers/gpu/drm/mga/mga_drv.c
++++ b/drivers/gpu/drm/mga/mga_drv.c
+@@ -37,8 +37,6 @@
+
+ #include <drm/drm_pciids.h>
+
+-static int mga_driver_device_is_agp(struct drm_device *dev);
+-
+ static struct pci_device_id pciidlist[] = {
+ mga_PCI_IDS
+ };
+@@ -66,7 +64,6 @@ static struct drm_driver driver = {
+ .lastclose = mga_driver_lastclose,
+ .set_busid = drm_pci_set_busid,
+ .dma_quiescent = mga_driver_dma_quiescent,
+- .device_is_agp = mga_driver_device_is_agp,
+ .get_vblank_counter = mga_get_vblank_counter,
+ .enable_vblank = mga_enable_vblank,
+ .disable_vblank = mga_disable_vblank,
+@@ -107,37 +104,3 @@ module_exit(mga_exit);
+ MODULE_AUTHOR(DRIVER_AUTHOR);
+ MODULE_DESCRIPTION(DRIVER_DESC);
+ MODULE_LICENSE("GPL and additional rights");
+-
+-/**
+- * Determine if the device really is AGP or not.
+- *
+- * In addition to the usual tests performed by \c drm_device_is_agp, this
+- * function detects PCI G450 cards that appear to the system exactly like
+- * AGP G450 cards.
+- *
+- * \param dev The device to be tested.
+- *
+- * \returns
+- * If the device is a PCI G450, zero is returned. Otherwise 2 is returned.
+- */
+-static int mga_driver_device_is_agp(struct drm_device *dev)
+-{
+- const struct pci_dev *const pdev = dev->pdev;
+-
+- /* There are PCI versions of the G450. These cards have the
+- * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
+- * bridge chip. We detect these cards, which are not currently
+- * supported by this driver, by looking at the device ID of the
+- * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the
+- * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
+- * device.
+- */
+-
+- if ((pdev->device == 0x0525) && pdev->bus->self
+- && (pdev->bus->self->vendor == 0x3388)
+- && (pdev->bus->self->device == 0x0021)) {
+- return 0;
+- }
+-
+- return 2;
+-}
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index b8647198c11c..657874077400 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -846,7 +846,9 @@ static const struct adreno_gpu_funcs funcs = {
+ .idle = a5xx_idle,
+ .irq = a5xx_irq,
+ .destroy = a5xx_destroy,
++#ifdef CONFIG_DEBUG_FS
+ .show = a5xx_show,
++#endif
+ },
+ .get_timestamp = a5xx_get_timestamp,
+ };
+diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
+index 4f5fa8d65fe9..144367c0c28f 100644
+--- a/drivers/gpu/drm/ttm/ttm_object.c
++++ b/drivers/gpu/drm/ttm/ttm_object.c
+@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
+ if (unlikely(ret != 0))
+ goto out_err0;
+
+- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
++ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+ if (unlikely(ret != 0))
+ goto out_err1;
+
+@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
+
+ int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+- enum ttm_ref_type ref_type, bool *existed)
++ enum ttm_ref_type ref_type, bool *existed,
++ bool require_existed)
+ {
+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct ttm_ref_object *ref;
+@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
+ }
+
+ rcu_read_unlock();
++ if (require_existed)
++ return -EPERM;
++
+ ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
+ false, false);
+ if (unlikely(ret != 0))
+@@ -635,7 +639,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+ prime = (struct ttm_prime_object *) dma_buf->priv;
+ base = &prime->base;
+ *handle = base->hash.key;
+- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
++ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+
+ dma_buf_put(dma_buf);
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+index 6541dd8b82dc..6b2708b4eafe 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -538,7 +538,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
+ struct vmw_fence_obj **p_fence)
+ {
+ struct vmw_fence_obj *fence;
+- int ret;
++ int ret;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (unlikely(fence == NULL))
+@@ -701,6 +701,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
+ }
+
+
++/**
++ * vmw_fence_obj_lookup - Look up a user-space fence object
++ *
++ * @tfile: A struct ttm_object_file identifying the caller.
++ * @handle: A handle identifying the fence object.
++ * @return: A struct vmw_user_fence base ttm object on success or
++ * an error pointer on failure.
++ *
++ * The fence object is looked up and type-checked. The caller needs
++ * to have opened the fence object first, but since that happens on
++ * creation and fence objects aren't shareable, that's not an
++ * issue currently.
++ */
++static struct ttm_base_object *
++vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
++{
++ struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
++
++ if (!base) {
++ pr_err("Invalid fence object handle 0x%08lx.\n",
++ (unsigned long)handle);
++ return ERR_PTR(-EINVAL);
++ }
++
++ if (base->refcount_release != vmw_user_fence_base_release) {
++ pr_err("Invalid fence object handle 0x%08lx.\n",
++ (unsigned long)handle);
++ ttm_base_object_unref(&base);
++ return ERR_PTR(-EINVAL);
++ }
++
++ return base;
++}
++
++
+ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+@@ -726,13 +761,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+ arg->kernel_cookie = jiffies + wait_timeout;
+ }
+
+- base = ttm_base_object_lookup(tfile, arg->handle);
+- if (unlikely(base == NULL)) {
+- printk(KERN_ERR "Wait invalid fence object handle "
+- "0x%08lx.\n",
+- (unsigned long)arg->handle);
+- return -EINVAL;
+- }
++ base = vmw_fence_obj_lookup(tfile, arg->handle);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
+
+ fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+
+@@ -771,13 +802,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+- base = ttm_base_object_lookup(tfile, arg->handle);
+- if (unlikely(base == NULL)) {
+- printk(KERN_ERR "Fence signaled invalid fence object handle "
+- "0x%08lx.\n",
+- (unsigned long)arg->handle);
+- return -EINVAL;
+- }
++ base = vmw_fence_obj_lookup(tfile, arg->handle);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
+
+ fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+ fman = fman_from_fence(fence);
+@@ -1024,6 +1051,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+ (struct drm_vmw_fence_event_arg *) data;
+ struct vmw_fence_obj *fence = NULL;
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
++ struct ttm_object_file *tfile = vmw_fp->tfile;
+ struct drm_vmw_fence_rep __user *user_fence_rep =
+ (struct drm_vmw_fence_rep __user *)(unsigned long)
+ arg->fence_rep;
+@@ -1037,24 +1065,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+ */
+ if (arg->handle) {
+ struct ttm_base_object *base =
+- ttm_base_object_lookup_for_ref(dev_priv->tdev,
+- arg->handle);
+-
+- if (unlikely(base == NULL)) {
+- DRM_ERROR("Fence event invalid fence object handle "
+- "0x%08lx.\n",
+- (unsigned long)arg->handle);
+- return -EINVAL;
+- }
++ vmw_fence_obj_lookup(tfile, arg->handle);
++
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
+ fence = &(container_of(base, struct vmw_user_fence,
+ base)->fence);
+ (void) vmw_fence_obj_reference(fence);
+
+ if (user_fence_rep != NULL) {
+- bool existed;
+-
+ ret = ttm_ref_object_add(vmw_fp->tfile, base,
+- TTM_REF_USAGE, &existed);
++ TTM_REF_USAGE, NULL, false);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to reference a fence "
+ "object.\n");
+@@ -1097,8 +1119,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+ return 0;
+ out_no_create:
+ if (user_fence_rep != NULL)
+- ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+- handle, TTM_REF_USAGE);
++ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+ out_no_ref_obj:
+ vmw_fence_obj_unreference(&fence);
+ return ret;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+index b8c6a03c8c54..5ec24fd801cd 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
+ param->value = dev_priv->has_dx;
+ break;
+ default:
+- DRM_ERROR("Illegal vmwgfx get param request: %d\n",
+- param->param);
+ return -EINVAL;
+ }
+
+@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
+ bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+
+- if (unlikely(arg->pad64 != 0)) {
++ if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
+ DRM_ERROR("Illegal GET_3D_CAP argument.\n");
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+index 8e86d6d4141b..53fa9f1c1d10 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+@@ -589,7 +589,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+ return ret;
+
+ ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+- TTM_REF_SYNCCPU_WRITE, &existed);
++ TTM_REF_SYNCCPU_WRITE, &existed, false);
+ if (ret != 0 || existed)
+ ttm_bo_synccpu_write_release(&user_bo->dma.base);
+
+@@ -773,7 +773,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+
+ *handle = user_bo->prime.base.hash.key;
+ return ttm_ref_object_add(tfile, &user_bo->prime.base,
+- TTM_REF_USAGE, NULL);
++ TTM_REF_USAGE, NULL, false);
+ }
+
+ /*
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index b445ce9b9757..05fa092c942b 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -713,11 +713,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ 128;
+
+ num_sizes = 0;
+- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
++ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
++ if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
++ return -EINVAL;
+ num_sizes += req->mip_levels[i];
++ }
+
+- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+- DRM_VMW_MAX_MIP_LEVELS)
++ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
++ num_sizes == 0)
+ return -EINVAL;
+
+ size = vmw_user_surface_size + 128 +
+@@ -891,17 +894,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
+ uint32_t handle;
+ struct ttm_base_object *base;
+ int ret;
++ bool require_exist = false;
+
+ if (handle_type == DRM_VMW_HANDLE_PRIME) {
+ ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
+ if (unlikely(ret != 0))
+ return ret;
+ } else {
+- if (unlikely(drm_is_render_client(file_priv))) {
+- DRM_ERROR("Render client refused legacy "
+- "surface reference.\n");
+- return -EACCES;
+- }
++ if (unlikely(drm_is_render_client(file_priv)))
++ require_exist = true;
++
+ if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+ DRM_ERROR("Locked master refused legacy "
+ "surface reference.\n");
+@@ -929,17 +931,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
+
+ /*
+ * Make sure the surface creator has the same
+- * authenticating master.
++ * authenticating master, or is already registered with us.
+ */
+ if (drm_is_primary_client(file_priv) &&
+- user_srf->master != file_priv->master) {
+- DRM_ERROR("Trying to reference surface outside of"
+- " master domain.\n");
+- ret = -EACCES;
+- goto out_bad_resource;
+- }
++ user_srf->master != file_priv->master)
++ require_exist = true;
+
+- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
++ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
++ require_exist);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a surface.\n");
+ goto out_bad_resource;
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 672145b0d8f5..6ef4f2fcfe43 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -3290,6 +3290,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
+ {
+ struct wacom_features *features = &wacom_wac->features;
+
++ if ((features->type == HID_GENERIC) && features->numbered_buttons > 0)
++ features->device_type |= WACOM_DEVICETYPE_PAD;
++
+ if (!(features->device_type & WACOM_DEVICETYPE_PAD))
+ return -ENODEV;
+
+diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
+index f7fcfa886f72..821919dd245b 100644
+--- a/drivers/iio/gyro/bmg160_core.c
++++ b/drivers/iio/gyro/bmg160_core.c
+@@ -27,6 +27,7 @@
+ #include <linux/iio/trigger_consumer.h>
+ #include <linux/iio/triggered_buffer.h>
+ #include <linux/regmap.h>
++#include <linux/delay.h>
+ #include "bmg160.h"
+
+ #define BMG160_IRQ_NAME "bmg160_event"
+@@ -52,6 +53,9 @@
+ #define BMG160_DEF_BW 100
+ #define BMG160_REG_PMU_BW_RES BIT(7)
+
++#define BMG160_GYRO_REG_RESET 0x14
++#define BMG160_GYRO_RESET_VAL 0xb6
++
+ #define BMG160_REG_INT_MAP_0 0x17
+ #define BMG160_INT_MAP_0_BIT_ANY BIT(1)
+
+@@ -236,6 +240,14 @@ static int bmg160_chip_init(struct bmg160_data *data)
+ int ret;
+ unsigned int val;
+
++ /*
++ * Reset chip to get it in a known good state. A delay of 30ms after
++ * reset is required according to the datasheet.
++ */
++ regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
++ BMG160_GYRO_RESET_VAL);
++ usleep_range(30000, 30700);
++
+ ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
+ if (ret < 0) {
+ dev_err(dev, "Error reading reg_chip_id\n");
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index aaca42862389..d9c15e411e10 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -608,10 +608,9 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
+ tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
+ return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
+ case IIO_VAL_FRACTIONAL_LOG2:
+- tmp = (s64)vals[0] * 1000000000LL >> vals[1];
+- tmp1 = do_div(tmp, 1000000000LL);
+- tmp0 = tmp;
+- return snprintf(buf, len, "%d.%09u", tmp0, tmp1);
++ tmp = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
++ tmp0 = (int)div_s64_rem(tmp, 1000000000LL, &tmp1);
++ return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
+ case IIO_VAL_INT_MULTIPLE:
+ {
+ int i;
+diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
+index e19e0787864c..f82560a4f772 100644
+--- a/drivers/iio/pressure/st_pressure_core.c
++++ b/drivers/iio/pressure/st_pressure_core.c
+@@ -455,6 +455,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
+ .multi_read_bit = true,
++ .bootime = 2,
+ },
+ };
+
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 4a157b0f4155..fd4f3ace200b 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3594,7 +3594,7 @@ static int raid_preresume(struct dm_target *ti)
+ return r;
+
+ /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
+- if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) &&
++ if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
+ mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
+ r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
+ to_bytes(rs->requested_bitmap_chunk_sectors), 0);
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index 0f0eb8a3d922..78f36012eaca 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -146,8 +146,6 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
+ block = fec_buffer_rs_block(v, fio, n, i);
+ res = fec_decode_rs8(v, fio, block, &par[offset], neras);
+ if (res < 0) {
+- dm_bufio_release(buf);
+-
+ r = res;
+ goto error;
+ }
+@@ -172,6 +170,8 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
+ done:
+ r = corrected;
+ error:
++ dm_bufio_release(buf);
++
+ if (r < 0 && neras)
+ DMERR_LIMIT("%s: FEC %llu: failed to correct: %d",
+ v->data_dev->name, (unsigned long long)rsb, r);
+@@ -269,7 +269,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
+ &is_zero) == 0) {
+ /* skip known zero blocks entirely */
+ if (is_zero)
+- continue;
++ goto done;
+
+ /*
+ * skip if we have already found the theoretical
+@@ -439,6 +439,13 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
+ if (!verity_fec_is_enabled(v))
+ return -EOPNOTSUPP;
+
++ if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) {
++ DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
++ return -EIO;
++ }
++
++ fio->level++;
++
+ if (type == DM_VERITY_BLOCK_TYPE_METADATA)
+ block += v->data_blocks;
+
+@@ -470,7 +477,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
+ if (r < 0) {
+ r = fec_decode_rsb(v, io, fio, rsb, offset, true);
+ if (r < 0)
+- return r;
++ goto done;
+ }
+
+ if (dest)
+@@ -480,6 +487,8 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
+ r = verity_for_bv_block(v, io, iter, fec_bv_copy);
+ }
+
++done:
++ fio->level--;
+ return r;
+ }
+
+@@ -520,6 +529,7 @@ void verity_fec_init_io(struct dm_verity_io *io)
+ memset(fio->bufs, 0, sizeof(fio->bufs));
+ fio->nbufs = 0;
+ fio->output = NULL;
++ fio->level = 0;
+ }
+
+ /*
+diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
+index 7fa0298b995e..bb31ce87a933 100644
+--- a/drivers/md/dm-verity-fec.h
++++ b/drivers/md/dm-verity-fec.h
+@@ -27,6 +27,9 @@
+ #define DM_VERITY_FEC_BUF_MAX \
+ (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS))
+
++/* maximum recursion level for verity_fec_decode */
++#define DM_VERITY_FEC_MAX_RECURSION 4
++
+ #define DM_VERITY_OPT_FEC_DEV "use_fec_from_device"
+ #define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks"
+ #define DM_VERITY_OPT_FEC_START "fec_start"
+@@ -58,6 +61,7 @@ struct dm_verity_fec_io {
+ unsigned nbufs; /* number of buffers allocated */
+ u8 *output; /* buffer for corrected output */
+ size_t output_pos;
++ unsigned level; /* recursion level */
+ };
+
+ #ifdef CONFIG_DM_VERITY_FEC
+diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
+index 9a6eb4492172..364f6b87a728 100644
+--- a/drivers/mmc/host/sdhci-of-esdhc.c
++++ b/drivers/mmc/host/sdhci-of-esdhc.c
+@@ -569,16 +569,19 @@ static const struct sdhci_ops sdhci_esdhc_le_ops = {
+ };
+
+ static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
+- .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
+- | SDHCI_QUIRK_NO_CARD_NO_RESET
+- | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
++ .quirks = ESDHC_DEFAULT_QUIRKS |
++#ifdef CONFIG_PPC
++ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
++#endif
++ SDHCI_QUIRK_NO_CARD_NO_RESET |
++ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .ops = &sdhci_esdhc_be_ops,
+ };
+
+ static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
+- .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
+- | SDHCI_QUIRK_NO_CARD_NO_RESET
+- | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
++ .quirks = ESDHC_DEFAULT_QUIRKS |
++ SDHCI_QUIRK_NO_CARD_NO_RESET |
++ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .ops = &sdhci_esdhc_le_ops,
+ };
+
+@@ -643,8 +646,7 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
+ of_device_is_compatible(np, "fsl,p5020-esdhc") ||
+ of_device_is_compatible(np, "fsl,p4080-esdhc") ||
+ of_device_is_compatible(np, "fsl,p1020-esdhc") ||
+- of_device_is_compatible(np, "fsl,t1040-esdhc") ||
+- of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
++ of_device_is_compatible(np, "fsl,t1040-esdhc"))
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
+ if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+index de19c7c92bc6..85d949e03f79 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+@@ -2238,14 +2238,16 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
+ struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_cfg80211_vif *vif;
++ enum nl80211_iftype iftype;
+ bool wait_for_disable = false;
+ int err;
+
+ brcmf_dbg(TRACE, "delete P2P vif\n");
+ vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+
++ iftype = vif->wdev.iftype;
+ brcmf_cfg80211_arm_vif_event(cfg, vif);
+- switch (vif->wdev.iftype) {
++ switch (iftype) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
+ wait_for_disable = true;
+@@ -2275,7 +2277,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
+ BRCMF_P2P_DISABLE_TIMEOUT);
+
+ err = 0;
+- if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) {
++ if (iftype != NL80211_IFTYPE_P2P_DEVICE) {
+ brcmf_vif_clear_mgmt_ies(vif);
+ err = brcmf_p2p_release_p2p_if(vif);
+ }
+@@ -2291,7 +2293,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
+ brcmf_remove_interface(vif->ifp, true);
+
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+- if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE)
++ if (iftype != NL80211_IFTYPE_P2P_DEVICE)
+ p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
+
+ return err;
+diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
+index e354010fb006..cea581414e10 100644
+--- a/drivers/pci/host/pci-thunder-pem.c
++++ b/drivers/pci/host/pci-thunder-pem.c
+@@ -14,6 +14,7 @@
+ * Copyright (C) 2015 - 2016 Cavium, Inc.
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/of_address.h>
+@@ -319,6 +320,49 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
+
+ #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
++#define PEM_RES_BASE 0x87e0c0000000UL
++#define PEM_NODE_MASK GENMASK(45, 44)
++#define PEM_INDX_MASK GENMASK(26, 24)
++#define PEM_MIN_DOM_IN_NODE 4
++#define PEM_MAX_DOM_IN_NODE 10
++
++static void thunder_pem_reserve_range(struct device *dev, int seg,
++ struct resource *r)
++{
++ resource_size_t start = r->start, end = r->end;
++ struct resource *res;
++ const char *regionid;
++
++ regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg);
++ if (!regionid)
++ return;
++
++ res = request_mem_region(start, end - start + 1, regionid);
++ if (res)
++ res->flags &= ~IORESOURCE_BUSY;
++ else
++ kfree(regionid);
++
++ dev_info(dev, "%pR %s reserved\n", r,
++ res ? "has been" : "could not be");
++}
++
++static void thunder_pem_legacy_fw(struct acpi_pci_root *root,
++ struct resource *res_pem)
++{
++ int node = acpi_get_node(root->device->handle);
++ int index;
++
++ if (node == NUMA_NO_NODE)
++ node = 0;
++
++ index = root->segment - PEM_MIN_DOM_IN_NODE;
++ index -= node * PEM_MAX_DOM_IN_NODE;
++ res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) |
++ FIELD_PREP(PEM_INDX_MASK, index);
++ res_pem->flags = IORESOURCE_MEM;
++}
++
+ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
+ {
+ struct device *dev = cfg->parent;
+@@ -332,9 +376,23 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "CAVA02B", root->segment, res_pem);
++
++ /*
++ * If we fail to gather resources it means that we run with old
++ * FW where we need to calculate PEM-specific resources manually.
++ */
+ if (ret) {
+- dev_err(dev, "can't get rc base address\n");
+- return ret;
++ thunder_pem_legacy_fw(root, res_pem);
++ /*
++ * Reserve 64K size PEM specific resources. The full 16M range
++ * size is required for thunder_pem_init() call.
++ */
++ res_pem->end = res_pem->start + SZ_64K - 1;
++ thunder_pem_reserve_range(dev, root->segment, res_pem);
++ res_pem->end = res_pem->start + SZ_16M - 1;
++
++ /* Reserve PCI configuration space as well. */
++ thunder_pem_reserve_range(dev, root->segment, &cfg->res);
+ }
+
+ return thunder_pem_init(dev, cfg, res_pem);
+diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
+index 7c3b54b9eb17..142a1669dd82 100644
+--- a/drivers/pci/host/pci-xgene.c
++++ b/drivers/pci/host/pci-xgene.c
+@@ -246,14 +246,11 @@ static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion)
+ ret = xgene_get_csr_resource(adev, &csr);
+ if (ret) {
+ dev_err(dev, "can't get CSR resource\n");
+- kfree(port);
+ return ret;
+ }
+ port->csr_base = devm_ioremap_resource(dev, &csr);
+- if (IS_ERR(port->csr_base)) {
+- kfree(port);
+- return -ENOMEM;
+- }
++ if (IS_ERR(port->csr_base))
++ return PTR_ERR(port->csr_base);
+
+ port->cfg_base = cfg->win;
+ port->version = ipversion;
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 024def5bb3fa..a171762048e7 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1634,6 +1634,7 @@ static void quirk_pcie_mch(struct pci_dev *pdev)
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, quirk_pcie_mch);
+
+
+ /*
+@@ -2240,6 +2241,27 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_TIGON3_5719,
+ quirk_brcm_5719_limit_mrrs);
+
++#ifdef CONFIG_PCIE_IPROC_PLATFORM
++static void quirk_paxc_bridge(struct pci_dev *pdev)
++{
++ /* The PCI config space is shared with the PAXC root port and the first
++	 * Ethernet device. So, we need to work around this by telling the PCI
++ * code that the bridge is not an Ethernet device.
++ */
++ if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
++ pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
++
++ /* MPSS is not being set properly (as it is currently 0). This is
++ * because that area of the PCI config space is hard coded to zero, and
++ * is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS)
++ * so that the MPS can be set to the real max value.
++ */
++ pdev->pcie_mpss = 2;
++}
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
++#endif
++
+ /* Originally in EDAC sources for i82875P:
+ * Intel tells BIOS developers to hide device 6 which
+ * configures the overflow device access containing
+@@ -3114,30 +3136,32 @@ static void quirk_remove_d3_delay(struct pci_dev *dev)
+ {
+ dev->d3_delay = 0;
+ }
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
++/* C600 Series devices do not need 10ms d3_delay */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
++/* Lynxpoint-H PCH devices do not need 10ms d3_delay */
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
+ /* Intel Cherrytrail devices do not need 10ms d3_delay */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3_delay);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3_delay);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3_delay);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3_delay);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3_delay);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
+
+ /*
+ * Some devices may pass our check in pci_intx_mask_supported() if
+@@ -4137,6 +4161,26 @@ static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
+ }
+
+ /*
++ * These QCOM root ports do provide ACS-like features to disable peer
++ * transactions and validate bus numbers in requests, but do not provide an
++ * actual PCIe ACS capability. Hardware supports source validation but it
++ * will report the issue as Completer Abort instead of ACS Violation.
++ * Hardware doesn't support peer-to-peer and each root port is a root
++ * complex with unique segment numbers. It is not possible for one root
++ * port to pass traffic to another root port. All PCIe transactions are
++ * terminated inside the root port.
++ */
++static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
++{
++ u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV);
++ int ret = acs_flags & ~flags ? 0 : 1;
++
++ dev_info(&dev->dev, "Using QCOM ACS Quirk (%d)\n", ret);
++
++ return ret;
++}
++
++/*
+ * Sunrise Point PCH root ports implement ACS, but unfortunately as shown in
+ * the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2,
+ * 12.1.46, 12.1.47)[1] this chipset uses dwords for the ACS capability and
+@@ -4151,15 +4195,35 @@ static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
+ *
+ * N.B. This doesn't fix what lspci shows.
+ *
++ * The 100 series chipset specification update includes this as errata #23[3].
++ *
++ * The 200 series chipset (Union Point) has the same bug according to the
++ * specification update (Intel 200 Series Chipset Family Platform Controller
++ * Hub, Specification Update, January 2017, Revision 001, Document# 335194-001,
++ * Errata 22)[4]. Per the datasheet[5], root port PCI Device IDs for this
++ * chipset include:
++ *
++ * 0xa290-0xa29f PCI Express Root port #{0-16}
++ * 0xa2e7-0xa2ee PCI Express Root port #{17-24}
++ *
+ * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
+ * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
++ * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
++ * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
++ * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
+ */
+ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
+ {
+- return pci_is_pcie(dev) &&
+- pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT &&
+- ((dev->device & ~0xf) == 0xa110 ||
+- (dev->device >= 0xa167 && dev->device <= 0xa16a));
++ if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
++ return false;
++
++ switch (dev->device) {
++ case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
++ case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
++ return true;
++ }
++
++ return false;
+ }
+
+ #define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)
+@@ -4272,6 +4336,9 @@ static const struct pci_dev_acs_enabled {
+ /* I219 */
+ { PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
++ /* QCOM QDF2xxx root ports */
++ { 0x17cb, 0x400, pci_quirk_qcom_rp_acs },
++ { 0x17cb, 0x401, pci_quirk_qcom_rp_acs },
+ /* Intel PCH root ports */
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 43cb680adbb4..8499d3ae4257 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -159,6 +159,8 @@ MODULE_LICENSE("GPL");
+ #define USB_INTEL_XUSB2PR 0xD0
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+
++static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
++
+ struct bios_args {
+ u32 arg0;
+ u32 arg1;
+@@ -2051,6 +2053,16 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
+ return 0;
+ }
+
++static bool ashs_present(void)
++{
++ int i = 0;
++ while (ashs_ids[i]) {
++ if (acpi_dev_found(ashs_ids[i++]))
++ return true;
++ }
++ return false;
++}
++
+ /*
+ * WMI Driver
+ */
+@@ -2095,6 +2107,13 @@ static int asus_wmi_add(struct platform_device *pdev)
+ if (err)
+ goto fail_leds;
+
++ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
++ if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
++ asus->driver->wlan_ctrl_by_user = 1;
++
++ if (asus->driver->wlan_ctrl_by_user && ashs_present())
++ asus->driver->quirks->no_rfkill = 1;
++
+ if (!asus->driver->quirks->no_rfkill) {
+ err = asus_wmi_rfkill_init(asus);
+ if (err)
+@@ -2134,10 +2153,6 @@ static int asus_wmi_add(struct platform_device *pdev)
+ if (err)
+ goto fail_debugfs;
+
+- asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
+- if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
+- asus->driver->wlan_ctrl_by_user = 1;
+-
+ return 0;
+
+ fail_debugfs:
+diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
+index 7cbad0d45b9c..6ba270e0494d 100644
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -409,6 +409,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+ ret = PTR_ERR(vmfile);
+ goto out;
+ }
++ vmfile->f_mode |= FMODE_LSEEK;
+ asma->file = vmfile;
+ }
+ get_file(asma->file);
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 61ad6c3b20a0..f4eb807a2616 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -1075,15 +1075,15 @@ static int omap8250_no_handle_irq(struct uart_port *port)
+ }
+
+ static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE;
+-static const u8 am4372_habit = UART_ERRATA_CLOCK_DISABLE;
++static const u8 dra742_habit = UART_ERRATA_CLOCK_DISABLE;
+
+ static const struct of_device_id omap8250_dt_ids[] = {
+ { .compatible = "ti,omap2-uart" },
+ { .compatible = "ti,omap3-uart" },
+ { .compatible = "ti,omap4-uart" },
+ { .compatible = "ti,am3352-uart", .data = &am3352_habit, },
+- { .compatible = "ti,am4372-uart", .data = &am4372_habit, },
+- { .compatible = "ti,dra742-uart", .data = &am4372_habit, },
++ { .compatible = "ti,am4372-uart", .data = &am3352_habit, },
++ { .compatible = "ti,dra742-uart", .data = &dra742_habit, },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, omap8250_dt_ids);
+@@ -1218,9 +1218,6 @@ static int omap8250_probe(struct platform_device *pdev)
+ priv->omap8250_dma.rx_size = RX_TRIGGER;
+ priv->omap8250_dma.rxconf.src_maxburst = RX_TRIGGER;
+ priv->omap8250_dma.txconf.dst_maxburst = TX_TRIGGER;
+-
+- if (of_machine_is_compatible("ti,am33xx"))
+- priv->habit |= OMAP_DMA_TX_KICK;
+ /*
+ * pause is currently not supported atleast on omap-sdma
+ * and edma on most earlier kernels.
+diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
+index 3889809fd0c4..37591a4b1346 100644
+--- a/drivers/usb/chipidea/ci_hdrc_msm.c
++++ b/drivers/usb/chipidea/ci_hdrc_msm.c
+@@ -24,7 +24,6 @@ static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
+ switch (event) {
+ case CI_HDRC_CONTROLLER_RESET_EVENT:
+ dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n");
+- writel(0, USB_AHBBURST);
+ /* use AHB transactor, allow posted data writes */
+ writel(0x8, USB_AHBMODE);
+ usb_phy_init(ci->usb_phy);
+@@ -47,7 +46,8 @@ static struct ci_hdrc_platform_data ci_hdrc_msm_platdata = {
+ .name = "ci_hdrc_msm",
+ .capoffset = DEF_CAPOFFSET,
+ .flags = CI_HDRC_REGS_SHARED |
+- CI_HDRC_DISABLE_STREAMING,
++ CI_HDRC_DISABLE_STREAMING |
++ CI_HDRC_OVERRIDE_AHB_BURST,
+
+ .notify_event = ci_hdrc_msm_notify_event,
+ };
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index a8a4fe4ffa30..16768abf7f7c 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -171,6 +171,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+ int status)
+ {
+ struct dwc3 *dwc = dep->dwc;
++ unsigned int unmap_after_complete = false;
+
+ req->started = false;
+ list_del(&req->list);
+@@ -180,11 +181,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+ if (req->request.status == -EINPROGRESS)
+ req->request.status = status;
+
+- if (dwc->ep0_bounced && dep->number <= 1)
++ /*
++ * NOTICE we don't want to unmap before calling ->complete() if we're
++ * dealing with a bounced ep0 request. If we unmap it here, we would end
++	 * up overwriting the contents of req->buf and this could confuse the
++ * gadget driver.
++ */
++ if (dwc->ep0_bounced && dep->number <= 1) {
+ dwc->ep0_bounced = false;
+-
+- usb_gadget_unmap_request_by_dev(dwc->sysdev,
+- &req->request, req->direction);
++ unmap_after_complete = true;
++ } else {
++ usb_gadget_unmap_request_by_dev(dwc->sysdev,
++ &req->request, req->direction);
++ }
+
+ trace_dwc3_gadget_giveback(req);
+
+@@ -192,6 +201,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+ usb_gadget_giveback_request(&dep->endpoint, &req->request);
+ spin_lock(&dwc->lock);
+
++ if (unmap_after_complete)
++ usb_gadget_unmap_request_by_dev(dwc->sysdev,
++ &req->request, req->direction);
++
+ if (dep->number > 1)
+ pm_runtime_put(dwc->dev);
+ }
+diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
+index 487f0ff6ae25..76f0b0df37c1 100644
+--- a/drivers/usb/dwc3/host.c
++++ b/drivers/usb/dwc3/host.c
+@@ -54,11 +54,12 @@ static int dwc3_host_get_irq(struct dwc3 *dwc)
+
+ int dwc3_host_init(struct dwc3 *dwc)
+ {
+- struct property_entry props[2];
++ struct property_entry props[3];
+ struct platform_device *xhci;
+ int ret, irq;
+ struct resource *res;
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
++ int prop_idx = 0;
+
+ irq = dwc3_host_get_irq(dwc);
+ if (irq < 0)
+@@ -97,8 +98,22 @@ int dwc3_host_init(struct dwc3 *dwc)
+
+ memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
+
+- if (dwc->usb3_lpm_capable) {
+- props[0].name = "usb3-lpm-capable";
++ if (dwc->usb3_lpm_capable)
++ props[prop_idx++].name = "usb3-lpm-capable";
++
++ /**
++ * WORKAROUND: dwc3 revisions <=3.00a have a limitation
++ * where Port Disable command doesn't work.
++ *
++ * The suggested workaround is that we avoid Port Disable
++ * completely.
++ *
++	 * The following flag tells XHCI to do just that.
++ */
++ if (dwc->revision <= DWC3_REVISION_300A)
++ props[prop_idx++].name = "quirk-broken-port-ped";
++
++ if (prop_idx) {
+ ret = platform_device_add_properties(xhci, props);
+ if (ret) {
+ dev_err(dwc->dev, "failed to add properties to xHCI\n");
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 0ef16900efed..1d41637a53e5 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -458,6 +458,12 @@ static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+ return;
+ }
+
++ if (xhci->quirks & XHCI_BROKEN_PORT_PED) {
++ xhci_dbg(xhci,
++ "Broken Port Enabled/Disabled, ignoring port disable request.\n");
++ return;
++ }
++
+ /* Write 1 to disable the port */
+ writel(port_status | PORT_PE, addr);
+ port_status = readl(addr);
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 9715200eb36e..bd02a6cd8e2c 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -232,6 +232,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
+ if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable"))
+ xhci->quirks |= XHCI_LPM_SUPPORT;
+
++ if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped"))
++ xhci->quirks |= XHCI_BROKEN_PORT_PED;
++
+ hcd->usb_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
+ if (IS_ERR(hcd->usb_phy)) {
+ ret = PTR_ERR(hcd->usb_phy);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 2d7b6374b58d..ea18bf49c2eb 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1650,6 +1650,9 @@ struct xhci_hcd {
+ #define XHCI_SSIC_PORT_UNUSED (1 << 22)
+ #define XHCI_NO_64BIT_SUPPORT (1 << 23)
+ #define XHCI_MISSING_CAS (1 << 24)
++/* For controller with a broken Port Disable implementation */
++#define XHCI_BROKEN_PORT_PED (1 << 25)
++
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 16cc18369111..9129f6cb8230 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2071,6 +2071,20 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
++/*
++ * Reported by Tobias Jakobi <tjakobi@math.uni-bielefeld.de>
++ * The INIC-3619 bridge is used in the StarTech SLSODDU33B
++ * SATA-USB enclosure for slimline optical drives.
++ *
++ * The quirk enables MakeMKV to properly exchange keys with
++ * an installed BD drive.
++ */
++UNUSUAL_DEV( 0x13fd, 0x3609, 0x0209, 0x0209,
++ "Initio Corporation",
++ "INIC-3619",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_IGNORE_RESIDUE ),
++
+ /* Reported by Qinglin Ye <yestyle@gmail.com> */
+ UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100,
+ "Kingston",
+diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
+index 59e95762a6de..c5a567a73f59 100644
+--- a/drivers/watchdog/s3c2410_wdt.c
++++ b/drivers/watchdog/s3c2410_wdt.c
+@@ -46,6 +46,7 @@
+ #define S3C2410_WTCON 0x00
+ #define S3C2410_WTDAT 0x04
+ #define S3C2410_WTCNT 0x08
++#define S3C2410_WTCLRINT 0x0c
+
+ #define S3C2410_WTCNT_MAXCNT 0xffff
+
+@@ -72,6 +73,7 @@
+ #define EXYNOS5_WDT_MASK_RESET_REG_OFFSET 0x040c
+ #define QUIRK_HAS_PMU_CONFIG (1 << 0)
+ #define QUIRK_HAS_RST_STAT (1 << 1)
++#define QUIRK_HAS_WTCLRINT_REG (1 << 2)
+
+ /* These quirks require that we have a PMU register map */
+ #define QUIRKS_HAVE_PMUREG (QUIRK_HAS_PMU_CONFIG | \
+@@ -143,13 +145,18 @@ static const struct s3c2410_wdt_variant drv_data_s3c2410 = {
+ };
+
+ #ifdef CONFIG_OF
++static const struct s3c2410_wdt_variant drv_data_s3c6410 = {
++ .quirks = QUIRK_HAS_WTCLRINT_REG,
++};
++
+ static const struct s3c2410_wdt_variant drv_data_exynos5250 = {
+ .disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
+ .mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
+ .mask_bit = 20,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = 20,
+- .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
++ .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \
++ | QUIRK_HAS_WTCLRINT_REG,
+ };
+
+ static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
+@@ -158,7 +165,8 @@ static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
+ .mask_bit = 0,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = 9,
+- .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
++ .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \
++ | QUIRK_HAS_WTCLRINT_REG,
+ };
+
+ static const struct s3c2410_wdt_variant drv_data_exynos7 = {
+@@ -167,12 +175,15 @@ static const struct s3c2410_wdt_variant drv_data_exynos7 = {
+ .mask_bit = 23,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = 23, /* A57 WDTRESET */
+- .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
++ .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \
++ | QUIRK_HAS_WTCLRINT_REG,
+ };
+
+ static const struct of_device_id s3c2410_wdt_match[] = {
+ { .compatible = "samsung,s3c2410-wdt",
+ .data = &drv_data_s3c2410 },
++ { .compatible = "samsung,s3c6410-wdt",
++ .data = &drv_data_s3c6410 },
+ { .compatible = "samsung,exynos5250-wdt",
+ .data = &drv_data_exynos5250 },
+ { .compatible = "samsung,exynos5420-wdt",
+@@ -418,6 +429,10 @@ static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
+ dev_info(wdt->dev, "watchdog timer expired (irq)\n");
+
+ s3c2410wdt_keepalive(&wdt->wdt_device);
++
++ if (wdt->drv_data->quirks & QUIRK_HAS_WTCLRINT_REG)
++ writel(0x1, wdt->reg_base + S3C2410_WTCLRINT);
++
+ return IRQ_HANDLED;
+ }
+
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 87457227812c..bdd32925a15e 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1104,6 +1104,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+ return -EINVAL;
+ }
+
++ /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
++ if (tcon)
++ tcon->tid = 0;
++
+ rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
+ if (rc) {
+ kfree(unc_path);
+diff --git a/fs/dax.c b/fs/dax.c
+index c45598b912e1..a39b404b646a 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -369,6 +369,22 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
+ }
+ spin_lock_irq(&mapping->tree_lock);
+
++ if (!entry) {
++ /*
++ * We needed to drop the page_tree lock while calling
++ * radix_tree_preload() and we didn't have an entry to
++ * lock. See if another thread inserted an entry at
++ * our index during this time.
++ */
++ entry = __radix_tree_lookup(&mapping->page_tree, index,
++ NULL, &slot);
++ if (entry) {
++ radix_tree_preload_end();
++ spin_unlock_irq(&mapping->tree_lock);
++ goto restart;
++ }
++ }
++
+ if (pmd_downgrade) {
+ radix_tree_delete(&mapping->page_tree, index);
+ mapping->nrexceptional--;
+@@ -384,19 +400,12 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
+ if (err) {
+ spin_unlock_irq(&mapping->tree_lock);
+ /*
+- * Someone already created the entry? This is a
+- * normal failure when inserting PMDs in a range
+- * that already contains PTEs. In that case we want
+- * to return -EEXIST immediately.
+- */
+- if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
+- goto restart;
+- /*
+- * Our insertion of a DAX PMD entry failed, most
+- * likely because it collided with a PTE sized entry
+- * at a different index in the PMD range. We haven't
+- * inserted anything into the radix tree and have no
+- * waiters to wake.
++ * Our insertion of a DAX entry failed, most likely
++ * because we were inserting a PMD entry and it
++ * collided with a PTE sized entry at a different
++ * index in the PMD range. We haven't inserted
++ * anything into the radix tree and have no waiters to
++ * wake.
+ */
+ return ERR_PTR(err);
+ }
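The grab_mapping_entry() hunk handles the window in which mapping->tree_lock is dropped for radix_tree_preload(): after retaking the lock it looks the index up again and restarts if another thread inserted an entry in the meantime. Below is a userspace-only sketch of that drop-lock, reacquire, recheck, restart shape, assuming a pthread mutex and a plain array in place of the radix tree; it is an illustration of the pattern, not the DAX code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot[16];              /* stand-in for the radix tree */

/* Return the entry at idx, creating it if needed.  The allocation must
 * happen without the lock held, so recheck the slot after retaking it. */
static void *grab_entry(int idx)
{
restart:
    pthread_mutex_lock(&lock);
    if (slot[idx]) {                /* fast path: entry already present */
        void *entry = slot[idx];
        pthread_mutex_unlock(&lock);
        return entry;
    }
    pthread_mutex_unlock(&lock);

    void *new_entry = malloc(1);    /* "preload" done outside the lock */

    pthread_mutex_lock(&lock);
    if (slot[idx]) {                /* lost the race while unlocked */
        pthread_mutex_unlock(&lock);
        free(new_entry);
        goto restart;
    }
    slot[idx] = new_entry;
    pthread_mutex_unlock(&lock);
    return new_entry;
}

int main(void)
{
    printf("first lookup:  %p\n", grab_entry(3));
    printf("second lookup: %p\n", grab_entry(3));   /* same pointer */
    return 0;
}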
+diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
+index 67c24351a67f..cd261c8de53a 100644
+--- a/fs/orangefs/super.c
++++ b/fs/orangefs/super.c
+@@ -263,8 +263,13 @@ int orangefs_remount(struct orangefs_sb_info_s *orangefs_sb)
+ if (!new_op)
+ return -ENOMEM;
+ new_op->upcall.req.features.features = 0;
+- ret = service_operation(new_op, "orangefs_features", 0);
+- orangefs_features = new_op->downcall.resp.features.features;
++ ret = service_operation(new_op, "orangefs_features",
++ ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX);
++ if (!ret)
++ orangefs_features =
++ new_op->downcall.resp.features.features;
++ else
++ orangefs_features = 0;
+ op_release(new_op);
+ } else {
+ orangefs_features = 0;
+diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
+index b803213d1307..39c75a86c67f 100644
+--- a/fs/sysfs/file.c
++++ b/fs/sysfs/file.c
+@@ -108,7 +108,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
+ {
+ const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
+ struct kobject *kobj = of->kn->parent->priv;
+- size_t len;
++ ssize_t len;
+
+ /*
+ * If buf != of->prealloc_buf, we don't know how
+@@ -117,13 +117,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
+ if (WARN_ON_ONCE(buf != of->prealloc_buf))
+ return 0;
+ len = ops->show(kobj, of->kn->priv, buf);
++ if (len < 0)
++ return len;
+ if (pos) {
+ if (len <= pos)
+ return 0;
+ len -= pos;
+ memmove(buf, buf + pos, len);
+ }
+- return min(count, len);
++ return min_t(ssize_t, count, len);
+ }
+
+ /* kernfs write callback for regular sysfs files */
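The sysfs_kf_read() fix matters because ops->show() returns ssize_t; holding the result in a size_t silently converts a negative error into a huge unsigned value, so a sign check can never catch it and min(count, len) no longer truncates. A small demonstration of the signedness trap, unrelated to the kernel sources:

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
    ssize_t err = -22;           /* a show() callback failing with -EINVAL */

    size_t  as_unsigned = err;   /* old code: size_t len  = ops->show(...) */
    ssize_t as_signed   = err;   /* fixed:    ssize_t len = ops->show(...) */

    if (as_signed < 0)
        printf("signed copy keeps the error: %zd\n", as_signed);

    /* The unsigned copy wrapped to a huge value, so it passes every
     * ">= 0" test, and min(count, len) would then return count unchanged. */
    printf("unsigned copy became: %zu\n", as_unsigned);
    return 0;
}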
+diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
+index c516d7158a21..205ab55d595d 100644
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -1318,8 +1318,16 @@ xfs_free_file_space(
+ /*
+ * Now that we've unmap all full blocks we'll have to zero out any
+ * partial block at the beginning and/or end. xfs_zero_range is
+- * smart enough to skip any holes, including those we just created.
++ * smart enough to skip any holes, including those we just created,
++ * but we must take care not to zero beyond EOF and enlarge i_size.
+ */
++
++ if (offset >= XFS_ISIZE(ip))
++ return 0;
++
++ if (offset + len > XFS_ISIZE(ip))
++ len = XFS_ISIZE(ip) - offset;
++
+ return xfs_zero_range(ip, offset, len, NULL);
+ }
+
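The xfs_free_file_space() hunk clamps the trailing zeroing range so it never reaches past EOF, which would otherwise let xfs_zero_range() enlarge i_size. The clamping arithmetic in isolation, with made-up names instead of the XFS helpers:

#include <stdio.h>

/* Clamp a zeroing request [offset, offset + len) so it stops at isize.
 * Returns the adjusted length, or 0 if the range starts at or past EOF. */
static unsigned long long clamp_zero_range(unsigned long long offset,
                                           unsigned long long len,
                                           unsigned long long isize)
{
    if (offset >= isize)
        return 0;
    if (offset + len > isize)
        len = isize - offset;
    return len;
}

int main(void)
{
    /* File is 1000 bytes; caller asked to zero 4096 bytes at offset 512. */
    printf("zero %llu bytes\n", clamp_zero_range(512, 4096, 1000)); /* 488 */
    /* Range entirely beyond EOF: nothing to zero. */
    printf("zero %llu bytes\n", clamp_zero_range(2000, 100, 1000)); /* 0 */
    return 0;
}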
+diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
+index 0d5f4268d75f..61766a420f6b 100644
+--- a/include/drm/i915_pciids.h
++++ b/include/drm/i915_pciids.h
+@@ -226,23 +226,18 @@
+ INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
+ INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
+
+-#define INTEL_BDW_RSVDM_IDS(info) \
++#define INTEL_BDW_RSVD_IDS(info) \
+ INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \
+- INTEL_VGA_DEVICE(0x163E, info) /* ULX */
+-
+-#define INTEL_BDW_RSVDD_IDS(info) \
++ INTEL_VGA_DEVICE(0x163E, info), /* ULX */ \
+ INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
+ INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
+
+ #define INTEL_BDW_IDS(info) \
+ INTEL_BDW_GT12_IDS(info), \
+ INTEL_BDW_GT3_IDS(info), \
+- INTEL_BDW_RSVDM_IDS(info), \
+- INTEL_BDW_GT12_IDS(info), \
+- INTEL_BDW_GT3_IDS(info), \
+- INTEL_BDW_RSVDD_IDS(info)
++ INTEL_BDW_RSVD_IDS(info)
+
+ #define INTEL_CHV_IDS(info) \
+ INTEL_VGA_DEVICE(0x22b0, info), \
+diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
+index ed953f98f0e1..1487011fe057 100644
+--- a/include/drm/ttm/ttm_object.h
++++ b/include/drm/ttm/ttm_object.h
+@@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+ * @ref_type: The type of reference.
+ * @existed: Upon completion, indicates that an identical reference object
+ * already existed, and the refcount was upped on that object instead.
++ * @require_existed: Fail with -EPERM if an identical ref object didn't
++ * already exist.
+ *
+ * Checks that the base object is shareable and adds a ref object to it.
+ *
+@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+ */
+ extern int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+- enum ttm_ref_type ref_type, bool *existed);
++ enum ttm_ref_type ref_type, bool *existed,
++ bool require_existed);
+
+ extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
+ struct ttm_base_object *base);
+diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
+index b5abfda80465..4c5bca38c653 100644
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -14,9 +14,6 @@
+ #ifndef __LINUX_ARM_SMCCC_H
+ #define __LINUX_ARM_SMCCC_H
+
+-#include <linux/linkage.h>
+-#include <linux/types.h>
+-
+ /*
+ * This file provides common defines for ARM SMC Calling Convention as
+ * specified in
+@@ -60,6 +57,13 @@
+ #define ARM_SMCCC_OWNER_TRUSTED_OS 50
+ #define ARM_SMCCC_OWNER_TRUSTED_OS_END 63
+
++#define ARM_SMCCC_QUIRK_NONE 0
++#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
++
++#ifndef __ASSEMBLY__
++
++#include <linux/linkage.h>
++#include <linux/types.h>
+ /**
+ * struct arm_smccc_res - Result from SMC/HVC call
+ * @a0-a3 result values from registers 0 to 3
+@@ -72,33 +76,59 @@ struct arm_smccc_res {
+ };
+
+ /**
+- * arm_smccc_smc() - make SMC calls
++ * struct arm_smccc_quirk - Contains quirk information
++ * @id: quirk identification
++ * @state: quirk specific information
++ * @a6: Qualcomm quirk entry for returning post-smc call contents of a6
++ */
++struct arm_smccc_quirk {
++ int id;
++ union {
++ unsigned long a6;
++ } state;
++};
++
++/**
++ * __arm_smccc_smc() - make SMC calls
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
++ * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required.
+ *
+ * This function is used to make SMC calls following SMC Calling Convention.
+ * The content of the supplied param are copied to registers 0 to 7 prior
+ * to the SMC instruction. The return values are updated with the content
+- * from register 0 to 3 on return from the SMC instruction.
++ * from register 0 to 3 on return from the SMC instruction. An optional
++ * quirk structure provides vendor specific behavior.
+ */
+-asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1,
++asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long a4,
+ unsigned long a5, unsigned long a6, unsigned long a7,
+- struct arm_smccc_res *res);
++ struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
+
+ /**
+- * arm_smccc_hvc() - make HVC calls
++ * __arm_smccc_hvc() - make HVC calls
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
++ * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required.
+ *
+ * This function is used to make HVC calls following SMC Calling
+ * Convention. The content of the supplied param are copied to registers 0
+ * to 7 prior to the HVC instruction. The return values are updated with
+- * the content from register 0 to 3 on return from the HVC instruction.
++ * the content from register 0 to 3 on return from the HVC instruction. An
++ * optional quirk structure provides vendor specific behavior.
+ */
+-asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1,
++asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long a4,
+ unsigned long a5, unsigned long a6, unsigned long a7,
+- struct arm_smccc_res *res);
++ struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
++
++#define arm_smccc_smc(...) __arm_smccc_smc(__VA_ARGS__, NULL)
++
++#define arm_smccc_smc_quirk(...) __arm_smccc_smc(__VA_ARGS__)
++
++#define arm_smccc_hvc(...) __arm_smccc_hvc(__VA_ARGS__, NULL)
++
++#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
+
++#endif /*__ASSEMBLY__*/
+ #endif /*__LINUX_ARM_SMCCC_H*/
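The arm-smccc.h rework keeps arm_smccc_smc()/arm_smccc_hvc() as variadic macros that forward to the new double-underscore functions with a trailing NULL quirk pointer, while the *_quirk variants pass the caller's pointer through. The append-a-default-argument trick can be shown on its own; every identifier below is invented for the demo.

#include <stdio.h>

struct quirk { int id; };

/* New-style entry point that always takes an optional quirk pointer. */
static void do_call_impl(int a0, int a1, struct quirk *q)
{
    printf("call(%d, %d), quirk: %s\n", a0, a1, q ? "set" : "none");
}

/* Old names preserved as macros: the plain form appends a NULL quirk,
 * the _quirk form forwards the caller's pointer unchanged. */
#define do_call(...)       do_call_impl(__VA_ARGS__, NULL)
#define do_call_quirk(...) do_call_impl(__VA_ARGS__)

int main(void)
{
    struct quirk q = { .id = 1 };

    do_call(1, 2);           /* expands to do_call_impl(1, 2, NULL) */
    do_call_quirk(3, 4, &q); /* expands to do_call_impl(3, 4, &q)   */
    return 0;
}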
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 73dda0edcb97..a4f77feecbb0 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2516,6 +2516,8 @@
+ #define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700
+ #define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
+
++#define PCI_VENDOR_ID_HUAWEI 0x19e5
++
+ #define PCI_VENDOR_ID_NETRONOME 0x19ee
+ #define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200
+ #define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 7bd2403e4fef..16ab429735a7 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -37,7 +37,6 @@ extern void get_random_bytes(void *buf, int nbytes);
+ extern int add_random_ready_callback(struct random_ready_callback *rdy);
+ extern void del_random_ready_callback(struct random_ready_callback *rdy);
+ extern void get_random_bytes_arch(void *buf, int nbytes);
+-extern int random_int_secret_init(void);
+
+ #ifndef MODULE
+ extern const struct file_operations random_fops, urandom_fops;
+diff --git a/init/main.c b/init/main.c
+index b0c9d6facef9..09beb7fc6e8c 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -879,7 +879,6 @@ static void __init do_basic_setup(void)
+ do_ctors();
+ usermodehelper_enable();
+ do_initcalls();
+- random_int_secret_init();
+ }
+
+ static void __init do_pre_smp_initcalls(void)
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 49ba7c1ade9d..a5caecef88be 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -181,11 +181,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
+
+ WARN_ON(!task->ptrace || task->parent != current);
+
++ /*
++ * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
++ * Recheck state under the lock to close this race.
++ */
+ spin_lock_irq(&task->sighand->siglock);
+- if (__fatal_signal_pending(task))
+- wake_up_state(task, __TASK_TRACED);
+- else
+- task->state = TASK_TRACED;
++ if (task->state == __TASK_TRACED) {
++ if (__fatal_signal_pending(task))
++ wake_up_state(task, __TASK_TRACED);
++ else
++ task->state = TASK_TRACED;
++ }
+ spin_unlock_irq(&task->sighand->siglock);
+ }
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index a85739efcc30..8df48ccb8af6 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -4825,9 +4825,9 @@ static __init int test_ringbuffer(void)
+ rb_data[cpu].cnt = cpu;
+ rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
+ "rbtester/%d", cpu);
+- if (WARN_ON(!rb_threads[cpu])) {
++ if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
+ pr_cont("FAILED\n");
+- ret = -1;
++ ret = PTR_ERR(rb_threads[cpu]);
+ goto out_free;
+ }
+
+@@ -4837,9 +4837,9 @@ static __init int test_ringbuffer(void)
+
+ /* Now create the rb hammer! */
+ rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
+- if (WARN_ON(!rb_hammer)) {
++ if (WARN_ON(IS_ERR(rb_hammer))) {
+ pr_cont("FAILED\n");
+- ret = -1;
++ ret = PTR_ERR(rb_hammer);
+ goto out_free;
+ }
+
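The ring-buffer self-test fix reflects that kthread_create() reports failure as an error value encoded in the returned pointer, never as NULL, so the old WARN_ON(!ptr) check could not fire. Below is a cut-down userspace re-implementation of the ERR_PTR()/IS_ERR()/PTR_ERR() convention to show why the NULL test misses the failure; the real macros live in include/linux/err.h and differ in detail.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Cut-down stand-ins for the kernel's pointer-encoded error helpers. */
static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* A failing thread-creation stub: it returns ERR_PTR(-ENOMEM), not NULL. */
static void *fake_kthread_create(void)
{
    return ERR_PTR(-ENOMEM);
}

int main(void)
{
    void *task = fake_kthread_create();

    if (task == NULL)
        printf("NULL check caught the failure\n");   /* never printed */
    else if (IS_ERR(task))
        printf("IS_ERR caught the failure: %ld\n", PTR_ERR(task));
    return 0;
}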
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 1e7873e40c9a..dc8a2672c407 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1526,7 +1526,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
+ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
+ compat_ulong_t, maxnode)
+ {
+- long err = 0;
+ unsigned long __user *nm = NULL;
+ unsigned long nr_bits, alloc_size;
+ DECLARE_BITMAP(bm, MAX_NUMNODES);
+@@ -1535,14 +1534,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
+ alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
+
+ if (nmask) {
+- err = compat_get_bitmap(bm, nmask, nr_bits);
++ if (compat_get_bitmap(bm, nmask, nr_bits))
++ return -EFAULT;
+ nm = compat_alloc_user_space(alloc_size);
+- err |= copy_to_user(nm, bm, alloc_size);
++ if (copy_to_user(nm, bm, alloc_size))
++ return -EFAULT;
+ }
+
+- if (err)
+- return -EFAULT;
+-
+ return sys_set_mempolicy(mode, nm, nr_bits+1);
+ }
+
+@@ -1550,7 +1548,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
+ compat_ulong_t, mode, compat_ulong_t __user *, nmask,
+ compat_ulong_t, maxnode, compat_ulong_t, flags)
+ {
+- long err = 0;
+ unsigned long __user *nm = NULL;
+ unsigned long nr_bits, alloc_size;
+ nodemask_t bm;
+@@ -1559,14 +1556,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
+ alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
+
+ if (nmask) {
+- err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
++ if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
++ return -EFAULT;
+ nm = compat_alloc_user_space(alloc_size);
+- err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
++ if (copy_to_user(nm, nodes_addr(bm), alloc_size))
++ return -EFAULT;
+ }
+
+- if (err)
+- return -EFAULT;
+-
+ return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
+ }
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 1a5f6655958e..1aec370bf9e9 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4381,13 +4381,13 @@ void show_free_areas(unsigned int filter)
+ K(node_page_state(pgdat, NR_FILE_MAPPED)),
+ K(node_page_state(pgdat, NR_FILE_DIRTY)),
+ K(node_page_state(pgdat, NR_WRITEBACK)),
++ K(node_page_state(pgdat, NR_SHMEM)),
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
+ K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
+ * HPAGE_PMD_NR),
+ K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
+ #endif
+- K(node_page_state(pgdat, NR_SHMEM)),
+ K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
+ K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
+ node_page_state(pgdat, NR_PAGES_SCANNED),
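The show_free_areas() hunk moves the NR_SHMEM value out of the CONFIG_TRANSPARENT_HUGEPAGE region so the printk arguments match the order the format string prints them in either configuration. The general hazard of interleaving conditionally compiled arguments with unconditional ones can be reproduced with plain printf; nothing below is kernel code.

#include <stdio.h>

/* Flip this to 1 to model CONFIG_TRANSPARENT_HUGEPAGE being enabled. */
#define HAVE_THP 0

int main(void)
{
    unsigned long dirty = 10, writeback = 20, shmem = 30, thp = 40, temp = 50;

    (void)thp;
    /* The format prints shmem before the HAVE_THP-only columns, so the
     * shmem argument must also be passed before the conditional block;
     * passing it after (as the older code did with NR_SHMEM) shifts the
     * columns whenever the option is enabled. */
    printf("dirty:%lu writeback:%lu shmem:%lu "
#if HAVE_THP
           "thp:%lu "
#endif
           "writeback_tmp:%lu\n",
           dirty, writeback, shmem,
#if HAVE_THP
           thp,
#endif
           temp);
    return 0;
}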
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index d37ae7dc114b..56d491950390 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -718,7 +718,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
+ ieee80211_recalc_ps(local);
+
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+- sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
++ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
++ local->ops->wake_tx_queue) {
+ /* XXX: for AP_VLAN, actually track AP queues */
+ netif_tx_start_all_queues(dev);
+ } else if (dev) {
+diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
+index 14b3f007826d..2927d06faa6e 100644
+--- a/net/wireless/sysfs.c
++++ b/net/wireless/sysfs.c
+@@ -130,12 +130,10 @@ static int wiphy_resume(struct device *dev)
+ /* Age scan results with time spent in suspend */
+ cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
+
+- if (rdev->ops->resume) {
+- rtnl_lock();
+- if (rdev->wiphy.registered)
+- ret = rdev_resume(rdev);
+- rtnl_unlock();
+- }
++ rtnl_lock();
++ if (rdev->wiphy.registered && rdev->ops->resume)
++ ret = rdev_resume(rdev);
++ rtnl_unlock();
+
+ return ret;
+ }
+diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
+index 97bafac3bc15..17d20b99f041 100644
+--- a/sound/soc/codecs/rt5670.c
++++ b/sound/soc/codecs/rt5670.c
+@@ -2814,6 +2814,7 @@ MODULE_DEVICE_TABLE(i2c, rt5670_i2c_id);
+ static const struct acpi_device_id rt5670_acpi_match[] = {
+ { "10EC5670", 0},
+ { "10EC5672", 0},
++ { "10EC5640", 0}, /* quirk */
+ { },
+ };
+ MODULE_DEVICE_TABLE(acpi, rt5670_acpi_match);
+diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
+index f4d92bbc5373..63820080dd16 100644
+--- a/sound/soc/intel/atom/sst/sst_acpi.c
++++ b/sound/soc/intel/atom/sst/sst_acpi.c
+@@ -400,6 +400,7 @@ static int sst_acpi_remove(struct platform_device *pdev)
+ static unsigned long cht_machine_id;
+
+ #define CHT_SURFACE_MACH 1
++#define BYT_THINKPAD_10 2
+
+ static int cht_surface_quirk_cb(const struct dmi_system_id *id)
+ {
+@@ -407,6 +408,23 @@ static int cht_surface_quirk_cb(const struct dmi_system_id *id)
+ return 1;
+ }
+
++static int byt_thinkpad10_quirk_cb(const struct dmi_system_id *id)
++{
++ cht_machine_id = BYT_THINKPAD_10;
++ return 1;
++}
++
++
++static const struct dmi_system_id byt_table[] = {
++ {
++ .callback = byt_thinkpad10_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "20C3001VHH"),
++ },
++ },
++ { }
++};
+
+ static const struct dmi_system_id cht_table[] = {
+ {
+@@ -424,6 +442,10 @@ static struct sst_acpi_mach cht_surface_mach = {
+ "10EC5640", "cht-bsw-rt5645", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
+ &chv_platform_data };
+
++static struct sst_acpi_mach byt_thinkpad_10 = {
++ "10EC5640", "cht-bsw-rt5672", "intel/fw_sst_0f28.bin", "cht-bsw", NULL,
++ &byt_rvp_platform_data };
++
+ static struct sst_acpi_mach *cht_quirk(void *arg)
+ {
+ struct sst_acpi_mach *mach = arg;
+@@ -436,8 +458,21 @@ static struct sst_acpi_mach *cht_quirk(void *arg)
+ return mach;
+ }
+
++static struct sst_acpi_mach *byt_quirk(void *arg)
++{
++ struct sst_acpi_mach *mach = arg;
++
++ dmi_check_system(byt_table);
++
++ if (cht_machine_id == BYT_THINKPAD_10)
++ return &byt_thinkpad_10;
++ else
++ return mach;
++}
++
++
+ static struct sst_acpi_mach sst_acpi_bytcr[] = {
+- {"10EC5640", "bytcr_rt5640", "intel/fw_sst_0f28.bin", "bytcr_rt5640", NULL,
++ {"10EC5640", "bytcr_rt5640", "intel/fw_sst_0f28.bin", "bytcr_rt5640", byt_quirk,
+ &byt_rvp_platform_data },
+ {"10EC5642", "bytcr_rt5640", "intel/fw_sst_0f28.bin", "bytcr_rt5640", NULL,
+ &byt_rvp_platform_data },
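sst_acpi_bytcr now runs a per-board quirk callback: dmi_check_system() walks a match table and, for the Thinkpad 10 DMI strings, byt_quirk() substitutes the cht-bsw-rt5672 machine description. A generic standalone sketch of that table-plus-callback dispatch follows; the matching loop is a simplified stand-in, not the kernel's dmi_check_system().

#include <stdio.h>
#include <string.h>

struct board_id {
    const char *vendor;
    const char *product;
    int (*callback)(void);
};

static int machine_id;                     /* set by a matching callback */

static int thinkpad10_quirk(void) { machine_id = 2; return 1; }

static const struct board_id quirk_table[] = {
    { "LENOVO", "20C3001VHH", thinkpad10_quirk },
    { NULL, NULL, NULL }
};

/* Very small stand-in for a DMI scan: run the callback of the first
 * entry whose identifiers match the running board. */
static void check_board(const char *vendor, const char *product)
{
    const struct board_id *id;

    for (id = quirk_table; id->vendor; id++)
        if (!strcmp(id->vendor, vendor) && !strcmp(id->product, product))
            if (id->callback())
                break;
}

int main(void)
{
    check_board("LENOVO", "20C3001VHH");
    printf("machine_id = %d (nonzero: pick the alternate machine)\n",
           machine_id);
    return 0;
}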
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 8d2fb2d6f532..1bd985f01c73 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -387,6 +387,16 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ BYT_RT5640_SSP0_AIF1),
+
+ },
++ {
++ .callback = byt_rt5640_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
++ },
++ .driver_data = (unsigned long *)(BYT_RT5640_IN3_MAP |
++ BYT_RT5640_MCLK_EN |
++ BYT_RT5640_SSP0_AIF1),
++
++ },
+ {}
+ };
+
+diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
+index f504a0e18f91..753938371965 100644
+--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
++++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
+@@ -24,6 +24,9 @@
+ #include <linux/acpi.h>
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
++#include <asm/cpu_device_id.h>
++#include <asm/platform_sst_audio.h>
++#include <linux/clk.h>
+ #include <sound/pcm.h>
+ #include <sound/pcm_params.h>
+ #include <sound/soc.h>
+@@ -45,6 +48,7 @@ struct cht_mc_private {
+ struct snd_soc_jack jack;
+ struct cht_acpi_card *acpi_card;
+ char codec_name[16];
++ struct clk *mclk;
+ };
+
+ static inline struct snd_soc_dai *cht_get_codec_dai(struct snd_soc_card *card)
+@@ -65,6 +69,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dai *codec_dai;
++ struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
+ int ret;
+
+ codec_dai = cht_get_codec_dai(card);
+@@ -73,19 +78,30 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
+ return -EIO;
+ }
+
+- if (!SND_SOC_DAPM_EVENT_OFF(event))
+- return 0;
++ if (SND_SOC_DAPM_EVENT_ON(event)) {
++ if (ctx->mclk) {
++ ret = clk_prepare_enable(ctx->mclk);
++ if (ret < 0) {
++ dev_err(card->dev,
++ "could not configure MCLK state");
++ return ret;
++ }
++ }
++ } else {
++ /* Set codec sysclk source to its internal clock because codec PLL will
++ * be off when idle and MCLK will also be off when codec is
++ * runtime suspended. Codec needs clock for jack detection and button
++ * press. MCLK is turned off with clock framework or ACPI.
++ */
++ ret = snd_soc_dai_set_sysclk(codec_dai, RT5645_SCLK_S_RCCLK,
++ 48000 * 512, SND_SOC_CLOCK_IN);
++ if (ret < 0) {
++ dev_err(card->dev, "can't set codec sysclk: %d\n", ret);
++ return ret;
++ }
+
+- /* Set codec sysclk source to its internal clock because codec PLL will
+- * be off when idle and MCLK will also be off by ACPI when codec is
+- * runtime suspended. Codec needs clock for jack detection and button
+- * press.
+- */
+- ret = snd_soc_dai_set_sysclk(codec_dai, RT5645_SCLK_S_RCCLK,
+- 0, SND_SOC_CLOCK_IN);
+- if (ret < 0) {
+- dev_err(card->dev, "can't set codec sysclk: %d\n", ret);
+- return ret;
++ if (ctx->mclk)
++ clk_disable_unprepare(ctx->mclk);
+ }
+
+ return 0;
+@@ -97,7 +113,7 @@ static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
+ SND_SOC_DAPM_MIC("Int Mic", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+- platform_clock_control, SND_SOC_DAPM_POST_PMD),
++ platform_clock_control, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ };
+
+ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
+@@ -225,6 +241,26 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
+
+ rt5645_set_jack_detect(codec, &ctx->jack, &ctx->jack, &ctx->jack);
+
++ if (ctx->mclk) {
++ /*
++ * The firmware might enable the clock at
++ * boot (this information may or may not
++ * be reflected in the enable clock register).
++ * To change the rate we must disable the clock
++ * first to cover these cases. Due to common
++ * clock framework restrictions that do not allow
++ * to disable a clock that has not been enabled,
++ * we need to enable the clock first.
++ */
++ ret = clk_prepare_enable(ctx->mclk);
++ if (!ret)
++ clk_disable_unprepare(ctx->mclk);
++
++ ret = clk_set_rate(ctx->mclk, CHT_PLAT_CLK_3_HZ);
++
++ if (ret)
++ dev_err(runtime->dev, "unable to set MCLK rate\n");
++ }
+ return ret;
+ }
+
+@@ -349,6 +385,18 @@ static struct cht_acpi_card snd_soc_cards[] = {
+
+ static char cht_rt5640_codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+
++static bool is_valleyview(void)
++{
++ static const struct x86_cpu_id cpu_ids[] = {
++ { X86_VENDOR_INTEL, 6, 55 }, /* Valleyview, Bay Trail */
++ {}
++ };
++
++ if (!x86_match_cpu(cpu_ids))
++ return false;
++ return true;
++}
++
+ static int snd_cht_mc_probe(struct platform_device *pdev)
+ {
+ int ret_val = 0;
+@@ -358,22 +406,32 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
+ struct sst_acpi_mach *mach;
+ const char *i2c_name = NULL;
+ int dai_index = 0;
++ bool found = false;
+
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_ATOMIC);
+ if (!drv)
+ return -ENOMEM;
+
++ mach = (&pdev->dev)->platform_data;
++
+ for (i = 0; i < ARRAY_SIZE(snd_soc_cards); i++) {
+- if (acpi_dev_found(snd_soc_cards[i].codec_id)) {
++ if (acpi_dev_found(snd_soc_cards[i].codec_id) &&
++ (!strncmp(snd_soc_cards[i].codec_id, mach->id, 8))) {
+ dev_dbg(&pdev->dev,
+ "found codec %s\n", snd_soc_cards[i].codec_id);
+ card = snd_soc_cards[i].soc_card;
+ drv->acpi_card = &snd_soc_cards[i];
++ found = true;
+ break;
+ }
+ }
++
++ if (!found) {
++ dev_err(&pdev->dev, "No matching HID found in supported list\n");
++ return -ENODEV;
++ }
++
+ card->dev = &pdev->dev;
+- mach = card->dev->platform_data;
+ sprintf(drv->codec_name, "i2c-%s:00", drv->acpi_card->codec_id);
+
+ /* set correct codec name */
+@@ -391,6 +449,16 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
+ cht_dailink[dai_index].codec_name = cht_rt5640_codec_name;
+ }
+
++ if (is_valleyview()) {
++ drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
++ if (IS_ERR(drv->mclk)) {
++ dev_err(&pdev->dev,
++ "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
++ PTR_ERR(drv->mclk));
++ return PTR_ERR(drv->mclk);
++ }
++ }
++
+ snd_soc_card_set_drvdata(card, drv);
+ ret_val = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret_val) {
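The cht_bsw_rt5645 changes make platform_clock_control() enable the PMC MCLK on the power-up event and, on power-down, switch the codec to its internal RC clock before gating MCLK, so jack detection keeps a clock while the link is idle. A toy model of that on/off event handling with a fake refcounted clock; none of these helpers are the common clock framework's.

#include <stdio.h>

/* Fake refcounted clock, standing in for clk_prepare_enable() and
 * clk_disable_unprepare(). */
static int mclk_count;
static void mclk_enable(void)  { mclk_count++; }
static void mclk_disable(void) { if (mclk_count > 0) mclk_count--; }

enum dapm_event { PRE_PMU, POST_PMD };

/* Power-up: turn the external MCLK on.  Power-down: switch the codec to
 * its internal clock (so jack detection keeps running) and gate MCLK. */
static void platform_clock_event(enum dapm_event ev)
{
    if (ev == PRE_PMU) {
        mclk_enable();
        printf("MCLK on  (refcount %d)\n", mclk_count);
    } else {
        printf("codec sysclk -> internal RC clock\n");
        mclk_disable();
        printf("MCLK off (refcount %d)\n", mclk_count);
    }
}

int main(void)
{
    platform_clock_event(PRE_PMU);
    platform_clock_event(POST_PMD);
    return 0;
}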
+diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
+index f24d19526603..268f2bf691b3 100644
+--- a/sound/soc/sunxi/sun4i-i2s.c
++++ b/sound/soc/sunxi/sun4i-i2s.c
+@@ -14,9 +14,11 @@
+ #include <linux/clk.h>
+ #include <linux/dmaengine.h>
+ #include <linux/module.h>
++#include <linux/of_device.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/regmap.h>
++#include <linux/reset.h>
+
+ #include <sound/dmaengine_pcm.h>
+ #include <sound/pcm_params.h>
+@@ -92,6 +94,7 @@ struct sun4i_i2s {
+ struct clk *bus_clk;
+ struct clk *mod_clk;
+ struct regmap *regmap;
++ struct reset_control *rst;
+
+ unsigned int mclk_freq;
+
+@@ -651,9 +654,22 @@ static int sun4i_i2s_runtime_suspend(struct device *dev)
+ return 0;
+ }
+
++struct sun4i_i2s_quirks {
++ bool has_reset;
++};
++
++static const struct sun4i_i2s_quirks sun4i_a10_i2s_quirks = {
++ .has_reset = false,
++};
++
++static const struct sun4i_i2s_quirks sun6i_a31_i2s_quirks = {
++ .has_reset = true,
++};
++
+ static int sun4i_i2s_probe(struct platform_device *pdev)
+ {
+ struct sun4i_i2s *i2s;
++ const struct sun4i_i2s_quirks *quirks;
+ struct resource *res;
+ void __iomem *regs;
+ int irq, ret;
+@@ -674,6 +690,12 @@ static int sun4i_i2s_probe(struct platform_device *pdev)
+ return irq;
+ }
+
++ quirks = of_device_get_match_data(&pdev->dev);
++ if (!quirks) {
++ dev_err(&pdev->dev, "Failed to determine the quirks to use\n");
++ return -ENODEV;
++ }
++
+ i2s->bus_clk = devm_clk_get(&pdev->dev, "apb");
+ if (IS_ERR(i2s->bus_clk)) {
+ dev_err(&pdev->dev, "Can't get our bus clock\n");
+@@ -692,7 +714,24 @@ static int sun4i_i2s_probe(struct platform_device *pdev)
+ dev_err(&pdev->dev, "Can't get our mod clock\n");
+ return PTR_ERR(i2s->mod_clk);
+ }
+-
++
++ if (quirks->has_reset) {
++ i2s->rst = devm_reset_control_get(&pdev->dev, NULL);
++ if (IS_ERR(i2s->rst)) {
++ dev_err(&pdev->dev, "Failed to get reset control\n");
++ return PTR_ERR(i2s->rst);
++ }
++ }
++
++ if (!IS_ERR(i2s->rst)) {
++ ret = reset_control_deassert(i2s->rst);
++ if (ret) {
++ dev_err(&pdev->dev,
++ "Failed to deassert the reset control\n");
++ return -EINVAL;
++ }
++ }
++
+ i2s->playback_dma_data.addr = res->start + SUN4I_I2S_FIFO_TX_REG;
+ i2s->playback_dma_data.maxburst = 4;
+
+@@ -727,23 +766,37 @@ static int sun4i_i2s_probe(struct platform_device *pdev)
+ sun4i_i2s_runtime_suspend(&pdev->dev);
+ err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
++ if (!IS_ERR(i2s->rst))
++ reset_control_assert(i2s->rst);
+
+ return ret;
+ }
+
+ static int sun4i_i2s_remove(struct platform_device *pdev)
+ {
++ struct sun4i_i2s *i2s = dev_get_drvdata(&pdev->dev);
++
+ snd_dmaengine_pcm_unregister(&pdev->dev);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ sun4i_i2s_runtime_suspend(&pdev->dev);
+
++ if (!IS_ERR(i2s->rst))
++ reset_control_assert(i2s->rst);
++
+ return 0;
+ }
+
+ static const struct of_device_id sun4i_i2s_match[] = {
+- { .compatible = "allwinner,sun4i-a10-i2s", },
++ {
++ .compatible = "allwinner,sun4i-a10-i2s",
++ .data = &sun4i_a10_i2s_quirks,
++ },
++ {
++ .compatible = "allwinner,sun6i-a31-i2s",
++ .data = &sun6i_a31_i2s_quirks,
++ },
+ {}
+ };
+ MODULE_DEVICE_TABLE(of, sun4i_i2s_match);
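The sun4i-i2s driver now attaches a sun4i_i2s_quirks structure to each compatible and fetches it with of_device_get_match_data(), so only the new sun6i-a31 variant looks up and deasserts a reset control. The idea of carrying per-variant capability data in the match table can be sketched standalone; the lookup below is an illustration, not the real OF core.

#include <stdio.h>
#include <string.h>

struct i2s_quirks {
    int has_reset;
};

static const struct i2s_quirks a10_quirks = { .has_reset = 0 };
static const struct i2s_quirks a31_quirks = { .has_reset = 1 };

struct of_id {
    const char *compatible;
    const struct i2s_quirks *data;
};

static const struct of_id match_table[] = {
    { "allwinner,sun4i-a10-i2s", &a10_quirks },
    { "allwinner,sun6i-a31-i2s", &a31_quirks },
    { NULL, NULL }
};

/* Stand-in for of_device_get_match_data(): map the compatible string to
 * its per-variant data, or NULL if the device is unknown. */
static const struct i2s_quirks *get_match_data(const char *compatible)
{
    const struct of_id *id;

    for (id = match_table; id->compatible; id++)
        if (!strcmp(id->compatible, compatible))
            return id->data;
    return NULL;
}

int main(void)
{
    const struct i2s_quirks *q = get_match_data("allwinner,sun6i-a31-i2s");

    if (!q)
        return 1;               /* probe would bail out with -ENODEV */
    printf("has_reset = %d%s\n", q->has_reset,
           q->has_reset ? " (deassert the reset line before use)" : "");
    return 0;
}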