author:     2018-09-05 11:28:34 -0400
committer:  2018-11-14 09:00:41 -0500
commit:     e8f7b595d24caaeec8ddbb979a63ef70832c51dc (patch)
tree:       bcc285bea33bf6ba740837d48696f2d8b029413d
parent:     Linux patch 4.14.67 (diff)
Linux patch 4.14.68
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |    4
-rw-r--r--  1067_linux-4.14.68.patch | 5725
2 files changed, 5729 insertions, 0 deletions
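
The patch below is a straight incremental diff from kernel.org: the first hunk bumps SUBLEVEL in the top-level Makefile from 67 to 68, and the 0000_README hunk registers the new patch file in the series. As a rough sketch of applying such a patch by hand (the directory name is illustrative, and in practice the gentoo-sources ebuild applies the whole series for you), use -p1 from the source root so the a/ and b/ path prefixes are stripped:

    cd linux-4.14.67
    patch -p1 < 1067_linux-4.14.68.patch
    make kernelversion    # should now report 4.14.68, per the SUBLEVEL hunk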
diff --git a/0000_README b/0000_README index de230d4f..4fd9ed95 100644 --- a/0000_README +++ b/0000_README @@ -311,6 +311,10 @@ Patch: 1066_linux-4.14.67.patch From: http://www.kernel.org Desc: Linux 4.14.67 +Patch: 1067_linux-4.14.68.patch +From: http://www.kernel.org +Desc: Linux 4.14.68 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1067_linux-4.14.68.patch b/1067_linux-4.14.68.patch new file mode 100644 index 00000000..4089fa83 --- /dev/null +++ b/1067_linux-4.14.68.patch @@ -0,0 +1,5725 @@ +diff --git a/Makefile b/Makefile +index 4dad2d1c24ba..3da579058926 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 14 +-SUBLEVEL = 67 ++SUBLEVEL = 68 + EXTRAVERSION = + NAME = Petit Gorille + +@@ -490,9 +490,13 @@ KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) + endif + + RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register ++RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register + RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk ++RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline + RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG))) ++RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG))) + export RETPOLINE_CFLAGS ++export RETPOLINE_VDSO_CFLAGS + + ifeq ($(config-targets),1) + # =========================================================================== +diff --git a/arch/Kconfig b/arch/Kconfig +index 4e01862f58e4..40dc31fea90c 100644 +--- a/arch/Kconfig ++++ b/arch/Kconfig +@@ -336,6 +336,9 @@ config HAVE_ARCH_JUMP_LABEL + config HAVE_RCU_TABLE_FREE + bool + ++config HAVE_RCU_TABLE_INVALIDATE ++ bool ++ + config ARCH_HAVE_NMI_SAFE_CMPXCHG + bool + +diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig +index 5c8caf85c350..8ff066090680 100644 +--- a/arch/arc/Kconfig ++++ b/arch/arc/Kconfig +@@ -45,6 +45,9 @@ config ARC + select HAVE_KERNEL_GZIP + select HAVE_KERNEL_LZMA + ++config ARCH_HAS_CACHE_LINE_SIZE ++ def_bool y ++ + config MIGHT_HAVE_PCI + bool + +diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h +index 8486f328cc5d..ff7d3232764a 100644 +--- a/arch/arc/include/asm/cache.h ++++ b/arch/arc/include/asm/cache.h +@@ -48,7 +48,9 @@ + }) + + /* Largest line length for either L1 or L2 is 128 bytes */ +-#define ARCH_DMA_MINALIGN 128 ++#define SMP_CACHE_BYTES 128 ++#define cache_line_size() SMP_CACHE_BYTES ++#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES + + extern void arc_cache_init(void); + extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); +diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h +index d5da2115d78a..03d6bb0f4e13 100644 +--- a/arch/arc/include/asm/delay.h ++++ b/arch/arc/include/asm/delay.h +@@ -17,8 +17,11 @@ + #ifndef __ASM_ARC_UDELAY_H + #define __ASM_ARC_UDELAY_H + ++#include <asm-generic/types.h> + #include <asm/param.h> /* HZ */ + ++extern unsigned long loops_per_jiffy; ++ + static inline void __delay(unsigned long loops) + { + __asm__ __volatile__( +diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c +index eee924dfffa6..d14499500106 100644 +--- a/arch/arc/mm/cache.c ++++ b/arch/arc/mm/cache.c +@@ -1035,7 +1035,7 @@ void flush_cache_mm(struct mm_struct *mm) + void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr, + unsigned long pfn) + { +- unsigned int 
paddr = pfn << PAGE_SHIFT; ++ phys_addr_t paddr = pfn << PAGE_SHIFT; + + u_vaddr &= PAGE_MASK; + +@@ -1055,8 +1055,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, + unsigned long u_vaddr) + { + /* TBD: do we really need to clear the kernel mapping */ +- __flush_dcache_page(page_address(page), u_vaddr); +- __flush_dcache_page(page_address(page), page_address(page)); ++ __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr); ++ __flush_dcache_page((phys_addr_t)page_address(page), ++ (phys_addr_t)page_address(page)); + + } + +diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h +index 0c7d11022d0f..4f6a1673b3a6 100644 +--- a/arch/arc/plat-eznps/include/plat/ctop.h ++++ b/arch/arc/plat-eznps/include/plat/ctop.h +@@ -21,6 +21,7 @@ + #error "Incorrect ctop.h include" + #endif + ++#include <linux/types.h> + #include <soc/nps/common.h> + + /* core auxiliary registers */ +@@ -143,6 +144,15 @@ struct nps_host_reg_gim_p_int_dst { + }; + + /* AUX registers definition */ ++struct nps_host_reg_aux_dpc { ++ union { ++ struct { ++ u32 ien:1, men:1, hen:1, reserved:29; ++ }; ++ u32 value; ++ }; ++}; ++ + struct nps_host_reg_aux_udmc { + union { + struct { +diff --git a/arch/arc/plat-eznps/mtm.c b/arch/arc/plat-eznps/mtm.c +index 2388de3d09ef..ed0077ef666e 100644 +--- a/arch/arc/plat-eznps/mtm.c ++++ b/arch/arc/plat-eznps/mtm.c +@@ -15,6 +15,8 @@ + */ + + #include <linux/smp.h> ++#include <linux/init.h> ++#include <linux/kernel.h> + #include <linux/io.h> + #include <linux/log2.h> + #include <asm/arcregs.h> +@@ -157,10 +159,10 @@ void mtm_enable_core(unsigned int cpu) + /* Verify and set the value of the mtm hs counter */ + static int __init set_mtm_hs_ctr(char *ctr_str) + { +- long hs_ctr; ++ int hs_ctr; + int ret; + +- ret = kstrtol(ctr_str, 0, &hs_ctr); ++ ret = kstrtoint(ctr_str, 0, &hs_ctr); + + if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) { + pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n", +diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c +index 52d1cd14fda4..091e9a3c2dcb 100644 +--- a/arch/arm/probes/kprobes/core.c ++++ b/arch/arm/probes/kprobes/core.c +@@ -291,8 +291,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs) + break; + case KPROBE_REENTER: + /* A nested probe was hit in FIQ, it is a BUG */ +- pr_warn("Unrecoverable kprobe detected at %p.\n", +- p->addr); ++ pr_warn("Unrecoverable kprobe detected.\n"); ++ dump_kprobe(p); + /* fall through */ + default: + /* impossible cases */ +diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c +index 1c98a87786ca..a10d7187ad2c 100644 +--- a/arch/arm/probes/kprobes/test-core.c ++++ b/arch/arm/probes/kprobes/test-core.c +@@ -1517,7 +1517,6 @@ fail: + print_registers(&result_regs); + + if (mem) { +- pr_err("current_stack=%p\n", current_stack); + pr_err("expected_memory:\n"); + print_memory(expected_memory, mem_size); + pr_err("result_memory:\n"); +diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi +index d70e409e2b0c..efac2202b16e 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi +@@ -331,7 +331,7 @@ + reg = <0x0 0xff120000 0x0 0x100>; + interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>; +- clock-names = "sclk_uart", "pclk_uart"; ++ clock-names = "baudclk", "apb_pclk"; + dmas = <&dmac 4>, <&dmac 5>; + #dma-cells = <2>; + pinctrl-names = 
"default"; +diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c +index d849d9804011..22a5921562c7 100644 +--- a/arch/arm64/kernel/probes/kprobes.c ++++ b/arch/arm64/kernel/probes/kprobes.c +@@ -275,7 +275,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, + break; + case KPROBE_HIT_SS: + case KPROBE_REENTER: +- pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr); ++ pr_warn("Unrecoverable kprobe detected.\n"); + dump_kprobe(p); + BUG(); + break; +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c +index 1190d90e01e6..caa295cd5d09 100644 +--- a/arch/arm64/mm/init.c ++++ b/arch/arm64/mm/init.c +@@ -287,7 +287,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) + #ifdef CONFIG_HAVE_ARCH_PFN_VALID + int pfn_valid(unsigned long pfn) + { +- return memblock_is_map_memory(pfn << PAGE_SHIFT); ++ phys_addr_t addr = pfn << PAGE_SHIFT; ++ ++ if ((addr >> PAGE_SHIFT) != pfn) ++ return 0; ++ return memblock_is_map_memory(addr); + } + EXPORT_SYMBOL(pfn_valid); + #endif +diff --git a/arch/mips/Makefile b/arch/mips/Makefile +index a96d97a806c9..5977884b008e 100644 +--- a/arch/mips/Makefile ++++ b/arch/mips/Makefile +@@ -155,15 +155,11 @@ cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap + cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap + cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap + cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap +-cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \ +- -Wa,-mips32 -Wa,--trap +-cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \ +- -Wa,-mips32r2 -Wa,--trap ++cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap ++cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap + cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg +-cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ +- -Wa,-mips64 -Wa,--trap +-cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ +- -Wa,-mips64r2 -Wa,--trap ++cflags-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,--trap ++cflags-$(CONFIG_CPU_MIPS64_R2) += -march=mips64r2 -Wa,--trap + cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap + cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap + cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \ +diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c +index 8c9cbf13d32a..6054d49e608e 100644 +--- a/arch/mips/bcm47xx/setup.c ++++ b/arch/mips/bcm47xx/setup.c +@@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void) + */ + if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706) + cpu_wait = NULL; +- +- /* +- * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail" +- * Enable ExternalSync for sync instruction to take effect +- */ +- set_c0_config7(MIPS_CONF7_ES); + break; + #endif + } +diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h +index 60c787d943b0..a6810923b3f0 100644 +--- a/arch/mips/include/asm/mipsregs.h ++++ b/arch/mips/include/asm/mipsregs.h +@@ -680,8 +680,6 @@ + #define MIPS_CONF7_WII (_ULCAST_(1) << 31) + + #define MIPS_CONF7_RPS (_ULCAST_(1) << 2) +-/* ExternalSync */ +-#define MIPS_CONF7_ES (_ULCAST_(1) << 8) + + #define MIPS_CONF7_IAR (_ULCAST_(1) << 10) + #define MIPS_CONF7_AR (_ULCAST_(1) << 16) +@@ 
-2747,7 +2745,6 @@ __BUILD_SET_C0(status) + __BUILD_SET_C0(cause) + __BUILD_SET_C0(config) + __BUILD_SET_C0(config5) +-__BUILD_SET_C0(config7) + __BUILD_SET_C0(intcontrol) + __BUILD_SET_C0(intctl) + __BUILD_SET_C0(srsmap) +diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h +index 95b8c471f572..eb1f6030ab85 100644 +--- a/arch/mips/include/asm/processor.h ++++ b/arch/mips/include/asm/processor.h +@@ -141,7 +141,7 @@ struct mips_fpu_struct { + + #define NUM_DSP_REGS 6 + +-typedef __u32 dspreg_t; ++typedef unsigned long dspreg_t; + + struct mips_dsp_state { + dspreg_t dspr[NUM_DSP_REGS]; +@@ -388,7 +388,20 @@ unsigned long get_wchan(struct task_struct *p); + #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29]) + #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status) + ++#ifdef CONFIG_CPU_LOONGSON3 ++/* ++ * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a ++ * tight read loop is executed, because reads take priority over writes & the ++ * hardware (incorrectly) doesn't ensure that writes will eventually occur. ++ * ++ * Since spin loops of any kind should have a cpu_relax() in them, force an SFB ++ * flush from cpu_relax() such that any pending writes will become visible as ++ * expected. ++ */ ++#define cpu_relax() smp_mb() ++#else + #define cpu_relax() barrier() ++#endif + + /* + * Return_address is a replacement for __builtin_return_address(count) +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c +index e058cd300713..efffdf2464ab 100644 +--- a/arch/mips/kernel/ptrace.c ++++ b/arch/mips/kernel/ptrace.c +@@ -847,7 +847,7 @@ long arch_ptrace(struct task_struct *child, long request, + goto out; + } + dregs = __get_dsp_regs(child); +- tmp = (unsigned long) (dregs[addr - DSP_BASE]); ++ tmp = dregs[addr - DSP_BASE]; + break; + } + case DSP_CONTROL: +diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c +index 89026d33a07b..6990240785f6 100644 +--- a/arch/mips/kernel/ptrace32.c ++++ b/arch/mips/kernel/ptrace32.c +@@ -141,7 +141,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + goto out; + } + dregs = __get_dsp_regs(child); +- tmp = (unsigned long) (dregs[addr - DSP_BASE]); ++ tmp = dregs[addr - DSP_BASE]; + break; + } + case DSP_CONTROL: +diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c +index 111ad475aa0c..4c2483f410c2 100644 +--- a/arch/mips/lib/multi3.c ++++ b/arch/mips/lib/multi3.c +@@ -4,12 +4,12 @@ + #include "libgcc.h" + + /* +- * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that +- * specific case only we'll implement it here. ++ * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for ++ * that specific case only we implement that intrinsic here. 
+ * + * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981 + */ +-#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7) ++#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8) + + /* multiply 64-bit values, low 64-bits returned */ + static inline long long notrace dmulu(long long a, long long b) +diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c +index 254634fb3fc7..fee1e1f8c9d3 100644 +--- a/arch/powerpc/net/bpf_jit_comp64.c ++++ b/arch/powerpc/net/bpf_jit_comp64.c +@@ -322,6 +322,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, + u64 imm64; + u8 *func; + u32 true_cond; ++ u32 tmp_idx; + + /* + * addrs[] maps a BPF bytecode address into a real offset from +@@ -681,11 +682,7 @@ emit_clear: + case BPF_STX | BPF_XADD | BPF_W: + /* Get EA into TMP_REG_1 */ + PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); +- /* error if EA is not word-aligned */ +- PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03); +- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12); +- PPC_LI(b2p[BPF_REG_0], 0); +- PPC_JMP(exit_addr); ++ tmp_idx = ctx->idx * 4; + /* load value from memory into TMP_REG_2 */ + PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); + /* add value from src_reg into this */ +@@ -693,32 +690,16 @@ emit_clear: + /* store result back */ + PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); + /* we're done if this succeeded */ +- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4)); +- /* otherwise, let's try once more */ +- PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); +- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); +- PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); +- /* exit if the store was not successful */ +- PPC_LI(b2p[BPF_REG_0], 0); +- PPC_BCC(COND_NE, exit_addr); ++ PPC_BCC_SHORT(COND_NE, tmp_idx); + break; + /* *(u64 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_DW: + PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); +- /* error if EA is not doubleword-aligned */ +- PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07); +- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4)); +- PPC_LI(b2p[BPF_REG_0], 0); +- PPC_JMP(exit_addr); +- PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); +- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); +- PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); +- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4)); ++ tmp_idx = ctx->idx * 4; + PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); + PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); + PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); +- PPC_LI(b2p[BPF_REG_0], 0); +- PPC_BCC(COND_NE, exit_addr); ++ PPC_BCC_SHORT(COND_NE, tmp_idx); + break; + + /* +diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h +index de11ecc99c7c..9c9970a5dfb1 100644 +--- a/arch/s390/include/asm/qdio.h ++++ b/arch/s390/include/asm/qdio.h +@@ -262,7 +262,6 @@ struct qdio_outbuf_state { + void *user; + }; + +-#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00 + #define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01 + + #define CHSC_AC1_INITIATE_INPUTQ 0x80 +diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c +index 242b78c0a9ec..40f1888bc4ab 100644 +--- a/arch/s390/mm/fault.c ++++ b/arch/s390/mm/fault.c +@@ -486,6 +486,8 @@ retry: + /* No reason to continue if interrupted by SIGKILL. 
*/ + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { + fault = VM_FAULT_SIGNAL; ++ if (flags & FAULT_FLAG_RETRY_NOWAIT) ++ goto out_up; + goto out; + } + if (unlikely(fault & VM_FAULT_ERROR)) +diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c +index 382153ff17e3..dc3cede7f2ec 100644 +--- a/arch/s390/mm/page-states.c ++++ b/arch/s390/mm/page-states.c +@@ -271,7 +271,7 @@ void arch_set_page_states(int make_stable) + list_for_each(l, &zone->free_area[order].free_list[t]) { + page = list_entry(l, struct page, lru); + if (make_stable) +- set_page_stable_dat(page, 0); ++ set_page_stable_dat(page, order); + else + set_page_unused(page, order); + } +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c +index 45f1ea117128..6b1474fa99ab 100644 +--- a/arch/s390/net/bpf_jit_comp.c ++++ b/arch/s390/net/bpf_jit_comp.c +@@ -518,8 +518,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit) + /* br %r1 */ + _EMIT2(0x07f1); + } else { +- /* larl %r1,.+14 */ +- EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); + /* ex 0,S390_lowcore.br_r1_tampoline */ + EMIT4_DISP(0x44000000, REG_0, REG_0, + offsetof(struct lowcore, br_r1_trampoline)); +diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c +index 06a80434cfe6..5bd374491f94 100644 +--- a/arch/s390/numa/numa.c ++++ b/arch/s390/numa/numa.c +@@ -134,26 +134,14 @@ void __init numa_setup(void) + { + pr_info("NUMA mode: %s\n", mode->name); + nodes_clear(node_possible_map); ++ /* Initially attach all possible CPUs to node 0. */ ++ cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask); + if (mode->setup) + mode->setup(); + numa_setup_memory(); + memblock_dump_all(); + } + +-/* +- * numa_init_early() - Initialization initcall +- * +- * This runs when only one CPU is online and before the first +- * topology update is called for by the scheduler. +- */ +-static int __init numa_init_early(void) +-{ +- /* Attach all possible CPUs to node 0 for now. 
*/ +- cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask); +- return 0; +-} +-early_initcall(numa_init_early); +- + /* + * numa_init_late() - Initialization initcall + * +diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c +index 0fe649c0d542..960c4a362d8c 100644 +--- a/arch/s390/pci/pci.c ++++ b/arch/s390/pci/pci.c +@@ -420,6 +420,8 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) + hwirq = 0; + for_each_pci_msi_entry(msi, pdev) { + rc = -EIO; ++ if (hwirq >= msi_vecs) ++ break; + irq = irq_alloc_desc(0); /* Alloc irq on node 0 */ + if (irq < 0) + return -ENOMEM; +diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild +index 80ddc01f57ac..fcbc0c0aa087 100644 +--- a/arch/sparc/include/asm/Kbuild ++++ b/arch/sparc/include/asm/Kbuild +@@ -14,6 +14,7 @@ generic-y += local64.h + generic-y += mcs_spinlock.h + generic-y += mm-arch-hooks.h + generic-y += module.h ++generic-y += msi.h + generic-y += preempt.h + generic-y += rwsem.h + generic-y += serial.h +diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c +index 3b397081047a..83aaf4888999 100644 +--- a/arch/sparc/kernel/time_64.c ++++ b/arch/sparc/kernel/time_64.c +@@ -813,7 +813,7 @@ static void __init get_tick_patch(void) + } + } + +-static void init_tick_ops(struct sparc64_tick_ops *ops) ++static void __init init_tick_ops(struct sparc64_tick_ops *ops) + { + unsigned long freq, quotient, tick; + +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index 1c63a4b5320d..2af0af33362a 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -170,6 +170,7 @@ config X86 + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP + select HAVE_RCU_TABLE_FREE ++ select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION + select HAVE_STACK_VALIDATION if X86_64 +diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile +index 98018a621f6b..3a250ca2406c 100644 +--- a/arch/x86/boot/compressed/Makefile ++++ b/arch/x86/boot/compressed/Makefile +@@ -104,9 +104,13 @@ define cmd_check_data_rel + done + endef + ++# We need to run two commands under "if_changed", so merge them into a ++# single invocation. ++quiet_cmd_check-and-link-vmlinux = LD $@ ++ cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld) ++ + $(obj)/vmlinux: $(vmlinux-objs-y) FORCE +- $(call if_changed,check_data_rel) +- $(call if_changed,ld) ++ $(call if_changed,check-and-link-vmlinux) + + OBJCOPYFLAGS_vmlinux.bin := -R .comment -S + $(obj)/vmlinux.bin: vmlinux FORCE +diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile +index c366c0adeb40..b545bf9d2328 100644 +--- a/arch/x86/entry/vdso/Makefile ++++ b/arch/x86/entry/vdso/Makefile +@@ -74,9 +74,9 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE + CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ + $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \ + -fno-omit-frame-pointer -foptimize-sibling-calls \ +- -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO ++ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS) + +-$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) ++$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) + + # + # vDSO code runs in userspace and -pg doesn't help with profiling anyway. 
+@@ -147,11 +147,13 @@ KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32)) + KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32)) + KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32)) + KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32)) ++KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32)) + KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic + KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector) + KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls) + KBUILD_CFLAGS_32 += -fno-omit-frame-pointer + KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING ++KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS) + $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) + + $(obj)/vdso32.so.dbg: FORCE \ +diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c +index 786fd875de92..8c51844694e2 100644 +--- a/arch/x86/events/amd/ibs.c ++++ b/arch/x86/events/amd/ibs.c +@@ -579,7 +579,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) + { + struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu); + struct perf_event *event = pcpu->event; +- struct hw_perf_event *hwc = &event->hw; ++ struct hw_perf_event *hwc; + struct perf_sample_data data; + struct perf_raw_record raw; + struct pt_regs regs; +@@ -602,6 +602,10 @@ fail: + return 0; + } + ++ if (WARN_ON_ONCE(!event)) ++ goto fail; ++ ++ hwc = &event->hw; + msr = hwc->config_base; + buf = ibs_data.regs; + rdmsrl(msr, *buf); +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c +index 717c9219d00e..e5097dc85a06 100644 +--- a/arch/x86/events/core.c ++++ b/arch/x86/events/core.c +@@ -2462,7 +2462,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs + + perf_callchain_store(entry, regs->ip); + +- if (!current->mm) ++ if (!nmi_uaccess_okay()) + return; + + if (perf_callchain_user32(regs, entry)) +diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h +index c14f2a74b2be..15450a675031 100644 +--- a/arch/x86/include/asm/irqflags.h ++++ b/arch/x86/include/asm/irqflags.h +@@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void) + return flags; + } + +-static inline void native_restore_fl(unsigned long flags) ++extern inline void native_restore_fl(unsigned long flags); ++extern inline void native_restore_fl(unsigned long flags) + { + asm volatile("push %0 ; popf" + : /* no output */ +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h +index 0e856c0628b3..b12c8d70dd33 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -132,6 +132,8 @@ struct cpuinfo_x86 { + /* Index into per_cpu list: */ + u16 cpu_index; + u32 microcode; ++ /* Address space bits used by the cache internally */ ++ u8 x86_cache_bits; + } __randomize_layout; + + struct cpuid_regs { +@@ -180,9 +182,9 @@ extern const struct seq_operations cpuinfo_op; + + extern void cpu_detect(struct cpuinfo_x86 *c); + +-static inline unsigned long l1tf_pfn_limit(void) ++static inline unsigned long long l1tf_pfn_limit(void) + { +- return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1; ++ return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT); + } + + extern void early_cpu_init(void); +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h +index 875ca99b82ee..5f00ecb9d251 100644 +--- a/arch/x86/include/asm/tlbflush.h ++++ b/arch/x86/include/asm/tlbflush.h +@@ -175,8 +175,16 @@ struct tlb_state { + * 
are on. This means that it may not match current->active_mm, + * which will contain the previous user mm when we're in lazy TLB + * mode even if we've already switched back to swapper_pg_dir. ++ * ++ * During switch_mm_irqs_off(), loaded_mm will be set to ++ * LOADED_MM_SWITCHING during the brief interrupts-off window ++ * when CR3 and loaded_mm would otherwise be inconsistent. This ++ * is for nmi_uaccess_okay()'s benefit. + */ + struct mm_struct *loaded_mm; ++ ++#define LOADED_MM_SWITCHING ((struct mm_struct *)1) ++ + u16 loaded_mm_asid; + u16 next_asid; + /* last user mm's ctx id */ +@@ -246,6 +254,38 @@ struct tlb_state { + }; + DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); + ++/* ++ * Blindly accessing user memory from NMI context can be dangerous ++ * if we're in the middle of switching the current user task or ++ * switching the loaded mm. It can also be dangerous if we ++ * interrupted some kernel code that was temporarily using a ++ * different mm. ++ */ ++static inline bool nmi_uaccess_okay(void) ++{ ++ struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); ++ struct mm_struct *current_mm = current->mm; ++ ++ VM_WARN_ON_ONCE(!loaded_mm); ++ ++ /* ++ * The condition we want to check is ++ * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though, ++ * if we're running in a VM with shadow paging, and nmi_uaccess_okay() ++ * is supposed to be reasonably fast. ++ * ++ * Instead, we check the almost equivalent but somewhat conservative ++ * condition below, and we rely on the fact that switch_mm_irqs_off() ++ * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3. ++ */ ++ if (loaded_mm != current_mm) ++ return false; ++ ++ VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa())); ++ ++ return true; ++} ++ + /* Initialize cr4 shadow for this CPU. */ + static inline void cr4_init_shadow(void) + { +diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h +index 52250681f68c..d92ccff4e615 100644 +--- a/arch/x86/include/asm/vgtod.h ++++ b/arch/x86/include/asm/vgtod.h +@@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void) + * + * If RDPID is available, use it. + */ +- alternative_io ("lsl %[p],%[seg]", ++ alternative_io ("lsl %[seg],%[p]", + ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ + X86_FEATURE_RDPID, + [p] "=a" (p), [seg] "r" (__PER_CPU_SEG)); +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index d07addb99b71..3e435f88621d 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -652,6 +652,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation); + enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; + EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); + ++/* ++ * These CPUs all support 44bits physical address space internally in the ++ * cache but CPUID can report a smaller number of physical address bits. ++ * ++ * The L1TF mitigation uses the top most address bit for the inversion of ++ * non present PTEs. When the installed memory reaches into the top most ++ * address bit due to memory holes, which has been observed on machines ++ * which report 36bits physical address bits and have 32G RAM installed, ++ * then the mitigation range check in l1tf_select_mitigation() triggers. ++ * This is a false positive because the mitigation is still possible due to ++ * the fact that the cache uses 44bit internally. Use the cache bits ++ * instead of the reported physical bits and adjust them on the affected ++ * machines to 44bit if the reported bits are less than 44. 
++ */ ++static void override_cache_bits(struct cpuinfo_x86 *c) ++{ ++ if (c->x86 != 6) ++ return; ++ ++ switch (c->x86_model) { ++ case INTEL_FAM6_NEHALEM: ++ case INTEL_FAM6_WESTMERE: ++ case INTEL_FAM6_SANDYBRIDGE: ++ case INTEL_FAM6_IVYBRIDGE: ++ case INTEL_FAM6_HASWELL_CORE: ++ case INTEL_FAM6_HASWELL_ULT: ++ case INTEL_FAM6_HASWELL_GT3E: ++ case INTEL_FAM6_BROADWELL_CORE: ++ case INTEL_FAM6_BROADWELL_GT3E: ++ case INTEL_FAM6_SKYLAKE_MOBILE: ++ case INTEL_FAM6_SKYLAKE_DESKTOP: ++ case INTEL_FAM6_KABYLAKE_MOBILE: ++ case INTEL_FAM6_KABYLAKE_DESKTOP: ++ if (c->x86_cache_bits < 44) ++ c->x86_cache_bits = 44; ++ break; ++ } ++} ++ + static void __init l1tf_select_mitigation(void) + { + u64 half_pa; +@@ -659,6 +698,8 @@ static void __init l1tf_select_mitigation(void) + if (!boot_cpu_has_bug(X86_BUG_L1TF)) + return; + ++ override_cache_bits(&boot_cpu_data); ++ + switch (l1tf_mitigation) { + case L1TF_MITIGATION_OFF: + case L1TF_MITIGATION_FLUSH_NOWARN: +@@ -678,14 +719,13 @@ static void __init l1tf_select_mitigation(void) + return; + #endif + +- /* +- * This is extremely unlikely to happen because almost all +- * systems have far more MAX_PA/2 than RAM can be fit into +- * DIMM slots. +- */ + half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; + if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { + pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); ++ pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", ++ half_pa); ++ pr_info("However, doing so will make a part of your RAM unusable.\n"); ++ pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n"); + return; + } + +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index dd02ee4fa8cd..7d2a7890a823 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -890,6 +890,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) + } + } + #endif ++ c->x86_cache_bits = c->x86_phys_bits; + } + + static const __initconst struct x86_cpu_id cpu_no_speculation[] = { +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c +index 278be092b300..574dcdc092ab 100644 +--- a/arch/x86/kernel/cpu/intel.c ++++ b/arch/x86/kernel/cpu/intel.c +@@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c) + if (cpu_has(c, X86_FEATURE_HYPERVISOR)) + return false; + ++ if (c->x86 != 6) ++ return false; ++ + for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { + if (c->x86_model == spectre_bad_microcodes[i].model && + c->x86_stepping == spectre_bad_microcodes[i].stepping) +diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c +index a2d8a3908670..224de37821e4 100644 +--- a/arch/x86/kernel/dumpstack.c ++++ b/arch/x86/kernel/dumpstack.c +@@ -17,6 +17,7 @@ + #include <linux/bug.h> + #include <linux/nmi.h> + #include <linux/sysfs.h> ++#include <linux/kasan.h> + + #include <asm/cpu_entry_area.h> + #include <asm/stacktrace.h> +@@ -298,7 +299,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) + * We're not going to return, but we might be on an IST stack or + * have very little stack space left. Rewind the stack and kill + * the task. ++ * Before we rewind the stack, we have to tell KASAN that we're going to ++ * reuse the task stack and that existing poisons are invalid. 
+ */ ++ kasan_unpoison_task_stack(current); + rewind_stack_do_exit(signr); + } + NOKPROBE_SYMBOL(oops_end); +diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c +index fa093b77689f..cbeecfcc66d6 100644 +--- a/arch/x86/kernel/process_64.c ++++ b/arch/x86/kernel/process_64.c +@@ -370,6 +370,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) + start_thread_common(regs, new_ip, new_sp, + __USER_CS, __USER_DS, 0); + } ++EXPORT_SYMBOL_GPL(start_thread); + + #ifdef CONFIG_COMPAT + void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp) +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 282bbcbf3b6a..f6bebcec60b4 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -5067,8 +5067,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) + + clgi(); + +- local_irq_enable(); +- + /* + * If this vCPU has touched SPEC_CTRL, restore the guest's value if + * it's non-zero. Since vmentry is serialising on affected CPUs, there +@@ -5077,6 +5075,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) + */ + x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); + ++ local_irq_enable(); ++ + asm volatile ( + "push %%" _ASM_BP "; \n\t" + "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t" +@@ -5199,12 +5199,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) + if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) + svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); + +- x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); +- + reload_tss(vcpu); + + local_irq_disable(); + ++ x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); ++ + vcpu->arch.cr2 = svm->vmcb->save.cr2; + vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; + vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index f015ca3997d9..8958b35f6008 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -8108,21 +8108,20 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) + /* Emulate the VMPTRST instruction */ + static int handle_vmptrst(struct kvm_vcpu *vcpu) + { +- unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); +- u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); +- gva_t vmcs_gva; ++ unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION); ++ u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); ++ gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; + struct x86_exception e; ++ gva_t gva; + + if (!nested_vmx_check_permission(vcpu)) + return 1; + +- if (get_vmx_mem_address(vcpu, exit_qualification, +- vmx_instruction_info, true, &vmcs_gva)) ++ if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva)) + return 1; + /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ +- if (kvm_write_guest_virt_system(vcpu, vmcs_gva, +- (void *)&to_vmx(vcpu)->nested.current_vmptr, +- sizeof(u64), &e)) { ++ if (kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr, ++ sizeof(gpa_t), &e)) { + kvm_inject_page_fault(vcpu, &e); + return 1; + } +@@ -9171,9 +9170,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) + * information but as all relevant affected CPUs have 32KiB L1D cache size + * there is no point in doing so. 
+ */ +-#define L1D_CACHE_ORDER 4 +-static void *vmx_l1d_flush_pages; +- + static void vmx_l1d_flush(struct kvm_vcpu *vcpu) + { + int size = PAGE_SIZE << L1D_CACHE_ORDER; +diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c +index c8c6ad0d58b8..3f435d7fca5e 100644 +--- a/arch/x86/lib/usercopy.c ++++ b/arch/x86/lib/usercopy.c +@@ -7,6 +7,8 @@ + #include <linux/uaccess.h> + #include <linux/export.h> + ++#include <asm/tlbflush.h> ++ + /* + * We rely on the nested NMI work to allow atomic faults from the NMI path; the + * nested NMI paths are careful to preserve CR2. +@@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) + if (__range_not_ok(from, n, TASK_SIZE)) + return n; + ++ if (!nmi_uaccess_okay()) ++ return n; ++ + /* + * Even though this function is typically called from NMI/IRQ context + * disable pagefaults so that its behaviour is consistent even when +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index 37f60dfd7e4e..94b8d90830d1 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -892,7 +892,7 @@ unsigned long max_swapfile_size(void) + + if (boot_cpu_has_bug(X86_BUG_L1TF)) { + /* Limit the swap file size to MAX_PA/2 for L1TF workaround */ +- unsigned long l1tf_limit = l1tf_pfn_limit() + 1; ++ unsigned long long l1tf_limit = l1tf_pfn_limit(); + /* + * We encode swap offsets also with 3 bits below those for pfn + * which makes the usable limit higher. +@@ -900,7 +900,7 @@ unsigned long max_swapfile_size(void) + #if CONFIG_PGTABLE_LEVELS > 2 + l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT; + #endif +- pages = min_t(unsigned long, l1tf_limit, pages); ++ pages = min_t(unsigned long long, l1tf_limit, pages); + } + return pages; + } +diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c +index 5f4805d69aab..53f1c18b15bd 100644 +--- a/arch/x86/mm/mmap.c ++++ b/arch/x86/mm/mmap.c +@@ -191,7 +191,7 @@ bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) + /* If it's real memory always allow */ + if (pfn_valid(pfn)) + return true; +- if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN)) ++ if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN)) + return false; + return true; + } +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c +index 0c936435ea93..83a3f4c935fc 100644 +--- a/arch/x86/mm/tlb.c ++++ b/arch/x86/mm/tlb.c +@@ -292,6 +292,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, + + choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); + ++ /* Let nmi_uaccess_okay() know that we're changing CR3. */ ++ this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); ++ barrier(); ++ + if (need_flush) { + this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); + this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); +@@ -322,6 +326,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, + if (next != &init_mm) + this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); + ++ /* Make sure we write CR3 before loaded_mm. 
*/ ++ barrier(); ++ + this_cpu_write(cpu_tlbstate.loaded_mm, next); + this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); + } +diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c +index 8e2e4757adcb..5a42ae4078c2 100644 +--- a/drivers/base/power/clock_ops.c ++++ b/drivers/base/power/clock_ops.c +@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk); + int of_pm_clk_add_clks(struct device *dev) + { + struct clk **clks; +- unsigned int i, count; ++ int i, count; + int ret; + + if (!dev || !dev->of_node) +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c +index 5feba04ab940..5e55d03d3d01 100644 +--- a/drivers/block/nbd.c ++++ b/drivers/block/nbd.c +@@ -112,12 +112,16 @@ struct nbd_device { + struct task_struct *task_setup; + }; + ++#define NBD_CMD_REQUEUED 1 ++ + struct nbd_cmd { + struct nbd_device *nbd; ++ struct mutex lock; + int index; + int cookie; +- struct completion send_complete; + blk_status_t status; ++ unsigned long flags; ++ u32 cmd_cookie; + }; + + #if IS_ENABLED(CONFIG_DEBUG_FS) +@@ -146,6 +150,35 @@ static inline struct device *nbd_to_dev(struct nbd_device *nbd) + return disk_to_dev(nbd->disk); + } + ++static void nbd_requeue_cmd(struct nbd_cmd *cmd) ++{ ++ struct request *req = blk_mq_rq_from_pdu(cmd); ++ ++ if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags)) ++ blk_mq_requeue_request(req, true); ++} ++ ++#define NBD_COOKIE_BITS 32 ++ ++static u64 nbd_cmd_handle(struct nbd_cmd *cmd) ++{ ++ struct request *req = blk_mq_rq_from_pdu(cmd); ++ u32 tag = blk_mq_unique_tag(req); ++ u64 cookie = cmd->cmd_cookie; ++ ++ return (cookie << NBD_COOKIE_BITS) | tag; ++} ++ ++static u32 nbd_handle_to_tag(u64 handle) ++{ ++ return (u32)handle; ++} ++ ++static u32 nbd_handle_to_cookie(u64 handle) ++{ ++ return (u32)(handle >> NBD_COOKIE_BITS); ++} ++ + static const char *nbdcmd_to_ascii(int cmd) + { + switch (cmd) { +@@ -306,6 +339,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, + } + config = nbd->config; + ++ if (!mutex_trylock(&cmd->lock)) ++ return BLK_EH_RESET_TIMER; ++ + if (config->num_connections > 1) { + dev_err_ratelimited(nbd_to_dev(nbd), + "Connection timed out, retrying\n"); +@@ -328,7 +364,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, + nbd_mark_nsock_dead(nbd, nsock, 1); + mutex_unlock(&nsock->tx_lock); + } +- blk_mq_requeue_request(req, true); ++ mutex_unlock(&cmd->lock); ++ nbd_requeue_cmd(cmd); + nbd_config_put(nbd); + return BLK_EH_NOT_HANDLED; + } +@@ -338,6 +375,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, + } + set_bit(NBD_TIMEDOUT, &config->runtime_flags); + cmd->status = BLK_STS_IOERR; ++ mutex_unlock(&cmd->lock); + sock_shutdown(nbd); + nbd_config_put(nbd); + +@@ -414,9 +452,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) + struct iov_iter from; + unsigned long size = blk_rq_bytes(req); + struct bio *bio; ++ u64 handle; + u32 type; + u32 nbd_cmd_flags = 0; +- u32 tag = blk_mq_unique_tag(req); + int sent = nsock->sent, skip = 0; + + iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); +@@ -458,6 +496,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) + goto send_pages; + } + iov_iter_advance(&from, sent); ++ } else { ++ cmd->cmd_cookie++; + } + cmd->index = index; + cmd->cookie = nsock->cookie; +@@ -466,7 +506,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) + request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); + request.len = 
htonl(size); + } +- memcpy(request.handle, &tag, sizeof(tag)); ++ handle = nbd_cmd_handle(cmd); ++ memcpy(request.handle, &handle, sizeof(handle)); + + dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", + cmd, nbdcmd_to_ascii(type), +@@ -484,6 +525,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) + nsock->pending = req; + nsock->sent = sent; + } ++ set_bit(NBD_CMD_REQUEUED, &cmd->flags); + return BLK_STS_RESOURCE; + } + dev_err_ratelimited(disk_to_dev(nbd->disk), +@@ -525,6 +567,7 @@ send_pages: + */ + nsock->pending = req; + nsock->sent = sent; ++ set_bit(NBD_CMD_REQUEUED, &cmd->flags); + return BLK_STS_RESOURCE; + } + dev_err(disk_to_dev(nbd->disk), +@@ -557,10 +600,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) + struct nbd_reply reply; + struct nbd_cmd *cmd; + struct request *req = NULL; ++ u64 handle; + u16 hwq; + u32 tag; + struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)}; + struct iov_iter to; ++ int ret = 0; + + reply.magic = 0; + iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); +@@ -578,8 +623,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) + return ERR_PTR(-EPROTO); + } + +- memcpy(&tag, reply.handle, sizeof(u32)); +- ++ memcpy(&handle, reply.handle, sizeof(handle)); ++ tag = nbd_handle_to_tag(handle); + hwq = blk_mq_unique_tag_to_hwq(tag); + if (hwq < nbd->tag_set.nr_hw_queues) + req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], +@@ -590,11 +635,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) + return ERR_PTR(-ENOENT); + } + cmd = blk_mq_rq_to_pdu(req); ++ ++ mutex_lock(&cmd->lock); ++ if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) { ++ dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n", ++ req, cmd->cmd_cookie, nbd_handle_to_cookie(handle)); ++ ret = -ENOENT; ++ goto out; ++ } ++ if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) { ++ dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n", ++ req); ++ ret = -ENOENT; ++ goto out; ++ } + if (ntohl(reply.error)) { + dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", + ntohl(reply.error)); + cmd->status = BLK_STS_IOERR; +- return cmd; ++ goto out; + } + + dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd); +@@ -619,18 +678,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) + if (nbd_disconnected(config) || + config->num_connections <= 1) { + cmd->status = BLK_STS_IOERR; +- return cmd; ++ goto out; + } +- return ERR_PTR(-EIO); ++ ret = -EIO; ++ goto out; + } + dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", + cmd, bvec.bv_len); + } +- } else { +- /* See the comment in nbd_queue_rq. */ +- wait_for_completion(&cmd->send_complete); + } +- return cmd; ++out: ++ mutex_unlock(&cmd->lock); ++ return ret ? 
ERR_PTR(ret) : cmd; + } + + static void recv_work(struct work_struct *work) +@@ -793,7 +852,7 @@ again: + */ + blk_mq_start_request(req); + if (unlikely(nsock->pending && nsock->pending != req)) { +- blk_mq_requeue_request(req, true); ++ nbd_requeue_cmd(cmd); + ret = 0; + goto out; + } +@@ -806,7 +865,7 @@ again: + dev_err_ratelimited(disk_to_dev(nbd->disk), + "Request send failed, requeueing\n"); + nbd_mark_nsock_dead(nbd, nsock, 1); +- blk_mq_requeue_request(req, true); ++ nbd_requeue_cmd(cmd); + ret = 0; + } + out: +@@ -830,7 +889,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, + * that the server is misbehaving (or there was an error) before we're + * done sending everything over the wire. + */ +- init_completion(&cmd->send_complete); ++ mutex_lock(&cmd->lock); ++ clear_bit(NBD_CMD_REQUEUED, &cmd->flags); + + /* We can be called directly from the user space process, which means we + * could possibly have signals pending so our sendmsg will fail. In +@@ -842,7 +902,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, + ret = BLK_STS_IOERR; + else if (!ret) + ret = BLK_STS_OK; +- complete(&cmd->send_complete); ++ mutex_unlock(&cmd->lock); + + return ret; + } +@@ -1446,6 +1506,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq, + { + struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq); + cmd->nbd = set->driver_data; ++ cmd->flags = 0; ++ mutex_init(&cmd->lock); + return 0; + } + +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c +index bfc566d3f31a..8cfa10ab7abc 100644 +--- a/drivers/cdrom/cdrom.c ++++ b/drivers/cdrom/cdrom.c +@@ -2542,7 +2542,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi, + if (!CDROM_CAN(CDC_SELECT_DISC) || + (arg == CDSL_CURRENT || arg == CDSL_NONE)) + return cdi->ops->drive_status(cdi, CDSL_CURRENT); +- if (((int)arg >= cdi->capacity)) ++ if (arg >= cdi->capacity) + return -EINVAL; + return cdrom_slot_status(cdi, arg); + } +diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c +index dba5259def60..86b526b7d990 100644 +--- a/drivers/char/tpm/tpm-interface.c ++++ b/drivers/char/tpm/tpm-interface.c +@@ -423,7 +423,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, + header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); + header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE | + TSS2_RESMGR_TPM_RC_LAYER); +- return bufsiz; ++ return sizeof(*header); + } + + if (bufsiz > TPM_BUFSIZE) +diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c +index 6847120b61cd..62d0a69f8da0 100644 +--- a/drivers/clk/rockchip/clk-rk3399.c ++++ b/drivers/clk/rockchip/clk-rk3399.c +@@ -630,7 +630,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { + MUX(0, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT, + RK3399_CLKSEL_CON(31), 0, 2, MFLAGS), + COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "clk_i2sout", mux_i2sout_p, CLK_SET_RATE_PARENT, +- RK3399_CLKSEL_CON(30), 8, 2, MFLAGS, ++ RK3399_CLKSEL_CON(31), 2, 1, MFLAGS, + RK3399_CLKGATE_CON(8), 12, GFLAGS), + + /* uart */ +diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c +index 02ba5f2aa0e6..cd777c75291d 100644 +--- a/drivers/crypto/vmx/aes_ctr.c ++++ b/drivers/crypto/vmx/aes_ctr.c +@@ -27,21 +27,23 @@ + #include <asm/switch_to.h> + #include <crypto/aes.h> + #include <crypto/scatterwalk.h> ++#include <crypto/skcipher.h> ++ + #include "aesp8-ppc.h" + + struct p8_aes_ctr_ctx { +- struct crypto_blkcipher *fallback; ++ struct crypto_skcipher *fallback; + struct aes_key 
enc_key; + }; + + static int p8_aes_ctr_init(struct crypto_tfm *tfm) + { + const char *alg = crypto_tfm_alg_name(tfm); +- struct crypto_blkcipher *fallback; ++ struct crypto_skcipher *fallback; + struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + +- fallback = +- crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); ++ fallback = crypto_alloc_skcipher(alg, 0, ++ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + printk(KERN_ERR + "Failed to allocate transformation for '%s': %ld\n", +@@ -49,9 +51,9 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm) + return PTR_ERR(fallback); + } + +- crypto_blkcipher_set_flags( ++ crypto_skcipher_set_flags( + fallback, +- crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm)); ++ crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); + ctx->fallback = fallback; + + return 0; +@@ -62,7 +64,7 @@ static void p8_aes_ctr_exit(struct crypto_tfm *tfm) + struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->fallback) { +- crypto_free_blkcipher(ctx->fallback); ++ crypto_free_skcipher(ctx->fallback); + ctx->fallback = NULL; + } + } +@@ -81,7 +83,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, + pagefault_enable(); + preempt_enable(); + +- ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); ++ ret += crypto_skcipher_setkey(ctx->fallback, key, keylen); + return ret; + } + +@@ -115,15 +117,14 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, + struct blkcipher_walk walk; + struct p8_aes_ctr_ctx *ctx = + crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); +- struct blkcipher_desc fallback_desc = { +- .tfm = ctx->fallback, +- .info = desc->info, +- .flags = desc->flags +- }; + + if (in_interrupt()) { +- ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, +- nbytes); ++ SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); ++ skcipher_request_set_tfm(req, ctx->fallback); ++ skcipher_request_set_callback(req, desc->flags, NULL, NULL); ++ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); ++ ret = crypto_skcipher_encrypt(req); ++ skcipher_request_zero(req); + } else { + blkcipher_walk_init(&walk, dst, src, nbytes); + ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); +diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c +index d6f3d9ee1350..70b3c556f6cf 100644 +--- a/drivers/gpio/gpiolib-acpi.c ++++ b/drivers/gpio/gpiolib-acpi.c +@@ -25,6 +25,7 @@ + + struct acpi_gpio_event { + struct list_head node; ++ struct list_head initial_sync_list; + acpi_handle handle; + unsigned int pin; + unsigned int irq; +@@ -50,6 +51,9 @@ struct acpi_gpio_chip { + struct list_head events; + }; + ++static LIST_HEAD(acpi_gpio_initial_sync_list); ++static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock); ++ + static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) + { + if (!gc->parent) +@@ -142,6 +146,21 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin) + return gpiochip_get_desc(chip, offset); + } + ++static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event) ++{ ++ mutex_lock(&acpi_gpio_initial_sync_list_lock); ++ list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list); ++ mutex_unlock(&acpi_gpio_initial_sync_list_lock); ++} ++ ++static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event) ++{ ++ mutex_lock(&acpi_gpio_initial_sync_list_lock); ++ if (!list_empty(&event->initial_sync_list)) ++ list_del_init(&event->initial_sync_list); ++ mutex_unlock(&acpi_gpio_initial_sync_list_lock); ++} ++ + static 
irqreturn_t acpi_gpio_irq_handler(int irq, void *data) + { + struct acpi_gpio_event *event = data; +@@ -193,7 +212,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, + irq_handler_t handler = NULL; + struct gpio_desc *desc; + unsigned long irqflags; +- int ret, pin, irq; ++ int ret, pin, irq, value; + + if (!acpi_gpio_get_irq_resource(ares, &agpio)) + return AE_OK; +@@ -228,6 +247,8 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, + + gpiod_direction_input(desc); + ++ value = gpiod_get_value(desc); ++ + ret = gpiochip_lock_as_irq(chip, pin); + if (ret) { + dev_err(chip->parent, "Failed to lock GPIO as interrupt\n"); +@@ -269,6 +290,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, + event->irq = irq; + event->pin = pin; + event->desc = desc; ++ INIT_LIST_HEAD(&event->initial_sync_list); + + ret = request_threaded_irq(event->irq, NULL, handler, irqflags, + "ACPI:Event", event); +@@ -283,6 +305,18 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, + enable_irq_wake(irq); + + list_add_tail(&event->node, &acpi_gpio->events); ++ ++ /* ++ * Make sure we trigger the initial state of the IRQ when using RISING ++ * or FALLING. Note we run the handlers on late_init, the AML code ++ * may refer to OperationRegions from other (builtin) drivers which ++ * may be probed after us. ++ */ ++ if (handler == acpi_gpio_irq_handler && ++ (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || ++ ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))) ++ acpi_gpio_add_to_initial_sync_list(event); ++ + return AE_OK; + + fail_free_event: +@@ -355,6 +389,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) + list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { + struct gpio_desc *desc; + ++ acpi_gpio_del_from_initial_sync_list(event); ++ + if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) + disable_irq_wake(event->irq); + +@@ -1210,3 +1246,21 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id) + + return con_id == NULL; + } ++ ++/* Sync the initial state of handlers after all builtin drivers have probed */ ++static int acpi_gpio_initial_sync(void) ++{ ++ struct acpi_gpio_event *event, *ep; ++ ++ mutex_lock(&acpi_gpio_initial_sync_list_lock); ++ list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list, ++ initial_sync_list) { ++ acpi_evaluate_object(event->handle, NULL, NULL, NULL); ++ list_del_init(&event->initial_sync_list); ++ } ++ mutex_unlock(&acpi_gpio_initial_sync_list_lock); ++ ++ return 0; ++} ++/* We must use _sync so that this runs after the first deferred_probe run */ ++late_initcall_sync(acpi_gpio_initial_sync); +diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +index b2431aee7887..f5091827628a 100644 +--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c ++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +@@ -424,6 +424,18 @@ static void adv7511_hpd_work(struct work_struct *work) + else + status = connector_status_disconnected; + ++ /* ++ * The bridge resets its registers on unplug. So when we get a plug ++ * event and we're already supposed to be powered, cycle the bridge to ++ * restore its state. 
++ */ ++ if (status == connector_status_connected && ++ adv7511->connector.status == connector_status_disconnected && ++ adv7511->powered) { ++ regcache_mark_dirty(adv7511->regmap); ++ adv7511_power_on(adv7511); ++ } ++ + if (adv7511->connector.status != status) { + adv7511->connector.status = status; + drm_kms_helper_hotplug_event(adv7511->connector.dev); +diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c +index 56dd7a9a8e25..dd5312b02a8d 100644 +--- a/drivers/gpu/drm/imx/imx-ldb.c ++++ b/drivers/gpu/drm/imx/imx-ldb.c +@@ -612,6 +612,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) + return PTR_ERR(imx_ldb->regmap); + } + ++ /* disable LDB by resetting the control register to POR default */ ++ regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0); ++ + imx_ldb->dev = dev; + + if (of_id) +@@ -652,14 +655,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) + if (ret || i < 0 || i > 1) + return -EINVAL; + ++ if (!of_device_is_available(child)) ++ continue; ++ + if (dual && i > 0) { + dev_warn(dev, "dual-channel mode, ignoring second output\n"); + continue; + } + +- if (!of_device_is_available(child)) +- continue; +- + channel = &imx_ldb->channel[i]; + channel->ldb = imx_ldb; + channel->chno = i; +diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h +index 2a75ab80527a..2c149b841cf1 100644 +--- a/drivers/gpu/drm/udl/udl_drv.h ++++ b/drivers/gpu/drm/udl/udl_drv.h +@@ -110,7 +110,7 @@ udl_fb_user_fb_create(struct drm_device *dev, + struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd); + +-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, ++int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr, + const char *front, char **urb_buf_ptr, + u32 byte_offset, u32 device_byte_offset, u32 byte_width, + int *ident_ptr, int *sent_ptr); +diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c +index d5583190f3e4..8746eeeec44d 100644 +--- a/drivers/gpu/drm/udl/udl_fb.c ++++ b/drivers/gpu/drm/udl/udl_fb.c +@@ -90,7 +90,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, + int bytes_identical = 0; + struct urb *urb; + int aligned_x; +- int bpp = fb->base.format->cpp[0]; ++ int log_bpp; ++ ++ BUG_ON(!is_power_of_2(fb->base.format->cpp[0])); ++ log_bpp = __ffs(fb->base.format->cpp[0]); + + if (!fb->active_16) + return 0; +@@ -125,12 +128,12 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, + + for (i = y; i < y + height ; i++) { + const int line_offset = fb->base.pitches[0] * i; +- const int byte_offset = line_offset + (x * bpp); +- const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp); +- if (udl_render_hline(dev, bpp, &urb, ++ const int byte_offset = line_offset + (x << log_bpp); ++ const int dev_byte_offset = (fb->base.width * i + x) << log_bpp; ++ if (udl_render_hline(dev, log_bpp, &urb, + (char *) fb->obj->vmapping, + &cmd, byte_offset, dev_byte_offset, +- width * bpp, ++ width << log_bpp, + &bytes_identical, &bytes_sent)) + goto error; + } +@@ -149,7 +152,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, + error: + atomic_add(bytes_sent, &udl->bytes_sent); + atomic_add(bytes_identical, &udl->bytes_identical); +- atomic_add(width*height*bpp, &udl->bytes_rendered); ++ atomic_add((width * height) << log_bpp, &udl->bytes_rendered); + end_cycles = get_cycles(); + atomic_add(((unsigned int) ((end_cycles - start_cycles) + >> 10)), /* Kcycles */ +@@ -221,7 +224,7 
@@ static int udl_fb_open(struct fb_info *info, int user) + + struct fb_deferred_io *fbdefio; + +- fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); ++ fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); + + if (fbdefio) { + fbdefio->delay = DL_DEFIO_WRITE_DELAY; +diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c +index 0328b2c7b210..f8ea3c99b523 100644 +--- a/drivers/gpu/drm/udl/udl_main.c ++++ b/drivers/gpu/drm/udl/udl_main.c +@@ -169,18 +169,13 @@ static void udl_free_urb_list(struct drm_device *dev) + struct list_head *node; + struct urb_node *unode; + struct urb *urb; +- int ret; + unsigned long flags; + + DRM_DEBUG("Waiting for completes and freeing all render urbs\n"); + + /* keep waiting and freeing, until we've got 'em all */ + while (count--) { +- +- /* Getting interrupted means a leak, but ok at shutdown*/ +- ret = down_interruptible(&udl->urbs.limit_sem); +- if (ret) +- break; ++ down(&udl->urbs.limit_sem); + + spin_lock_irqsave(&udl->urbs.lock, flags); + +@@ -204,17 +199,22 @@ static void udl_free_urb_list(struct drm_device *dev) + static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) + { + struct udl_device *udl = dev->dev_private; +- int i = 0; + struct urb *urb; + struct urb_node *unode; + char *buf; ++ size_t wanted_size = count * size; + + spin_lock_init(&udl->urbs.lock); + ++retry: + udl->urbs.size = size; + INIT_LIST_HEAD(&udl->urbs.list); + +- while (i < count) { ++ sema_init(&udl->urbs.limit_sem, 0); ++ udl->urbs.count = 0; ++ udl->urbs.available = 0; ++ ++ while (udl->urbs.count * size < wanted_size) { + unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL); + if (!unode) + break; +@@ -230,11 +230,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) + } + unode->urb = urb; + +- buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL, ++ buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL, + &urb->transfer_dma); + if (!buf) { + kfree(unode); + usb_free_urb(urb); ++ if (size > PAGE_SIZE) { ++ size /= 2; ++ udl_free_urb_list(dev); ++ goto retry; ++ } + break; + } + +@@ -245,16 +250,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) + + list_add_tail(&unode->entry, &udl->urbs.list); + +- i++; ++ up(&udl->urbs.limit_sem); ++ udl->urbs.count++; ++ udl->urbs.available++; + } + +- sema_init(&udl->urbs.limit_sem, i); +- udl->urbs.count = i; +- udl->urbs.available = i; +- +- DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size); ++ DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size); + +- return i; ++ return udl->urbs.count; + } + + struct urb *udl_get_urb(struct drm_device *dev) +diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c +index b992644c17e6..f3331d33547a 100644 +--- a/drivers/gpu/drm/udl/udl_transfer.c ++++ b/drivers/gpu/drm/udl/udl_transfer.c +@@ -83,12 +83,12 @@ static inline u16 pixel32_to_be16(const uint32_t pixel) + ((pixel >> 8) & 0xf800)); + } + +-static inline u16 get_pixel_val16(const uint8_t *pixel, int bpp) ++static inline u16 get_pixel_val16(const uint8_t *pixel, int log_bpp) + { +- u16 pixel_val16 = 0; +- if (bpp == 2) ++ u16 pixel_val16; ++ if (log_bpp == 1) + pixel_val16 = *(const uint16_t *)pixel; +- else if (bpp == 4) ++ else + pixel_val16 = pixel32_to_be16(*(const uint32_t *)pixel); + return pixel_val16; + } +@@ -125,8 +125,9 @@ static void udl_compress_hline16( + const u8 *const pixel_end, + uint32_t *device_address_ptr, + uint8_t **command_buffer_ptr, +- 
const uint8_t *const cmd_buffer_end, int bpp) ++ const uint8_t *const cmd_buffer_end, int log_bpp) + { ++ const int bpp = 1 << log_bpp; + const u8 *pixel = *pixel_start_ptr; + uint32_t dev_addr = *device_address_ptr; + uint8_t *cmd = *command_buffer_ptr; +@@ -153,12 +154,12 @@ static void udl_compress_hline16( + raw_pixels_count_byte = cmd++; /* we'll know this later */ + raw_pixel_start = pixel; + +- cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL, +- (unsigned long)(pixel_end - pixel) / bpp, +- (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp; ++ cmd_pixel_end = pixel + (min3(MAX_CMD_PIXELS + 1UL, ++ (unsigned long)(pixel_end - pixel) >> log_bpp, ++ (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) << log_bpp); + + prefetch_range((void *) pixel, cmd_pixel_end - pixel); +- pixel_val16 = get_pixel_val16(pixel, bpp); ++ pixel_val16 = get_pixel_val16(pixel, log_bpp); + + while (pixel < cmd_pixel_end) { + const u8 *const start = pixel; +@@ -170,7 +171,7 @@ static void udl_compress_hline16( + pixel += bpp; + + while (pixel < cmd_pixel_end) { +- pixel_val16 = get_pixel_val16(pixel, bpp); ++ pixel_val16 = get_pixel_val16(pixel, log_bpp); + if (pixel_val16 != repeating_pixel_val16) + break; + pixel += bpp; +@@ -179,10 +180,10 @@ static void udl_compress_hline16( + if (unlikely(pixel > start + bpp)) { + /* go back and fill in raw pixel count */ + *raw_pixels_count_byte = (((start - +- raw_pixel_start) / bpp) + 1) & 0xFF; ++ raw_pixel_start) >> log_bpp) + 1) & 0xFF; + + /* immediately after raw data is repeat byte */ +- *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF; ++ *cmd++ = (((pixel - start) >> log_bpp) - 1) & 0xFF; + + /* Then start another raw pixel span */ + raw_pixel_start = pixel; +@@ -192,14 +193,14 @@ static void udl_compress_hline16( + + if (pixel > raw_pixel_start) { + /* finalize last RAW span */ +- *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF; ++ *raw_pixels_count_byte = ((pixel - raw_pixel_start) >> log_bpp) & 0xFF; + } else { + /* undo unused byte */ + cmd--; + } + +- *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF; +- dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2; ++ *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) >> log_bpp) & 0xFF; ++ dev_addr += ((pixel - cmd_pixel_start) >> log_bpp) * 2; + } + + if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) { +@@ -222,19 +223,19 @@ static void udl_compress_hline16( + * (that we can only write to, slowly, and can never read), and (optionally) + * our shadow copy that tracks what's been sent to that hardware buffer. 
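The udl conversions above and below replace every divide and multiply by bpp with a shift by log_bpp; this is valid only because bpp is a power of two (2 or 4 here), which udl_handle_damage() asserts with BUG_ON(!is_power_of_2(...)) before computing log_bpp = __ffs(cpp). A minimal sketch of the identity being relied on (illustrative only, not part of the patch):

#include <linux/bitops.h>
#include <linux/types.h>

/* For power-of-two bpp, __ffs(bpp) == log2(bpp), so
 * x * bpp == x << log_bpp and x / bpp == x >> log_bpp. */
static inline u32 pixels_to_bytes(u32 pixels, int log_bpp)
{
        return pixels << log_bpp;
}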
+ */ +-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, ++int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr, + const char *front, char **urb_buf_ptr, + u32 byte_offset, u32 device_byte_offset, + u32 byte_width, + int *ident_ptr, int *sent_ptr) + { + const u8 *line_start, *line_end, *next_pixel; +- u32 base16 = 0 + (device_byte_offset / bpp) * 2; ++ u32 base16 = 0 + (device_byte_offset >> log_bpp) * 2; + struct urb *urb = *urb_ptr; + u8 *cmd = *urb_buf_ptr; + u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length; + +- BUG_ON(!(bpp == 2 || bpp == 4)); ++ BUG_ON(!(log_bpp == 1 || log_bpp == 2)); + + line_start = (u8 *) (front + byte_offset); + next_pixel = line_start; +@@ -244,7 +245,7 @@ int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, + + udl_compress_hline16(&next_pixel, + line_end, &base16, +- (u8 **) &cmd, (u8 *) cmd_end, bpp); ++ (u8 **) &cmd, (u8 *) cmd_end, log_bpp); + + if (cmd >= cmd_end) { + int len = cmd - (u8 *) urb->transfer_buffer; +diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c +index 5f87764d7015..ca9941fa741b 100644 +--- a/drivers/hwmon/nct6775.c ++++ b/drivers/hwmon/nct6775.c +@@ -63,6 +63,7 @@ + #include <linux/bitops.h> + #include <linux/dmi.h> + #include <linux/io.h> ++#include <linux/nospec.h> + #include "lm75.h" + + #define USE_ALTERNATE +@@ -2642,6 +2643,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr, + return err; + if (val > NUM_TEMP) + return -EINVAL; ++ val = array_index_nospec(val, NUM_TEMP + 1); + if (val && (!(data->have_temp & BIT(val - 1)) || + !data->temp_src[val - 1])) + return -EINVAL; +diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c +index b8c43535f16c..5cf670f57be7 100644 +--- a/drivers/i2c/busses/i2c-davinci.c ++++ b/drivers/i2c/busses/i2c-davinci.c +@@ -234,12 +234,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev) + /* + * It's not always possible to have 1 to 2 ratio when d=7, so fall back + * to minimal possible clkh in this case. 
++ * ++ * Note: ++ * CLKH is not allowed to be 0, in this case I2C clock is not generated ++ * at all + */ +- if (clk >= clkl + d) { ++ if (clk > clkl + d) { + clkh = clk - clkl - d; + clkl -= d; + } else { +- clkh = 0; ++ clkh = 1; + clkl = clk - (d << 1); + } + +diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c +index 6f2fe63e8f5a..7b961c9c62ef 100644 +--- a/drivers/i2c/i2c-core-base.c ++++ b/drivers/i2c/i2c-core-base.c +@@ -638,7 +638,7 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr) + static void i2c_adapter_lock_bus(struct i2c_adapter *adapter, + unsigned int flags) + { +- rt_mutex_lock(&adapter->bus_lock); ++ rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter)); + } + + /** +diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c +index 9669ca4937b8..7ba31f6bf148 100644 +--- a/drivers/i2c/i2c-mux.c ++++ b/drivers/i2c/i2c-mux.c +@@ -144,7 +144,7 @@ static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags) + struct i2c_mux_priv *priv = adapter->algo_data; + struct i2c_adapter *parent = priv->muxc->parent; + +- rt_mutex_lock(&parent->mux_lock); ++ rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter)); + if (!(flags & I2C_LOCK_ROOT_ADAPTER)) + return; + i2c_lock_bus(parent, flags); +@@ -181,7 +181,7 @@ static void i2c_parent_lock_bus(struct i2c_adapter *adapter, + struct i2c_mux_priv *priv = adapter->algo_data; + struct i2c_adapter *parent = priv->muxc->parent; + +- rt_mutex_lock(&parent->mux_lock); ++ rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter)); + i2c_lock_bus(parent, flags); + } + +diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c +index 3bdb799d3b4b..2c436376f13e 100644 +--- a/drivers/iommu/arm-smmu.c ++++ b/drivers/iommu/arm-smmu.c +@@ -2100,12 +2100,16 @@ static int arm_smmu_device_probe(struct platform_device *pdev) + if (err) + return err; + +- if (smmu->version == ARM_SMMU_V2 && +- smmu->num_context_banks != smmu->num_context_irqs) { +- dev_err(dev, +- "found only %d context interrupt(s) but %d required\n", +- smmu->num_context_irqs, smmu->num_context_banks); +- return -ENODEV; ++ if (smmu->version == ARM_SMMU_V2) { ++ if (smmu->num_context_banks > smmu->num_context_irqs) { ++ dev_err(dev, ++ "found only %d context irq(s) but %d required\n", ++ smmu->num_context_irqs, smmu->num_context_banks); ++ return -ENODEV; ++ } ++ ++ /* Ignore superfluous interrupts */ ++ smmu->num_context_irqs = smmu->num_context_banks; + } + + for (i = 0; i < smmu->num_global_irqs; ++i) { +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c +index 22efc039f302..8d1d40dbf744 100644 +--- a/drivers/misc/mei/main.c ++++ b/drivers/misc/mei/main.c +@@ -291,7 +291,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, + goto out; + } + +- *offset = 0; + cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file); + if (!cb) { + rets = -ENOMEM; +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c +index ca3fa82316c2..d3ce904e929e 100644 +--- a/drivers/net/can/m_can/m_can.c ++++ b/drivers/net/can/m_can/m_can.c +@@ -1637,8 +1637,6 @@ static int m_can_plat_probe(struct platform_device *pdev) + priv->can.clock.freq = clk_get_rate(cclk); + priv->mram_base = mram_addr; + +- m_can_of_parse_mram(priv, mram_config_vals); +- + platform_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + +@@ -1649,6 +1647,8 @@ static int m_can_plat_probe(struct platform_device *pdev) + goto failed_free_dev; + } + ++ m_can_of_parse_mram(priv, 
mram_config_vals); ++ + devm_can_led_init(dev); + + dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n", +@@ -1698,8 +1698,6 @@ static __maybe_unused int m_can_resume(struct device *dev) + + pinctrl_pm_select_default_state(dev); + +- m_can_init_ram(priv); +- + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + if (netif_running(ndev)) { +@@ -1709,6 +1707,7 @@ static __maybe_unused int m_can_resume(struct device *dev) + if (ret) + return ret; + ++ m_can_init_ram(priv); + m_can_start(ndev); + netif_device_attach(ndev); + netif_start_queue(ndev); +diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c +index c7427bdd3a4b..2949a381a94d 100644 +--- a/drivers/net/can/mscan/mpc5xxx_can.c ++++ b/drivers/net/can/mscan/mpc5xxx_can.c +@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev, + return 0; + } + cdm = of_iomap(np_cdm, 0); ++ if (!cdm) { ++ of_node_put(np_cdm); ++ dev_err(&ofdev->dev, "can't map clock node!\n"); ++ return 0; ++ } + + if (in_8(&cdm->ipb_clk_sel) & 0x1) + freq *= 2; +diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig +index 5b7658bcf020..5c3ef9fc8207 100644 +--- a/drivers/net/ethernet/3com/Kconfig ++++ b/drivers/net/ethernet/3com/Kconfig +@@ -32,7 +32,7 @@ config EL3 + + config 3C515 + tristate "3c515 ISA \"Fast EtherLink\"" +- depends on ISA && ISA_DMA_API ++ depends on ISA && ISA_DMA_API && !PPC32 + ---help--- + If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet + network card, say Y here. +diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig +index d5c15e8bb3de..a8e8f4e9c1bb 100644 +--- a/drivers/net/ethernet/amd/Kconfig ++++ b/drivers/net/ethernet/amd/Kconfig +@@ -44,7 +44,7 @@ config AMD8111_ETH + + config LANCE + tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" +- depends on ISA && ISA_DMA_API && !ARM ++ depends on ISA && ISA_DMA_API && !ARM && !PPC32 + ---help--- + If you have a network (Ethernet) card of this type, say Y here. + Some LinkSys cards are of this type. +@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN + + config NI65 + tristate "NI6510 support" +- depends on ISA && ISA_DMA_API && !ARM ++ depends on ISA && ISA_DMA_API && !ARM && !PPC32 + ---help--- + If you have a network (Ethernet) card of this type, say Y here. + +diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +index 8c9986f3fc01..3615c2a06fda 100644 +--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c ++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +@@ -1685,6 +1685,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter) + skb = build_skb(page_address(page) + adapter->rx_page_offset, + adapter->rx_frag_size); + if (likely(skb)) { ++ skb_reserve(skb, NET_SKB_PAD); + adapter->rx_page_offset += adapter->rx_frag_size; + if (adapter->rx_page_offset >= PAGE_SIZE) + adapter->rx_page = NULL; +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +index 1e33abde4a3e..3fd1085a093f 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +@@ -3387,14 +3387,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) + DP(BNX2X_MSG_ETHTOOL, + "rss re-configured, UDP 4-tupple %s\n", + udp_rss_requested ? 
"enabled" : "disabled"); +- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); ++ if (bp->state == BNX2X_STATE_OPEN) ++ return bnx2x_rss(bp, &bp->rss_conf_obj, false, ++ true); + } else if ((info->flow_type == UDP_V6_FLOW) && + (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { + bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; + DP(BNX2X_MSG_ETHTOOL, + "rss re-configured, UDP 4-tupple %s\n", + udp_rss_requested ? "enabled" : "disabled"); +- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); ++ if (bp->state == BNX2X_STATE_OPEN) ++ return bnx2x_rss(bp, &bp->rss_conf_obj, false, ++ true); + } + return 0; + +@@ -3508,7 +3512,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir, + bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id; + } + +- return bnx2x_config_rss_eth(bp, false); ++ if (bp->state == BNX2X_STATE_OPEN) ++ return bnx2x_config_rss_eth(bp, false); ++ ++ return 0; + } + + /** +diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig +index 5ab912937aff..ec0b545197e2 100644 +--- a/drivers/net/ethernet/cirrus/Kconfig ++++ b/drivers/net/ethernet/cirrus/Kconfig +@@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS + config CS89x0 + tristate "CS89x0 support" + depends on ISA || EISA || ARM ++ depends on !PPC32 + ---help--- + Support for CS89x0 chipset based Ethernet cards. If you have a + network (Ethernet) card of this type, say Y and read the file +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c +index 800edfbd36c1..2bfaf3e118b1 100644 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c +@@ -2007,28 +2007,42 @@ static int enic_stop(struct net_device *netdev) + return 0; + } + ++static int _enic_change_mtu(struct net_device *netdev, int new_mtu) ++{ ++ bool running = netif_running(netdev); ++ int err = 0; ++ ++ ASSERT_RTNL(); ++ if (running) { ++ err = enic_stop(netdev); ++ if (err) ++ return err; ++ } ++ ++ netdev->mtu = new_mtu; ++ ++ if (running) { ++ err = enic_open(netdev); ++ if (err) ++ return err; ++ } ++ ++ return 0; ++} ++ + static int enic_change_mtu(struct net_device *netdev, int new_mtu) + { + struct enic *enic = netdev_priv(netdev); +- int running = netif_running(netdev); + + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) + return -EOPNOTSUPP; + +- if (running) +- enic_stop(netdev); +- +- netdev->mtu = new_mtu; +- + if (netdev->mtu > enic->port_mtu) + netdev_warn(netdev, +- "interface MTU (%d) set higher than port MTU (%d)\n", +- netdev->mtu, enic->port_mtu); ++ "interface MTU (%d) set higher than port MTU (%d)\n", ++ netdev->mtu, enic->port_mtu); + +- if (running) +- enic_open(netdev); +- +- return 0; ++ return _enic_change_mtu(netdev, new_mtu); + } + + static void enic_change_mtu_work(struct work_struct *work) +@@ -2036,47 +2050,9 @@ static void enic_change_mtu_work(struct work_struct *work) + struct enic *enic = container_of(work, struct enic, change_mtu_work); + struct net_device *netdev = enic->netdev; + int new_mtu = vnic_dev_mtu(enic->vdev); +- int err; +- unsigned int i; +- +- new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu)); + + rtnl_lock(); +- +- /* Stop RQ */ +- del_timer_sync(&enic->notify_timer); +- +- for (i = 0; i < enic->rq_count; i++) +- napi_disable(&enic->napi[i]); +- +- vnic_intr_mask(&enic->intr[0]); +- enic_synchronize_irqs(enic); +- err = vnic_rq_disable(&enic->rq[0]); +- if (err) { +- rtnl_unlock(); +- netdev_err(netdev, "Unable to disable RQ.\n"); +- return; +- } +- 
vnic_rq_clean(&enic->rq[0], enic_free_rq_buf); +- vnic_cq_clean(&enic->cq[0]); +- vnic_intr_clean(&enic->intr[0]); +- +- /* Fill RQ with new_mtu-sized buffers */ +- netdev->mtu = new_mtu; +- vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); +- /* Need at least one buffer on ring to get going */ +- if (vnic_rq_desc_used(&enic->rq[0]) == 0) { +- rtnl_unlock(); +- netdev_err(netdev, "Unable to alloc receive buffers.\n"); +- return; +- } +- +- /* Start RQ */ +- vnic_rq_enable(&enic->rq[0]); +- napi_enable(&enic->napi[0]); +- vnic_intr_unmask(&enic->intr[0]); +- enic_notify_timer_start(enic); +- ++ (void)_enic_change_mtu(netdev, new_mtu); + rtnl_unlock(); + + netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu); +@@ -2867,7 +2843,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + */ + + enic->port_mtu = enic->config.mtu; +- (void)enic_change_mtu(netdev, enic->port_mtu); + + err = enic_set_mac_addr(netdev, enic->mac_addr); + if (err) { +@@ -2954,6 +2929,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + /* MTU range: 68 - 9000 */ + netdev->min_mtu = ENIC_MIN_MTU; + netdev->max_mtu = ENIC_MAX_MTU; ++ netdev->mtu = enic->port_mtu; + + err = register_netdev(netdev); + if (err) { +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c +index eb53bd93065e..a696b5b2d40e 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c ++++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c +@@ -981,6 +981,7 @@ static int nic_dev_init(struct pci_dev *pdev) + hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS, + nic_dev, link_status_event_handler); + ++ SET_NETDEV_DEV(netdev, &pdev->dev); + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "Failed to register netdev\n"); +diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c +index 91fe03617106..72496060e332 100644 +--- a/drivers/net/ethernet/netronome/nfp/flower/main.c ++++ b/drivers/net/ethernet/netronome/nfp/flower/main.c +@@ -79,7 +79,7 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port) + return NFP_REPR_TYPE_VF; + } + +- return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC; ++ return __NFP_REPR_TYPE_MAX; + } + + static struct net_device * +@@ -90,6 +90,8 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id) + u8 port = 0; + + repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port); ++ if (repr_type > NFP_REPR_TYPE_MAX) ++ return NULL; + + reprs = rcu_dereference(app->reprs[repr_type]); + if (!reprs) +diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c +index c5452b445c37..83c1c4fa102b 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c +@@ -663,7 +663,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, + + p_ramrod->common.update_approx_mcast_flg = 1; + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { +- u32 *p_bins = (u32 *)p_params->bins; ++ u32 *p_bins = p_params->bins; + + p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]); + } +@@ -1474,8 +1474,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) + { +- unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; + struct vport_update_ramrod_data *p_ramrod = NULL; ++ u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u8 
abs_vport_id = 0; +@@ -1511,26 +1511,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, + /* explicitly clear out the entire vector */ + memset(&p_ramrod->approx_mcast.bins, 0, + sizeof(p_ramrod->approx_mcast.bins)); +- memset(bins, 0, sizeof(unsigned long) * +- ETH_MULTICAST_MAC_BINS_IN_REGS); ++ memset(bins, 0, sizeof(bins)); + /* filter ADD op is explicit set op and it removes + * any existing filters for the vport + */ + if (p_filter_cmd->opcode == QED_FILTER_ADD) { + for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { +- u32 bit; ++ u32 bit, nbits; + + bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); +- __set_bit(bit, bins); ++ nbits = sizeof(u32) * BITS_PER_BYTE; ++ bins[bit / nbits] |= 1 << (bit % nbits); + } + + /* Convert to correct endianity */ + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + struct vport_update_ramrod_mcast *p_ramrod_bins; +- u32 *p_bins = (u32 *)bins; + + p_ramrod_bins = &p_ramrod->approx_mcast; +- p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]); ++ p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]); + } + } + +diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h +index cc1f248551c9..91d383f3a661 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h ++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h +@@ -214,7 +214,7 @@ struct qed_sp_vport_update_params { + u8 anti_spoofing_en; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; +- unsigned long bins[8]; ++ u32 bins[8]; + struct qed_rss_params *rss_params; + struct qed_filter_accept_flags accept_flags; + struct qed_sge_tpa_params *sge_tpa_params; +diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c +index 376485d99357..3c469355f5a4 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c +@@ -1182,6 +1182,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, + break; + default: + p_link->speed = 0; ++ p_link->link_up = 0; + } + + if (p_link->link_up && p_link->speed) +@@ -1279,9 +1280,15 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) + phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0; + phy_cfg.adv_speed = params->speed.advertised_speeds; + phy_cfg.loopback_mode = params->loopback_mode; +- if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { +- if (params->eee.enable) +- phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; ++ ++ /* There are MFWs that share this capability regardless of whether ++ * this is feasible or not. And given that at the very least adv_caps ++ * would be set internally by qed, we want to make sure LFA would ++ * still work. 
++ */ ++ if ((p_hwfn->mcp_info->capabilities & ++ FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) { ++ phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; + if (params->eee.tx_lpi_enable) + phy_cfg.eee_cfg |= EEE_CFG_TX_LPI; + if (params->eee.adv_caps & QED_EEE_1G_ADV) +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c +index d08fe350ab6c..c6411158afd7 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c +@@ -2826,7 +2826,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, + + p_data->update_approx_mcast_flg = 1; + memcpy(p_data->bins, p_mcast_tlv->bins, +- sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); ++ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; + } + +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c +index 91b5e9f02a62..6eb85db69f9a 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c +@@ -1126,7 +1126,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, + resp_size += sizeof(struct pfvf_def_resp_tlv); + + memcpy(p_mcast_tlv->bins, p_params->bins, +- sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); ++ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); + } + + update_rx = p_params->accept_flags.update_rx_mode_config; +@@ -1272,7 +1272,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, + u32 bit; + + bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); +- __set_bit(bit, sp_params.bins); ++ sp_params.bins[bit / 32] |= 1 << (bit % 32); + } + } + +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h +index 97d44dfb38ca..1e93c712fa34 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h +@@ -392,7 +392,12 @@ struct vfpf_vport_update_mcast_bin_tlv { + struct channel_tlv tl; + u8 padding[4]; + +- u64 bins[8]; ++ /* There are only 256 approx bins, and in HSI they're divided into ++ * 32-bit values. As old VFs used to set-bit to the values on its side, ++ * the upper half of the array is never expected to contain any data. 
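The bins rework in the qed hunks above drops __set_bit(), which operates on unsigned long and therefore lays bits out differently on a 64-bit host than the u32-based firmware interface expects, in favor of open-coded u32 bit math. A hedged sketch of that operation as a standalone helper (the helper name is assumed; the drivers inline this):

#include <linux/types.h>

/* Set one bit in an array of u32 bins, as the firmware interface
 * expects: bit / 32 selects the word, bit % 32 the bit inside it. */
static inline void qed_mcast_bin_set(u32 *bins, u32 bit)
{
        bins[bit / 32] |= 1U << (bit % 32);
}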
++ */ ++ u64 bins[4]; ++ u64 obsolete_bins[4]; + }; + + struct vfpf_vport_update_accept_param_tlv { +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +index 16c3bfbe1992..757a3b37ae8a 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +@@ -218,6 +218,7 @@ issue: + ret = of_mdiobus_register(bus, np1); + if (ret) { + mdiobus_free(bus); ++ lp->mii_bus = NULL; + return ret; + } + return 0; +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index 6d3811c869fd..31684f3382f6 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -1245,7 +1245,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ + {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ +- {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */ ++ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ + {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ + {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ + {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ +diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c +index 4698450c77d1..bb43d176eb4e 100644 +--- a/drivers/net/wan/lmc/lmc_main.c ++++ b/drivers/net/wan/lmc/lmc_main.c +@@ -1371,7 +1371,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ + case 0x001: + printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name); + break; +- case 0x010: ++ case 0x002: + printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name); + break; + default: +diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c +index cb987c2ecc6b..87131f663292 100644 +--- a/drivers/net/wireless/broadcom/b43/leds.c ++++ b/drivers/net/wireless/broadcom/b43/leds.c +@@ -131,7 +131,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led, + led->wl = dev->wl; + led->index = led_index; + led->activelow = activelow; +- strncpy(led->name, name, sizeof(led->name)); ++ strlcpy(led->name, name, sizeof(led->name)); + atomic_set(&led->state, 0); + + led->led_dev.name = led->name; +diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c +index fd4565389c77..bc922118b6ac 100644 +--- a/drivers/net/wireless/broadcom/b43legacy/leds.c ++++ b/drivers/net/wireless/broadcom/b43legacy/leds.c +@@ -101,7 +101,7 @@ static int b43legacy_register_led(struct b43legacy_wldev *dev, + led->dev = dev; + led->index = led_index; + led->activelow = activelow; +- strncpy(led->name, name, sizeof(led->name)); ++ strlcpy(led->name, name, sizeof(led->name)); + + led->led_dev.name = led->name; + led->led_dev.default_trigger = default_trigger; +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index a67d03716510..afb99876fa9e 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -306,6 +306,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, + old_value = *dbbuf_db; + *dbbuf_db = value; + ++ /* ++ * Ensure that the doorbell is updated before reading the event ++ * index from memory. 
The controller needs to provide similar ++ * ordering to ensure the event index is updated before reading ++ * the doorbell. ++ */ ++ mb(); ++ + if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) + return false; + }
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c +index a4e9f430d452..e2cca91fd266 100644 +--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c ++++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c +@@ -433,7 +433,7 @@ static void imx1_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, + const char *name; + int i, ret; + +- if (group > info->ngroups) ++ if (group >= info->ngroups) + return; + + seq_puts(s, "\n");
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c +index fe98d4ac0df3..e1e7e587b45b 100644 +--- a/drivers/platform/x86/ideapad-laptop.c ++++ b/drivers/platform/x86/ideapad-laptop.c +@@ -1097,10 +1097,10 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { + }, + }, + { +- .ident = "Lenovo Legion Y520-15IKBN", ++ .ident = "Lenovo Legion Y520-15IKB", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), +- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKBN"), ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKB"), + }, + }, + {
+diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c +index 37e523374fe0..371b5ec70087 100644 +--- a/drivers/power/supply/generic-adc-battery.c ++++ b/drivers/power/supply/generic-adc-battery.c +@@ -243,10 +243,10 @@ static int gab_probe(struct platform_device *pdev) + struct power_supply_desc *psy_desc; + struct power_supply_config psy_cfg = {}; + struct gab_platform_data *pdata = pdev->dev.platform_data; +- enum power_supply_property *properties; + int ret = 0; + int chan; +- int index = 0; ++ int index = ARRAY_SIZE(gab_props); ++ bool any = false; + + adc_bat = devm_kzalloc(&pdev->dev, sizeof(*adc_bat), GFP_KERNEL); + if (!adc_bat) { +@@ -280,8 +280,6 @@ static int gab_probe(struct platform_device *pdev) + } + + memcpy(psy_desc->properties, gab_props, sizeof(gab_props)); +- properties = (enum power_supply_property *) +- ((char *)psy_desc->properties + sizeof(gab_props)); + + /* + * getting channel from iio and copying the battery properties +@@ -295,15 +293,22 @@ static int gab_probe(struct platform_device *pdev) + adc_bat->channel[chan] = NULL; + } else { + /* copying properties for supported channels only */ +- memcpy(properties + sizeof(*(psy_desc->properties)) * index, +- &gab_dyn_props[chan], +- sizeof(gab_dyn_props[chan])); +- index++; ++ int index2; ++ ++ for (index2 = 0; index2 < index; index2++) { ++ if (psy_desc->properties[index2] == ++ gab_dyn_props[chan]) ++ break; /* already known */ ++ } ++ if (index2 == index) /* really new */ ++ psy_desc->properties[index++] = ++ gab_dyn_props[chan]; ++ any = true; + } + } + + /* none of the channels are supported so let's bail out */ +- if (index == 0) { ++ if (!any) { + ret = -ENODEV; + goto second_mem_fail; + } +@@ -314,7 +319,7 @@ static int gab_probe(struct platform_device *pdev) + * as some channels may not be supported by the device. So + * we need to take care of that.
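The reworked gab_probe() loop above now scans the already-collected properties before appending, so a channel property that also appears in gab_props is not registered twice. The same logic as a standalone helper, with a hypothetical name (the driver open-codes it):

#include <linux/power_supply.h>

/* Append prop to props[0..count) only if it is not already there;
 * returns the possibly grown count. */
static int gab_add_prop_once(enum power_supply_property *props,
                             int count, enum power_supply_property prop)
{
        int i;

        for (i = 0; i < count; i++)
                if (props[i] == prop)
                        return count;   /* already known */
        props[count] = prop;
        return count + 1;
}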
+ */ +- psy_desc->num_properties = ARRAY_SIZE(gab_props) + index; ++ psy_desc->num_properties = index; + + adc_bat->psy = power_supply_register(&pdev->dev, psy_desc, &psy_cfg); + if (IS_ERR(adc_bat->psy)) { +diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c +index 8941e7caaf4d..c7afdbded26b 100644 +--- a/drivers/s390/cio/qdio_main.c ++++ b/drivers/s390/cio/qdio_main.c +@@ -641,21 +641,20 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, + unsigned long phys_aob = 0; + + if (!q->use_cq) +- goto out; ++ return 0; + + if (!q->aobs[bufnr]) { + struct qaob *aob = qdio_allocate_aob(); + q->aobs[bufnr] = aob; + } + if (q->aobs[bufnr]) { +- q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE; + q->sbal_state[bufnr].aob = q->aobs[bufnr]; + q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user; + phys_aob = virt_to_phys(q->aobs[bufnr]); + WARN_ON_ONCE(phys_aob & 0xFF); + } + +-out: ++ q->sbal_state[bufnr].flags = 0; + return phys_aob; + } + +diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c +index fff6f1851dc1..03019e07abb9 100644 +--- a/drivers/scsi/fcoe/fcoe_ctlr.c ++++ b/drivers/scsi/fcoe/fcoe_ctlr.c +@@ -754,9 +754,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, + case ELS_LOGO: + if (fip->mode == FIP_MODE_VN2VN) { + if (fip->state != FIP_ST_VNMP_UP) +- return -EINVAL; ++ goto drop; + if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI) +- return -EINVAL; ++ goto drop; + } else { + if (fip->state != FIP_ST_ENABLED) + return 0; +@@ -799,9 +799,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, + fip->send(fip, skb); + return -EINPROGRESS; + drop: +- kfree_skb(skb); + LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n", + op, ntoh24(fh->fh_d_id)); ++ kfree_skb(skb); + return -EINVAL; + } + EXPORT_SYMBOL(fcoe_ctlr_els_send); +diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c +index 31d31aad3de1..89b1f1af2fd4 100644 +--- a/drivers/scsi/libfc/fc_rport.c ++++ b/drivers/scsi/libfc/fc_rport.c +@@ -2164,6 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) + FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", + fc_rport_state(rdata)); + ++ rdata->flags &= ~FC_RP_STARTED; + fc_rport_enter_delete(rdata, RPORT_EV_STOP); + mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, fc_rport_destroy); +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c +index bddbe2da5283..cf8a15e54d83 100644 +--- a/drivers/scsi/libiscsi.c ++++ b/drivers/scsi/libiscsi.c +@@ -284,11 +284,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) + */ + if (opcode != ISCSI_OP_SCSI_DATA_OUT) { + iscsi_conn_printk(KERN_INFO, conn, +- "task [op %x/%x itt " ++ "task [op %x itt " + "0x%x/0x%x] " + "rejected.\n", +- task->hdr->opcode, opcode, +- task->itt, task->hdr_itt); ++ opcode, task->itt, ++ task->hdr_itt); + return -EACCES; + } + /* +@@ -297,10 +297,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) + */ + if (conn->session->fast_abort) { + iscsi_conn_printk(KERN_INFO, conn, +- "task [op %x/%x itt " ++ "task [op %x itt " + "0x%x/0x%x] fast abort.\n", +- task->hdr->opcode, opcode, +- task->itt, task->hdr_itt); ++ opcode, task->itt, ++ task->hdr_itt); + return -EACCES; + } + break; +diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c +index d3940c5d079d..63dd9bc21ff2 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c ++++ 
b/drivers/scsi/mpt3sas/mpt3sas_transport.c +@@ -1936,12 +1936,12 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, + pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + rc = -EFAULT; +- goto out; ++ goto job_done; + } + + rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex); + if (rc) +- goto out; ++ goto job_done; + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name, +@@ -2066,6 +2066,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, + out: + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->transport_cmds.mutex); ++job_done: + bsg_job_done(job, rc, reslen); + } + +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c +index 40406c162d0d..8ce12ffcbb7a 100644 +--- a/drivers/scsi/scsi_sysfs.c ++++ b/drivers/scsi/scsi_sysfs.c +@@ -721,8 +721,24 @@ static ssize_t + sdev_store_delete(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) + { +- if (device_remove_file_self(dev, attr)) +- scsi_remove_device(to_scsi_device(dev)); ++ struct kernfs_node *kn; ++ ++ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); ++ WARN_ON_ONCE(!kn); ++ /* ++ * Concurrent writes into the "delete" sysfs attribute may trigger ++ * concurrent calls to device_remove_file() and scsi_remove_device(). ++ * device_remove_file() handles concurrent removal calls by ++ * serializing these and by ignoring the second and later removal ++ * attempts. Concurrent calls of scsi_remove_device() are ++ * serialized. The second and later calls of scsi_remove_device() are ++ * ignored because the first call of that function changes the device ++ * state into SDEV_DEL. ++ */ ++ device_remove_file(dev, attr); ++ scsi_remove_device(to_scsi_device(dev)); ++ if (kn) ++ sysfs_unbreak_active_protection(kn); + return count; + }; + static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); +diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c +index 777e5f1e52d1..0cd947f78b5b 100644 +--- a/drivers/scsi/vmw_pvscsi.c ++++ b/drivers/scsi/vmw_pvscsi.c +@@ -561,9 +561,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter, + (btstat == BTSTAT_SUCCESS || + btstat == BTSTAT_LINKED_COMMAND_COMPLETED || + btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) { +- cmd->result = (DID_OK << 16) | sdstat; +- if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer) +- cmd->result |= (DRIVER_SENSE << 24); ++ if (sdstat == SAM_STAT_COMMAND_TERMINATED) { ++ cmd->result = (DID_RESET << 16); ++ } else { ++ cmd->result = (DID_OK << 16) | sdstat; ++ if (sdstat == SAM_STAT_CHECK_CONDITION && ++ cmd->sense_buffer) ++ cmd->result |= (DRIVER_SENSE << 24); ++ } + } else + switch (btstat) { + case BTSTAT_SUCCESS: +diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c +index 9e2f0421a01e..0bf6643cca07 100644 +--- a/drivers/staging/media/omap4iss/iss_video.c ++++ b/drivers/staging/media/omap4iss/iss_video.c +@@ -11,7 +11,6 @@ + * (at your option) any later version. 
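The sdev_store_delete() rework above is an instance of the general pattern for a sysfs store method that deletes its own attribute or device. A sketch with hypothetical names, assuming only the sysfs helpers the patch itself calls:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_store_delete(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        struct kernfs_node *kn;

        /* Drop this file's "active" protection so removing it below
         * cannot deadlock against this very write. */
        kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
        device_remove_file(dev, attr);
        foo_remove_device(dev);         /* hypothetical removal step */
        if (kn)
                sysfs_unbreak_active_protection(kn);
        return count;
}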
+ */ + +-#include <asm/cacheflush.h> + #include <linux/clk.h> + #include <linux/mm.h> + #include <linux/pagemap.h> +@@ -24,6 +23,8 @@ + #include <media/v4l2-ioctl.h> + #include <media/v4l2-mc.h> + ++#include <asm/cacheflush.h> ++ + #include "iss_video.h" + #include "iss.h" + +diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c +index 514986b57c2d..25eb3891e34b 100644 +--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c ++++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c +@@ -652,6 +652,7 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) + struct iscsi_param *param; + u32 mrdsl, mbl; + u32 max_npdu, max_iso_npdu; ++ u32 max_iso_payload; + + if (conn->login->leading_connection) { + param = iscsi_find_param_from_key(MAXBURSTLENGTH, +@@ -670,8 +671,10 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) + mrdsl = conn_ops->MaxRecvDataSegmentLength; + max_npdu = mbl / mrdsl; + +- max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD / +- (ISCSI_HDR_LEN + mrdsl + ++ max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss); ++ ++ max_iso_npdu = max_iso_payload / ++ (ISCSI_HDR_LEN + mrdsl + + cxgbit_digest_len[csk->submode]); + + csk->max_iso_npdu = min(max_npdu, max_iso_npdu); +@@ -741,6 +744,9 @@ static int cxgbit_set_params(struct iscsi_conn *conn) + if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl) + conn_ops->MaxRecvDataSegmentLength = cdev->mdsl; + ++ if (cxgbit_set_digest(csk)) ++ return -1; ++ + if (conn->login->leading_connection) { + param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL, + conn->param_list); +@@ -764,7 +770,7 @@ static int cxgbit_set_params(struct iscsi_conn *conn) + if (is_t5(cdev->lldi.adapter_type)) + goto enable_ddp; + else +- goto enable_digest; ++ return 0; + } + + if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) { +@@ -781,10 +787,6 @@ enable_ddp: + } + } + +-enable_digest: +- if (cxgbit_set_digest(csk)) +- return -1; +- + return 0; + } + +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c +index dc13afbd4c88..98e27da34f3c 100644 +--- a/drivers/target/iscsi/iscsi_target_login.c ++++ b/drivers/target/iscsi/iscsi_target_login.c +@@ -345,8 +345,7 @@ static int iscsi_login_zero_tsih_s1( + pr_err("idr_alloc() for sess_idr failed\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); +- kfree(sess); +- return -ENOMEM; ++ goto free_sess; + } + + sess->creation_time = get_jiffies_64(); +@@ -362,20 +361,28 @@ static int iscsi_login_zero_tsih_s1( + ISCSI_LOGIN_STATUS_NO_RESOURCES); + pr_err("Unable to allocate memory for" + " struct iscsi_sess_ops.\n"); +- kfree(sess); +- return -ENOMEM; ++ goto remove_idr; + } + + sess->se_sess = transport_init_session(TARGET_PROT_NORMAL); + if (IS_ERR(sess->se_sess)) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); +- kfree(sess->sess_ops); +- kfree(sess); +- return -ENOMEM; ++ goto free_ops; + } + + return 0; ++ ++free_ops: ++ kfree(sess->sess_ops); ++remove_idr: ++ spin_lock_bh(&sess_idr_lock); ++ idr_remove(&sess_idr, sess->session_index); ++ spin_unlock_bh(&sess_idr_lock); ++free_sess: ++ kfree(sess); ++ conn->sess = NULL; ++ return -ENOMEM; + } + + static int iscsi_login_zero_tsih_s2( +@@ -1162,13 +1169,13 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, + ISCSI_LOGIN_STATUS_INIT_ERR); + if (!zero_tsih || !conn->sess) + goto old_sess_out; +- if (conn->sess->se_sess) +- transport_free_session(conn->sess->se_sess); +- if 
(conn->sess->session_index != 0) { +- spin_lock_bh(&sess_idr_lock); +- idr_remove(&sess_idr, conn->sess->session_index); +- spin_unlock_bh(&sess_idr_lock); +- } ++ ++ transport_free_session(conn->sess->se_sess); ++ ++ spin_lock_bh(&sess_idr_lock); ++ idr_remove(&sess_idr, conn->sess->session_index); ++ spin_unlock_bh(&sess_idr_lock); ++ + kfree(conn->sess->sess_ops); + kfree(conn->sess); + conn->sess = NULL; +diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c +index 97cb2dfd6369..d063f0401f84 100644 +--- a/drivers/usb/gadget/function/f_uac2.c ++++ b/drivers/usb/gadget/function/f_uac2.c +@@ -442,14 +442,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = { + }; + + struct cntrl_cur_lay3 { +- __u32 dCUR; ++ __le32 dCUR; + }; + + struct cntrl_range_lay3 { +- __u16 wNumSubRanges; +- __u32 dMIN; +- __u32 dMAX; +- __u32 dRES; ++ __le16 wNumSubRanges; ++ __le32 dMIN; ++ __le32 dMAX; ++ __le32 dRES; + } __packed; + + static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, +@@ -563,13 +563,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) + agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc); + if (!agdev->out_ep) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); +- return ret; ++ return -ENODEV; + } + + agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc); + if (!agdev->in_ep) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); +- return ret; ++ return -ENODEV; + } + + agdev->in_ep_maxpsize = max_t(u16, +@@ -707,9 +707,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr) + memset(&c, 0, sizeof(struct cntrl_cur_lay3)); + + if (entity_id == USB_IN_CLK_ID) +- c.dCUR = p_srate; ++ c.dCUR = cpu_to_le32(p_srate); + else if (entity_id == USB_OUT_CLK_ID) +- c.dCUR = c_srate; ++ c.dCUR = cpu_to_le32(c_srate); + + value = min_t(unsigned, w_length, sizeof c); + memcpy(req->buf, &c, value); +@@ -746,15 +746,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr) + + if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { + if (entity_id == USB_IN_CLK_ID) +- r.dMIN = p_srate; ++ r.dMIN = cpu_to_le32(p_srate); + else if (entity_id == USB_OUT_CLK_ID) +- r.dMIN = c_srate; ++ r.dMIN = cpu_to_le32(c_srate); + else + return -EOPNOTSUPP; + + r.dMAX = r.dMIN; + r.dRES = 0; +- r.wNumSubRanges = 1; ++ r.wNumSubRanges = cpu_to_le16(1); + + value = min_t(unsigned, w_length, sizeof r); + memcpy(req->buf, &r, value); +diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c +index 3971bbab88bd..d3a639297e06 100644 +--- a/drivers/usb/gadget/function/u_audio.c ++++ b/drivers/usb/gadget/function/u_audio.c +@@ -41,9 +41,6 @@ struct uac_req { + struct uac_rtd_params { + struct snd_uac_chip *uac; /* parent chip */ + bool ep_enabled; /* if the ep is enabled */ +- /* Size of the ring buffer */ +- size_t dma_bytes; +- unsigned char *dma_area; + + struct snd_pcm_substream *ss; + +@@ -52,8 +49,6 @@ struct uac_rtd_params { + + void *rbuf; + +- size_t period_size; +- + unsigned max_psize; /* MaxPacketSize of endpoint */ + struct uac_req *ureq; + +@@ -93,12 +88,12 @@ static const struct snd_pcm_hardware uac_pcm_hardware = { + static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req) + { + unsigned pending; +- unsigned long flags; ++ unsigned long flags, flags2; + unsigned int hw_ptr; +- bool update_alsa = false; + int status = req->status; + struct uac_req *ur = req->context; + struct snd_pcm_substream *substream; ++ struct snd_pcm_runtime 
*runtime; + struct uac_rtd_params *prm = ur->pp; + struct snd_uac_chip *uac = prm->uac; + +@@ -120,6 +115,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req) + if (!substream) + goto exit; + ++ snd_pcm_stream_lock_irqsave(substream, flags2); ++ ++ runtime = substream->runtime; ++ if (!runtime || !snd_pcm_running(substream)) { ++ snd_pcm_stream_unlock_irqrestore(substream, flags2); ++ goto exit; ++ } ++ + spin_lock_irqsave(&prm->lock, flags); + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { +@@ -146,43 +149,46 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req) + req->actual = req->length; + } + +- pending = prm->hw_ptr % prm->period_size; +- pending += req->actual; +- if (pending >= prm->period_size) +- update_alsa = true; +- + hw_ptr = prm->hw_ptr; +- prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes; + + spin_unlock_irqrestore(&prm->lock, flags); + + /* Pack USB load in ALSA ring buffer */ +- pending = prm->dma_bytes - hw_ptr; ++ pending = runtime->dma_bytes - hw_ptr; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + if (unlikely(pending < req->actual)) { +- memcpy(req->buf, prm->dma_area + hw_ptr, pending); +- memcpy(req->buf + pending, prm->dma_area, ++ memcpy(req->buf, runtime->dma_area + hw_ptr, pending); ++ memcpy(req->buf + pending, runtime->dma_area, + req->actual - pending); + } else { +- memcpy(req->buf, prm->dma_area + hw_ptr, req->actual); ++ memcpy(req->buf, runtime->dma_area + hw_ptr, ++ req->actual); + } + } else { + if (unlikely(pending < req->actual)) { +- memcpy(prm->dma_area + hw_ptr, req->buf, pending); +- memcpy(prm->dma_area, req->buf + pending, ++ memcpy(runtime->dma_area + hw_ptr, req->buf, pending); ++ memcpy(runtime->dma_area, req->buf + pending, + req->actual - pending); + } else { +- memcpy(prm->dma_area + hw_ptr, req->buf, req->actual); ++ memcpy(runtime->dma_area + hw_ptr, req->buf, ++ req->actual); + } + } + ++ spin_lock_irqsave(&prm->lock, flags); ++ /* update hw_ptr after data is copied to memory */ ++ prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes; ++ hw_ptr = prm->hw_ptr; ++ spin_unlock_irqrestore(&prm->lock, flags); ++ snd_pcm_stream_unlock_irqrestore(substream, flags2); ++ ++ if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual) ++ snd_pcm_period_elapsed(substream); ++ + exit: + if (usb_ep_queue(ep, req, GFP_ATOMIC)) + dev_err(uac->card->dev, "%d Error!\n", __LINE__); +- +- if (update_alsa) +- snd_pcm_period_elapsed(substream); + } + + static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd) +@@ -245,40 +251,12 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream) + static int uac_pcm_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *hw_params) + { +- struct snd_uac_chip *uac = snd_pcm_substream_chip(substream); +- struct uac_rtd_params *prm; +- int err; +- +- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) +- prm = &uac->p_prm; +- else +- prm = &uac->c_prm; +- +- err = snd_pcm_lib_malloc_pages(substream, ++ return snd_pcm_lib_malloc_pages(substream, + params_buffer_bytes(hw_params)); +- if (err >= 0) { +- prm->dma_bytes = substream->runtime->dma_bytes; +- prm->dma_area = substream->runtime->dma_area; +- prm->period_size = params_period_bytes(hw_params); +- } +- +- return err; + } + + static int uac_pcm_hw_free(struct snd_pcm_substream *substream) + { +- struct snd_uac_chip *uac = snd_pcm_substream_chip(substream); +- struct uac_rtd_params *prm; +- +- if (substream->stream 
== SNDRV_PCM_STREAM_PLAYBACK) +- prm = &uac->p_prm; +- else +- prm = &uac->c_prm; +- +- prm->dma_area = NULL; +- prm->dma_bytes = 0; +- prm->period_size = 0; +- + return snd_pcm_lib_free_pages(substream); + } + +@@ -604,15 +582,15 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name, + if (err < 0) + goto snd_fail; + +- strcpy(pcm->name, pcm_name); ++ strlcpy(pcm->name, pcm_name, sizeof(pcm->name)); + pcm->private_data = uac; + uac->pcm = pcm; + + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops); + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops); + +- strcpy(card->driver, card_name); +- strcpy(card->shortname, card_name); ++ strlcpy(card->driver, card_name, sizeof(card->driver)); ++ strlcpy(card->shortname, card_name, sizeof(card->shortname)); + sprintf(card->longname, "%s %i", card_name, card->dev->id); + + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, +diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c +index 118ad70f1af0..84b227ede082 100644 +--- a/drivers/usb/gadget/udc/r8a66597-udc.c ++++ b/drivers/usb/gadget/udc/r8a66597-udc.c +@@ -835,11 +835,11 @@ static void init_controller(struct r8a66597 *r8a66597) + + r8a66597_bset(r8a66597, XCKE, SYSCFG0); + +- msleep(3); ++ mdelay(3); + + r8a66597_bset(r8a66597, PLLC, SYSCFG0); + +- msleep(1); ++ mdelay(1); + + r8a66597_bset(r8a66597, SCKE, SYSCFG0); + +@@ -1193,7 +1193,7 @@ __acquires(r8a66597->lock) + r8a66597->ep0_req->length = 2; + /* AV: what happens if we get called again before that gets through? */ + spin_unlock(&r8a66597->lock); +- r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL); ++ r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC); + spin_lock(&r8a66597->lock); + } + +diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c +index cf8f40ae6e01..9b4354a00ca7 100644 +--- a/drivers/usb/phy/phy-fsl-usb.c ++++ b/drivers/usb/phy/phy-fsl-usb.c +@@ -874,6 +874,7 @@ int usb_otg_start(struct platform_device *pdev) + if (pdata->init && pdata->init(pdev) != 0) + return -EINVAL; + ++#ifdef CONFIG_PPC32 + if (pdata->big_endian_mmio) { + _fsl_readl = _fsl_readl_be; + _fsl_writel = _fsl_writel_be; +@@ -881,6 +882,7 @@ int usb_otg_start(struct platform_device *pdev) + _fsl_readl = _fsl_readl_le; + _fsl_writel = _fsl_writel_le; + } ++#endif + + /* request irq */ + p_otg->irq = platform_get_irq(pdev, 0); +@@ -971,7 +973,7 @@ int usb_otg_start(struct platform_device *pdev) + /* + * state file in sysfs + */ +-static int show_fsl_usb2_otg_state(struct device *dev, ++static ssize_t show_fsl_usb2_otg_state(struct device *dev, + struct device_attribute *attr, char *buf) + { + struct otg_fsm *fsm = &fsl_otg_dev->fsm; +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index b475d1ebbbbf..5cf1bbe9754c 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -1098,8 +1098,9 @@ static int btree_writepages(struct address_space *mapping, + + fs_info = BTRFS_I(mapping->host)->root->fs_info; + /* this is a bit racy, but that's ok */ +- ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes, +- BTRFS_DIRTY_METADATA_THRESH); ++ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes, ++ BTRFS_DIRTY_METADATA_THRESH, ++ fs_info->dirty_metadata_batch); + if (ret < 0) + return 0; + } +@@ -4030,8 +4031,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info, + if (flush_delayed) + btrfs_balance_delayed_items(fs_info); + +- ret = 
percpu_counter_compare(&fs_info->dirty_metadata_bytes, +- BTRFS_DIRTY_METADATA_THRESH); ++ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes, ++ BTRFS_DIRTY_METADATA_THRESH, ++ fs_info->dirty_metadata_batch); + if (ret > 0) { + balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping); + } +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index 53487102081d..bbabe37c2e8c 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -4407,7 +4407,7 @@ commit_trans: + data_sinfo->flags, bytes, 1); + spin_unlock(&data_sinfo->lock); + +- return ret; ++ return 0; + } + + int btrfs_check_data_free_space(struct inode *inode, +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 28a58f40f3a4..e8bfafa25a71 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -6152,32 +6152,6 @@ err: + return ret; + } + +-int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) +-{ +- struct btrfs_root *root = BTRFS_I(inode)->root; +- struct btrfs_trans_handle *trans; +- int ret = 0; +- bool nolock = false; +- +- if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) +- return 0; +- +- if (btrfs_fs_closing(root->fs_info) && +- btrfs_is_free_space_inode(BTRFS_I(inode))) +- nolock = true; +- +- if (wbc->sync_mode == WB_SYNC_ALL) { +- if (nolock) +- trans = btrfs_join_transaction_nolock(root); +- else +- trans = btrfs_join_transaction(root); +- if (IS_ERR(trans)) +- return PTR_ERR(trans); +- ret = btrfs_commit_transaction(trans); +- } +- return ret; +-} +- + /* + * This is somewhat expensive, updating the tree every time the + * inode changes. But, it is most likely to find the inode in cache. +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c +index 8e3ce81d3f44..fe960d5e8913 100644 +--- a/fs/btrfs/super.c ++++ b/fs/btrfs/super.c +@@ -2271,7 +2271,6 @@ static const struct super_operations btrfs_super_ops = { + .sync_fs = btrfs_sync_fs, + .show_options = btrfs_show_options, + .show_devname = btrfs_show_devname, +- .write_inode = btrfs_write_inode, + .alloc_inode = btrfs_alloc_inode, + .destroy_inode = btrfs_destroy_inode, + .statfs = btrfs_statfs, +diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c +index 3978b324cbca..5f2f67d220fa 100644 +--- a/fs/cachefiles/namei.c ++++ b/fs/cachefiles/namei.c +@@ -195,7 +195,6 @@ wait_for_old_object: + pr_err("\n"); + pr_err("Error: Unexpected object collision\n"); + cachefiles_printk_object(object, xobject); +- BUG(); + } + atomic_inc(&xobject->usage); + write_unlock(&cache->active_lock); +diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c +index 18d7aa61ef0f..199eb396a1bb 100644 +--- a/fs/cachefiles/rdwr.c ++++ b/fs/cachefiles/rdwr.c +@@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode, + struct cachefiles_one_read *monitor = + container_of(wait, struct cachefiles_one_read, monitor); + struct cachefiles_object *object; ++ struct fscache_retrieval *op = monitor->op; + struct wait_bit_key *key = _key; + struct page *page = wait->private; + +@@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode, + list_del(&wait->entry); + + /* move onto the action list and queue for FS-Cache thread pool */ +- ASSERT(monitor->op); ++ ASSERT(op); + +- object = container_of(monitor->op->op.object, +- struct cachefiles_object, fscache); ++ /* We need to temporarily bump the usage count as we don't own a ref ++ * here otherwise cachefiles_read_copier() may free the op between the ++ * monitor being enqueued on the op->to_do list and the 
op getting ++ * enqueued on the work queue. ++ */ ++ fscache_get_retrieval(op); + ++ object = container_of(op->op.object, struct cachefiles_object, fscache); + spin_lock(&object->work_lock); +- list_add_tail(&monitor->op_link, &monitor->op->to_do); ++ list_add_tail(&monitor->op_link, &op->to_do); + spin_unlock(&object->work_lock); + +- fscache_enqueue_retrieval(monitor->op); ++ fscache_enqueue_retrieval(op); ++ fscache_put_retrieval(op); + return 0; + } + +diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c +index cbb9534b89b4..53c9c49f0fbb 100644 +--- a/fs/cifs/cifs_debug.c ++++ b/fs/cifs/cifs_debug.c +@@ -123,25 +123,41 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) + seq_printf(m, "CIFS Version %s\n", CIFS_VERSION); + seq_printf(m, "Features:"); + #ifdef CONFIG_CIFS_DFS_UPCALL +- seq_printf(m, " dfs"); ++ seq_printf(m, " DFS"); + #endif + #ifdef CONFIG_CIFS_FSCACHE +- seq_printf(m, " fscache"); ++ seq_printf(m, ",FSCACHE"); ++#endif ++#ifdef CONFIG_CIFS_SMB_DIRECT ++ seq_printf(m, ",SMB_DIRECT"); ++#endif ++#ifdef CONFIG_CIFS_STATS2 ++ seq_printf(m, ",STATS2"); ++#elif defined(CONFIG_CIFS_STATS) ++ seq_printf(m, ",STATS"); ++#endif ++#ifdef CONFIG_CIFS_DEBUG2 ++ seq_printf(m, ",DEBUG2"); ++#elif defined(CONFIG_CIFS_DEBUG) ++ seq_printf(m, ",DEBUG"); ++#endif ++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY ++ seq_printf(m, ",ALLOW_INSECURE_LEGACY"); + #endif + #ifdef CONFIG_CIFS_WEAK_PW_HASH +- seq_printf(m, " lanman"); ++ seq_printf(m, ",WEAK_PW_HASH"); + #endif + #ifdef CONFIG_CIFS_POSIX +- seq_printf(m, " posix"); ++ seq_printf(m, ",CIFS_POSIX"); + #endif + #ifdef CONFIG_CIFS_UPCALL +- seq_printf(m, " spnego"); ++ seq_printf(m, ",UPCALL(SPNEGO)"); + #endif + #ifdef CONFIG_CIFS_XATTR +- seq_printf(m, " xattr"); ++ seq_printf(m, ",XATTR"); + #endif + #ifdef CONFIG_CIFS_ACL +- seq_printf(m, " acl"); ++ seq_printf(m, ",ACL"); + #endif + seq_putc(m, '\n'); + seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid); +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c +index 490c5fc9e69c..44a7b2dea688 100644 +--- a/fs/cifs/cifsfs.c ++++ b/fs/cifs/cifsfs.c +@@ -197,14 +197,16 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf) + + xid = get_xid(); + +- /* +- * PATH_MAX may be too long - it would presumably be total path, +- * but note that some servers (includinng Samba 3) have a shorter +- * maximum path. +- * +- * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO. 
+- */ +- buf->f_namelen = PATH_MAX; ++ if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0) ++ buf->f_namelen = ++ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength); ++ else ++ buf->f_namelen = PATH_MAX; ++ ++ buf->f_fsid.val[0] = tcon->vol_serial_number; ++ /* are using part of create time for more randomness, see man statfs */ ++ buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time); ++ + buf->f_files = 0; /* undefined */ + buf->f_ffree = 0; /* unlimited */ + +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c +index 0c7b7e2a0919..caf9cf91b825 100644 +--- a/fs/cifs/inode.c ++++ b/fs/cifs/inode.c +@@ -1122,6 +1122,8 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid, + if (!server->ops->set_file_info) + return -ENOSYS; + ++ info_buf.Pad = 0; ++ + if (attrs->ia_valid & ATTR_ATIME) { + set_time = true; + info_buf.LastAccessTime = +diff --git a/fs/cifs/link.c b/fs/cifs/link.c +index 889a840172eb..9451a7f6893d 100644 +--- a/fs/cifs/link.c ++++ b/fs/cifs/link.c +@@ -396,7 +396,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, + struct cifs_io_parms io_parms; + int buf_type = CIFS_NO_BUFFER; + __le16 *utf16_path; +- __u8 oplock = SMB2_OPLOCK_LEVEL_II; ++ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; + struct smb2_file_all_info *pfile_info = NULL; + + oparms.tcon = tcon; +@@ -458,7 +458,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, + struct cifs_io_parms io_parms; + int create_options = CREATE_NOT_DIR; + __le16 *utf16_path; +- __u8 oplock = SMB2_OPLOCK_LEVEL_EXCLUSIVE; ++ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; + struct kvec iov[2]; + + if (backup_cred(cifs_sb)) +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c +index 8b0502cd39af..aa23c00367ec 100644 +--- a/fs/cifs/sess.c ++++ b/fs/cifs/sess.c +@@ -398,6 +398,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer, + goto setup_ntlmv2_ret; + } + *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL); ++ if (!*pbuffer) { ++ rc = -ENOMEM; ++ cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc); ++ *buflen = 0; ++ goto setup_ntlmv2_ret; ++ } + sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer; + + memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); +diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c +index 1238cd3552f9..0267d8cbc996 100644 +--- a/fs/cifs/smb2inode.c ++++ b/fs/cifs/smb2inode.c +@@ -267,7 +267,7 @@ smb2_set_file_info(struct inode *inode, const char *full_path, + int rc; + + if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) && +- (buf->LastWriteTime == 0) && (buf->ChangeTime) && ++ (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) && + (buf->Attributes == 0)) + return 0; /* would be a no op, no sense sending this */ + +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index 83267ac3a3f0..e9f246fe9d80 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -332,6 +332,8 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon) + FS_ATTRIBUTE_INFORMATION); + SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid, + FS_DEVICE_INFORMATION); ++ SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid, ++ FS_VOLUME_INFORMATION); + SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid, + FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */ + SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); +@@ -1129,6 +1131,13 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, + + } + ++/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */ ++#define GMT_TOKEN_SIZE 50 ++ ++/* 
++ * Input buffer contains (empty) struct smb_snapshot array with size filled in ++ * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2 ++ */ + static int + smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, + struct cifsFileInfo *cfile, void __user *ioc_buf) +@@ -1158,14 +1167,27 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, + kfree(retbuf); + return rc; + } +- if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) { +- rc = -ERANGE; +- kfree(retbuf); +- return rc; +- } + +- if (ret_data_len > snapshot_in.snapshot_array_size) +- ret_data_len = snapshot_in.snapshot_array_size; ++ /* ++ * Check for min size, ie not large enough to fit even one GMT ++ * token (snapshot). On the first ioctl some users may pass in ++ * smaller size (or zero) to simply get the size of the array ++ * so the user space caller can allocate sufficient memory ++ * and retry the ioctl again with larger array size sufficient ++ * to hold all of the snapshot GMT tokens on the second try. ++ */ ++ if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE) ++ ret_data_len = sizeof(struct smb_snapshot_array); ++ ++ /* ++ * We return struct SRV_SNAPSHOT_ARRAY, followed by ++ * the snapshot array (of 50 byte GMT tokens) each ++ * representing an available previous version of the data ++ */ ++ if (ret_data_len > (snapshot_in.snapshot_array_size + ++ sizeof(struct smb_snapshot_array))) ++ ret_data_len = snapshot_in.snapshot_array_size + ++ sizeof(struct smb_snapshot_array); + + if (copy_to_user(ioc_buf, retbuf, ret_data_len)) + rc = -EFAULT; +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index 71b81980787f..e317e9a400c1 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -3455,6 +3455,9 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon, + } else if (level == FS_SECTOR_SIZE_INFORMATION) { + max_len = sizeof(struct smb3_fs_ss_info); + min_len = sizeof(struct smb3_fs_ss_info); ++ } else if (level == FS_VOLUME_INFORMATION) { ++ max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN; ++ min_len = sizeof(struct smb3_fs_vol_info); + } else { + cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level); + return -EINVAL; +@@ -3495,6 +3498,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon, + tcon->ss_flags = le32_to_cpu(ss_info->Flags); + tcon->perf_sector_size = + le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf); ++ } else if (level == FS_VOLUME_INFORMATION) { ++ struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *) ++ (offset + (char *)rsp); ++ tcon->vol_serial_number = vol_info->VolumeSerialNumber; ++ tcon->vol_create_time = vol_info->VolumeCreationTime; + } + + qfsattr_exit: +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h +index c2ec934be968..e52454059725 100644 +--- a/fs/cifs/smb2pdu.h ++++ b/fs/cifs/smb2pdu.h +@@ -1108,6 +1108,17 @@ struct smb3_fs_ss_info { + __le32 ByteOffsetForPartitionAlignment; + } __packed; + ++/* volume info struct - see MS-FSCC 2.5.9 */ ++#define MAX_VOL_LABEL_LEN 32 ++struct smb3_fs_vol_info { ++ __le64 VolumeCreationTime; ++ __u32 VolumeSerialNumber; ++ __le32 VolumeLabelLength; /* includes trailing null */ ++ __u8 SupportsObjects; /* True if eg like NTFS, supports objects */ ++ __u8 Reserved; ++ __u8 VolumeLabel[0]; /* variable len */ ++} __packed; ++ + /* partial list of QUERY INFO levels */ + #define FILE_DIRECTORY_INFORMATION 1 + #define FILE_FULL_DIRECTORY_INFORMATION 2 +diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c +index 27b9a76a0dfa..638ad4743477 100644 +--- 
a/fs/ext4/mmp.c ++++ b/fs/ext4/mmp.c +@@ -186,11 +186,8 @@ static int kmmpd(void *data) + goto exit_thread; + } + +- if (sb_rdonly(sb)) { +- ext4_warning(sb, "kmmpd being stopped since filesystem " +- "has been remounted as readonly."); +- goto exit_thread; +- } ++ if (sb_rdonly(sb)) ++ break; + + diff = jiffies - last_update_time; + if (diff < mmp_update_interval * HZ) +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c +index 6747861f9b70..1db39e12e02b 100644 +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -1397,6 +1397,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir, + goto cleanup_and_exit; + dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, " + "falling back\n")); ++ ret = NULL; + } + nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); + if (!nblocks) { +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index f30d2bf40471..b4fb085261fd 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -5163,6 +5163,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) + + if (sbi->s_journal) + ext4_mark_recovery_complete(sb, es); ++ if (sbi->s_mmp_tsk) ++ kthread_stop(sbi->s_mmp_tsk); + } else { + /* Make sure we can mount this feature set readwrite */ + if (ext4_has_feature_readonly(sb) || +diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c +index e21afd52e7d7..bdfc2a2de8f2 100644 +--- a/fs/ext4/sysfs.c ++++ b/fs/ext4/sysfs.c +@@ -278,8 +278,12 @@ static ssize_t ext4_attr_show(struct kobject *kobj, + case attr_pointer_ui: + if (!ptr) + return 0; +- return snprintf(buf, PAGE_SIZE, "%u\n", +- *((unsigned int *) ptr)); ++ if (a->attr_ptr == ptr_ext4_super_block_offset) ++ return snprintf(buf, PAGE_SIZE, "%u\n", ++ le32_to_cpup(ptr)); ++ else ++ return snprintf(buf, PAGE_SIZE, "%u\n", ++ *((unsigned int *) ptr)); + case attr_pointer_atomic: + if (!ptr) + return 0; +@@ -312,7 +316,10 @@ static ssize_t ext4_attr_store(struct kobject *kobj, + ret = kstrtoul(skip_spaces(buf), 0, &t); + if (ret) + return ret; +- *((unsigned int *) ptr) = t; ++ if (a->attr_ptr == ptr_ext4_super_block_offset) ++ *((__le32 *) ptr) = cpu_to_le32(t); ++ else ++ *((unsigned int *) ptr) = t; + return len; + case attr_inode_readahead: + return inode_readahead_blks_store(a, sbi, buf, len); +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c +index c7c8c16ccd93..9bc50eef6127 100644 +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -189,6 +189,8 @@ ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end, + struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e); + if ((void *)next >= end) + return -EFSCORRUPTED; ++ if (strnlen(e->e_name, e->e_name_len) != e->e_name_len) ++ return -EFSCORRUPTED; + e = next; + } + +diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c +index de67745e1cd7..77946d6f617d 100644 +--- a/fs/fscache/operation.c ++++ b/fs/fscache/operation.c +@@ -66,7 +66,8 @@ void fscache_enqueue_operation(struct fscache_operation *op) + ASSERT(op->processor != NULL); + ASSERT(fscache_object_is_available(op->object)); + ASSERTCMP(atomic_read(&op->usage), >, 0); +- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS); ++ ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS, ++ op->state, ==, FSCACHE_OP_ST_CANCELLED); + + fscache_stat(&fscache_n_op_enqueue); + switch (op->flags & FSCACHE_OP_TYPE) { +@@ -481,7 +482,8 @@ void fscache_put_operation(struct fscache_operation *op) + struct fscache_cache *cache; + + _enter("{OBJ%x OP%x,%d}", +- op->object->debug_id, op->debug_id, atomic_read(&op->usage)); ++ op->object ? 
op->object->debug_id : 0, ++ op->debug_id, atomic_read(&op->usage)); + + ASSERTCMP(atomic_read(&op->usage), >, 0); + +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c +index 261fd13a75c6..ee8105af4001 100644 +--- a/fs/fuse/dev.c ++++ b/fs/fuse/dev.c +@@ -131,6 +131,16 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) + return !fc->initialized || (for_background && fc->blocked); + } + ++static void fuse_drop_waiting(struct fuse_conn *fc) ++{ ++ if (fc->connected) { ++ atomic_dec(&fc->num_waiting); ++ } else if (atomic_dec_and_test(&fc->num_waiting)) { ++ /* wake up aborters */ ++ wake_up_all(&fc->blocked_waitq); ++ } ++} ++ + static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, + bool for_background) + { +@@ -171,7 +181,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, + return req; + + out: +- atomic_dec(&fc->num_waiting); ++ fuse_drop_waiting(fc); + return ERR_PTR(err); + } + +@@ -278,7 +288,7 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) + + if (test_bit(FR_WAITING, &req->flags)) { + __clear_bit(FR_WAITING, &req->flags); +- atomic_dec(&fc->num_waiting); ++ fuse_drop_waiting(fc); + } + + if (req->stolen_file) +@@ -364,7 +374,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) + struct fuse_iqueue *fiq = &fc->iq; + + if (test_and_set_bit(FR_FINISHED, &req->flags)) +- return; ++ goto put_request; + + spin_lock(&fiq->waitq.lock); + list_del_init(&req->intr_entry); +@@ -393,6 +403,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) + wake_up(&req->waitq); + if (req->end) + req->end(fc, req); ++put_request: + fuse_put_request(fc, req); + } + +@@ -1941,11 +1952,14 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, + if (!fud) + return -EPERM; + ++ pipe_lock(pipe); ++ + bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL); +- if (!bufs) ++ if (!bufs) { ++ pipe_unlock(pipe); + return -ENOMEM; ++ } + +- pipe_lock(pipe); + nbuf = 0; + rem = 0; + for (idx = 0; idx < pipe->nrbufs && rem < len; idx++) +@@ -2100,6 +2114,7 @@ void fuse_abort_conn(struct fuse_conn *fc) + set_bit(FR_ABORTED, &req->flags); + if (!test_bit(FR_LOCKED, &req->flags)) { + set_bit(FR_PRIVATE, &req->flags); ++ __fuse_get_request(req); + list_move(&req->list, &to_end1); + } + spin_unlock(&req->waitq.lock); +@@ -2126,7 +2141,6 @@ void fuse_abort_conn(struct fuse_conn *fc) + + while (!list_empty(&to_end1)) { + req = list_first_entry(&to_end1, struct fuse_req, list); +- __fuse_get_request(req); + list_del_init(&req->list); + request_end(fc, req); + } +@@ -2137,6 +2151,11 @@ void fuse_abort_conn(struct fuse_conn *fc) + } + EXPORT_SYMBOL_GPL(fuse_abort_conn); + ++void fuse_wait_aborted(struct fuse_conn *fc) ++{ ++ wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0); ++} ++ + int fuse_dev_release(struct inode *inode, struct file *file) + { + struct fuse_dev *fud = fuse_get_dev(file); +@@ -2144,9 +2163,15 @@ int fuse_dev_release(struct inode *inode, struct file *file) + if (fud) { + struct fuse_conn *fc = fud->fc; + struct fuse_pqueue *fpq = &fud->pq; ++ LIST_HEAD(to_end); + ++ spin_lock(&fpq->lock); + WARN_ON(!list_empty(&fpq->io)); +- end_requests(fc, &fpq->processing); ++ list_splice_init(&fpq->processing, &to_end); ++ spin_unlock(&fpq->lock); ++ ++ end_requests(fc, &to_end); ++ + /* Are we the last open device? 
*/ + if (atomic_dec_and_test(&fc->dev_count)) { + WARN_ON(fc->iq.fasync != NULL); +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c +index 7a980b4462d9..29868c35c19a 100644 +--- a/fs/fuse/dir.c ++++ b/fs/fuse/dir.c +@@ -355,11 +355,12 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, + struct inode *inode; + struct dentry *newent; + bool outarg_valid = true; ++ bool locked; + +- fuse_lock_inode(dir); ++ locked = fuse_lock_inode(dir); + err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name, + &outarg, &inode); +- fuse_unlock_inode(dir); ++ fuse_unlock_inode(dir, locked); + if (err == -ENOENT) { + outarg_valid = false; + err = 0; +@@ -1332,6 +1333,7 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx) + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_req *req; + u64 attr_version = 0; ++ bool locked; + + if (is_bad_inode(inode)) + return -EIO; +@@ -1359,9 +1361,9 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx) + fuse_read_fill(req, file, ctx->pos, PAGE_SIZE, + FUSE_READDIR); + } +- fuse_lock_inode(inode); ++ locked = fuse_lock_inode(inode); + fuse_request_send(fc, req); +- fuse_unlock_inode(inode); ++ fuse_unlock_inode(inode, locked); + nbytes = req->out.args[0].size; + err = req->out.h.error; + fuse_put_request(fc, req); +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index cb7dff5c45d7..fb4738ef162f 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -866,6 +866,7 @@ static int fuse_readpages_fill(void *_data, struct page *page) + } + + if (WARN_ON(req->num_pages >= req->max_pages)) { ++ unlock_page(page); + fuse_put_request(fc, req); + return -EIO; + } +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h +index d5773ca67ad2..e105640153ce 100644 +--- a/fs/fuse/fuse_i.h ++++ b/fs/fuse/fuse_i.h +@@ -852,6 +852,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc, + + /* Abort all requests */ + void fuse_abort_conn(struct fuse_conn *fc); ++void fuse_wait_aborted(struct fuse_conn *fc); + + /** + * Invalidate inode attributes +@@ -964,8 +965,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, + + void fuse_set_initialized(struct fuse_conn *fc); + +-void fuse_unlock_inode(struct inode *inode); +-void fuse_lock_inode(struct inode *inode); ++void fuse_unlock_inode(struct inode *inode, bool locked); ++bool fuse_lock_inode(struct inode *inode); + + int fuse_setxattr(struct inode *inode, const char *name, const void *value, + size_t size, int flags); +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c +index a13ecefa9cd1..ffb61787d77a 100644 +--- a/fs/fuse/inode.c ++++ b/fs/fuse/inode.c +@@ -357,15 +357,21 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, + return 0; + } + +-void fuse_lock_inode(struct inode *inode) ++bool fuse_lock_inode(struct inode *inode) + { +- if (!get_fuse_conn(inode)->parallel_dirops) ++ bool locked = false; ++ ++ if (!get_fuse_conn(inode)->parallel_dirops) { + mutex_lock(&get_fuse_inode(inode)->mutex); ++ locked = true; ++ } ++ ++ return locked; + } + +-void fuse_unlock_inode(struct inode *inode) ++void fuse_unlock_inode(struct inode *inode, bool locked) + { +- if (!get_fuse_conn(inode)->parallel_dirops) ++ if (locked) + mutex_unlock(&get_fuse_inode(inode)->mutex); + } + +@@ -391,9 +397,6 @@ static void fuse_put_super(struct super_block *sb) + { + struct fuse_conn *fc = get_fuse_conn_super(sb); + +- fuse_send_destroy(fc); +- +- fuse_abort_conn(fc); + mutex_lock(&fuse_mutex); + list_del(&fc->entry); + fuse_ctl_remove_conn(fc); +@@ -1190,16 +1193,25 
@@ static struct dentry *fuse_mount(struct file_system_type *fs_type, + return mount_nodev(fs_type, flags, raw_data, fuse_fill_super); + } + +-static void fuse_kill_sb_anon(struct super_block *sb) ++static void fuse_sb_destroy(struct super_block *sb) + { + struct fuse_conn *fc = get_fuse_conn_super(sb); + + if (fc) { ++ fuse_send_destroy(fc); ++ ++ fuse_abort_conn(fc); ++ fuse_wait_aborted(fc); ++ + down_write(&fc->killsb); + fc->sb = NULL; + up_write(&fc->killsb); + } ++} + ++static void fuse_kill_sb_anon(struct super_block *sb) ++{ ++ fuse_sb_destroy(sb); + kill_anon_super(sb); + } + +@@ -1222,14 +1234,7 @@ static struct dentry *fuse_mount_blk(struct file_system_type *fs_type, + + static void fuse_kill_sb_blk(struct super_block *sb) + { +- struct fuse_conn *fc = get_fuse_conn_super(sb); +- +- if (fc) { +- down_write(&fc->killsb); +- fc->sb = NULL; +- up_write(&fc->killsb); +- } +- ++ fuse_sb_destroy(sb); + kill_block_super(sb); + } + +diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c +index fcff2e0487fe..f1c1430ae721 100644 +--- a/fs/squashfs/file.c ++++ b/fs/squashfs/file.c +@@ -374,13 +374,29 @@ static int read_blocklist(struct inode *inode, int index, u64 *block) + return squashfs_block_size(size); + } + ++void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail) ++{ ++ int copied; ++ void *pageaddr; ++ ++ pageaddr = kmap_atomic(page); ++ copied = squashfs_copy_data(pageaddr, buffer, offset, avail); ++ memset(pageaddr + copied, 0, PAGE_SIZE - copied); ++ kunmap_atomic(pageaddr); ++ ++ flush_dcache_page(page); ++ if (copied == avail) ++ SetPageUptodate(page); ++ else ++ SetPageError(page); ++} ++ + /* Copy data into page cache */ + void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, + int bytes, int offset) + { + struct inode *inode = page->mapping->host; + struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; +- void *pageaddr; + int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; + int start_index = page->index & ~mask, end_index = start_index | mask; + +@@ -406,12 +422,7 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, + if (PageUptodate(push_page)) + goto skip_page; + +- pageaddr = kmap_atomic(push_page); +- squashfs_copy_data(pageaddr, buffer, offset, avail); +- memset(pageaddr + avail, 0, PAGE_SIZE - avail); +- kunmap_atomic(pageaddr); +- flush_dcache_page(push_page); +- SetPageUptodate(push_page); ++ squashfs_fill_page(push_page, buffer, offset, avail); + skip_page: + unlock_page(push_page); + if (i != page->index) +@@ -420,10 +431,9 @@ skip_page: + } + + /* Read datablock stored packed inside a fragment (tail-end packed block) */ +-static int squashfs_readpage_fragment(struct page *page) ++static int squashfs_readpage_fragment(struct page *page, int expected) + { + struct inode *inode = page->mapping->host; +- struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; + struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb, + squashfs_i(inode)->fragment_block, + squashfs_i(inode)->fragment_size); +@@ -434,23 +444,16 @@ static int squashfs_readpage_fragment(struct page *page) + squashfs_i(inode)->fragment_block, + squashfs_i(inode)->fragment_size); + else +- squashfs_copy_cache(page, buffer, i_size_read(inode) & +- (msblk->block_size - 1), ++ squashfs_copy_cache(page, buffer, expected, + squashfs_i(inode)->fragment_offset); + + squashfs_cache_put(buffer); + return res; + } + +-static int squashfs_readpage_sparse(struct page *page, int index, 
int file_end) ++static int squashfs_readpage_sparse(struct page *page, int expected) + { +- struct inode *inode = page->mapping->host; +- struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; +- int bytes = index == file_end ? +- (i_size_read(inode) & (msblk->block_size - 1)) : +- msblk->block_size; +- +- squashfs_copy_cache(page, NULL, bytes, 0); ++ squashfs_copy_cache(page, NULL, expected, 0); + return 0; + } + +@@ -460,6 +463,9 @@ static int squashfs_readpage(struct file *file, struct page *page) + struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; + int index = page->index >> (msblk->block_log - PAGE_SHIFT); + int file_end = i_size_read(inode) >> msblk->block_log; ++ int expected = index == file_end ? ++ (i_size_read(inode) & (msblk->block_size - 1)) : ++ msblk->block_size; + int res; + void *pageaddr; + +@@ -478,11 +484,11 @@ static int squashfs_readpage(struct file *file, struct page *page) + goto error_out; + + if (bsize == 0) +- res = squashfs_readpage_sparse(page, index, file_end); ++ res = squashfs_readpage_sparse(page, expected); + else +- res = squashfs_readpage_block(page, block, bsize); ++ res = squashfs_readpage_block(page, block, bsize, expected); + } else +- res = squashfs_readpage_fragment(page); ++ res = squashfs_readpage_fragment(page, expected); + + if (!res) + return 0; +diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c +index f2310d2a2019..a9ba8d96776a 100644 +--- a/fs/squashfs/file_cache.c ++++ b/fs/squashfs/file_cache.c +@@ -20,7 +20,7 @@ + #include "squashfs.h" + + /* Read separately compressed datablock and memcopy into page cache */ +-int squashfs_readpage_block(struct page *page, u64 block, int bsize) ++int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected) + { + struct inode *i = page->mapping->host; + struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, +@@ -31,7 +31,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize) + ERROR("Unable to read page, block %llx, size %x\n", block, + bsize); + else +- squashfs_copy_cache(page, buffer, buffer->length, 0); ++ squashfs_copy_cache(page, buffer, expected, 0); + + squashfs_cache_put(buffer); + return res; +diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c +index cb485d8e0e91..80db1b86a27c 100644 +--- a/fs/squashfs/file_direct.c ++++ b/fs/squashfs/file_direct.c +@@ -21,10 +21,11 @@ + #include "page_actor.h" + + static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, +- int pages, struct page **page); ++ int pages, struct page **page, int bytes); + + /* Read separately compressed datablock directly into page cache */ +-int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) ++int squashfs_readpage_block(struct page *target_page, u64 block, int bsize, ++ int expected) + + { + struct inode *inode = target_page->mapping->host; +@@ -83,7 +84,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) + * using an intermediate buffer. 
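The expected value threaded through these squashfs hunks is the number of bytes a datablock must decompress to: the full block size for every block except the file's last, partial one, where it is the file size modulo the block size. The hunk just below then treats any other decompressed length as -EIO instead of trusting buffer->length. A self-contained sketch of the same arithmetic (the 300000-byte file and 128 KiB block size are illustrative, not from the patch):

    #include <stdio.h>

    /* Bytes block `index` must decompress to, for a file of `isize` bytes
     * and a power-of-two block size of 1 << block_log (as in squashfs). */
    static int expected_bytes(long long isize, int block_log, int index)
    {
        int file_end = isize >> block_log;   /* index of the last block */

        return index == file_end ? (int)(isize & ((1 << block_log) - 1))
                                 : 1 << block_log;
    }

    int main(void)
    {
        /* 300000-byte file, 128 KiB blocks: two full blocks, one short tail */
        printf("%d %d %d\n",
               expected_bytes(300000, 17, 0),    /* 131072 */
               expected_bytes(300000, 17, 1),    /* 131072 */
               expected_bytes(300000, 17, 2));   /* 37856 */
        return 0;
    }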
+ */ + res = squashfs_read_cache(target_page, block, bsize, pages, +- page); ++ page, expected); + if (res < 0) + goto mark_errored; + +@@ -95,6 +96,11 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) + if (res < 0) + goto mark_errored; + ++ if (res != expected) { ++ res = -EIO; ++ goto mark_errored; ++ } ++ + /* Last page may have trailing bytes not filled */ + bytes = res % PAGE_SIZE; + if (bytes) { +@@ -138,13 +144,12 @@ out: + + + static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, +- int pages, struct page **page) ++ int pages, struct page **page, int bytes) + { + struct inode *i = target_page->mapping->host; + struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, + block, bsize); +- int bytes = buffer->length, res = buffer->error, n, offset = 0; +- void *pageaddr; ++ int res = buffer->error, n, offset = 0; + + if (res) { + ERROR("Unable to read page, block %llx, size %x\n", block, +@@ -159,12 +164,7 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, + if (page[n] == NULL) + continue; + +- pageaddr = kmap_atomic(page[n]); +- squashfs_copy_data(pageaddr, buffer, offset, avail); +- memset(pageaddr + avail, 0, PAGE_SIZE - avail); +- kunmap_atomic(pageaddr); +- flush_dcache_page(page[n]); +- SetPageUptodate(page[n]); ++ squashfs_fill_page(page[n], buffer, offset, avail); + unlock_page(page[n]); + if (page[n] != target_page) + put_page(page[n]); +diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h +index 887d6d270080..f89f8a74c6ce 100644 +--- a/fs/squashfs/squashfs.h ++++ b/fs/squashfs/squashfs.h +@@ -67,11 +67,12 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *, + u64, u64, unsigned int); + + /* file.c */ ++void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int); + void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int, + int); + + /* file_xxx.c */ +-extern int squashfs_readpage_block(struct page *, u64, int); ++extern int squashfs_readpage_block(struct page *, u64, int, int); + + /* id.c */ + extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); +diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c +index 39c75a86c67f..666986b95c5d 100644 +--- a/fs/sysfs/file.c ++++ b/fs/sysfs/file.c +@@ -407,6 +407,50 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, + } + EXPORT_SYMBOL_GPL(sysfs_chmod_file); + ++/** ++ * sysfs_break_active_protection - break "active" protection ++ * @kobj: The kernel object @attr is associated with. ++ * @attr: The attribute to break the "active" protection for. ++ * ++ * With sysfs, just like kernfs, deletion of an attribute is postponed until ++ * all active .show() and .store() callbacks have finished unless this function ++ * is called. Hence this function is useful in methods that implement self ++ * deletion. ++ */ ++struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, ++ const struct attribute *attr) ++{ ++ struct kernfs_node *kn; ++ ++ kobject_get(kobj); ++ kn = kernfs_find_and_get(kobj->sd, attr->name); ++ if (kn) ++ kernfs_break_active_protection(kn); ++ return kn; ++} ++EXPORT_SYMBOL_GPL(sysfs_break_active_protection); ++ ++/** ++ * sysfs_unbreak_active_protection - restore "active" protection ++ * @kn: Pointer returned by sysfs_break_active_protection(). ++ * ++ * Undo the effects of sysfs_break_active_protection(). 
Since this function ++ * calls kernfs_put() on the kernfs node that corresponds to the 'attr' ++ * argument passed to sysfs_break_active_protection() that attribute may have ++ * been removed between the sysfs_break_active_protection() and ++ * sysfs_unbreak_active_protection() calls, it is not safe to access @kn after ++ * this function has returned. ++ */ ++void sysfs_unbreak_active_protection(struct kernfs_node *kn) ++{ ++ struct kobject *kobj = kn->parent->priv; ++ ++ kernfs_unbreak_active_protection(kn); ++ kernfs_put(kn); ++ kobject_put(kobj); ++} ++EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection); ++ + /** + * sysfs_remove_file_ns - remove an object attribute with a custom ns tag + * @kobj: object we're acting for +diff --git a/include/linux/printk.h b/include/linux/printk.h +index 335926039adc..6106befed756 100644 +--- a/include/linux/printk.h ++++ b/include/linux/printk.h +@@ -150,9 +150,13 @@ void early_printk(const char *s, ...) { } + #ifdef CONFIG_PRINTK_NMI + extern void printk_nmi_enter(void); + extern void printk_nmi_exit(void); ++extern void printk_nmi_direct_enter(void); ++extern void printk_nmi_direct_exit(void); + #else + static inline void printk_nmi_enter(void) { } + static inline void printk_nmi_exit(void) { } ++static inline void printk_nmi_direct_enter(void) { } ++static inline void printk_nmi_direct_exit(void) { } + #endif /* PRINTK_NMI */ + + #ifdef CONFIG_PRINTK +diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h +index 1b92a28dd672..6fd615a0eea9 100644 +--- a/include/linux/rtmutex.h ++++ b/include/linux/rtmutex.h +@@ -106,7 +106,14 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock) + extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); + extern void rt_mutex_destroy(struct rt_mutex *lock); + ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass); ++#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0) ++#else + extern void rt_mutex_lock(struct rt_mutex *lock); ++#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock) ++#endif ++ + extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); + extern int rt_mutex_timed_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *timeout); +diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h +index 40839c02d28c..cca19bb200bd 100644 +--- a/include/linux/sysfs.h ++++ b/include/linux/sysfs.h +@@ -239,6 +239,9 @@ int __must_check sysfs_create_files(struct kobject *kobj, + const struct attribute **attr); + int __must_check sysfs_chmod_file(struct kobject *kobj, + const struct attribute *attr, umode_t mode); ++struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, ++ const struct attribute *attr); ++void sysfs_unbreak_active_protection(struct kernfs_node *kn); + void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, + const void *ns); + bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); +@@ -352,6 +355,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj, + return 0; + } + ++static inline struct kernfs_node * ++sysfs_break_active_protection(struct kobject *kobj, ++ const struct attribute *attr) ++{ ++ return NULL; ++} ++ ++static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn) ++{ ++} ++ + static inline void sysfs_remove_file_ns(struct kobject *kobj, + const struct attribute *attr, + const void *ns) +diff --git a/ipc/sem.c b/ipc/sem.c +index b2698ebdcb31..d6dd2dc9ddad 100644 +--- 
a/ipc/sem.c ++++ b/ipc/sem.c +@@ -2041,7 +2041,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops, + } + + do { +- queue.status = -EINTR; ++ WRITE_ONCE(queue.status, -EINTR); + queue.sleeper = current; + + __set_current_state(TASK_INTERRUPTIBLE); +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index a66e838640ea..5c90765d37e7 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -2531,7 +2531,7 @@ static int __init debugfs_kprobe_init(void) + if (!dir) + return -ENOMEM; + +- file = debugfs_create_file("list", 0444, dir, NULL, ++ file = debugfs_create_file("list", 0400, dir, NULL, + &debugfs_kprobes_operations); + if (!file) + goto error; +@@ -2541,7 +2541,7 @@ static int __init debugfs_kprobe_init(void) + if (!file) + goto error; + +- file = debugfs_create_file("blacklist", 0444, dir, NULL, ++ file = debugfs_create_file("blacklist", 0400, dir, NULL, + &debugfs_kprobe_blacklist_ops); + if (!file) + goto error; +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c +index 65cc0cb984e6..4ad35718f123 100644 +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -1466,6 +1466,29 @@ rt_mutex_fastunlock(struct rt_mutex *lock, + rt_mutex_postunlock(&wake_q); + } + ++static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) ++{ ++ might_sleep(); ++ ++ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); ++ rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); ++} ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++/** ++ * rt_mutex_lock_nested - lock a rt_mutex ++ * ++ * @lock: the rt_mutex to be locked ++ * @subclass: the lockdep subclass ++ */ ++void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass) ++{ ++ __rt_mutex_lock(lock, subclass); ++} ++EXPORT_SYMBOL_GPL(rt_mutex_lock_nested); ++#endif ++ ++#ifndef CONFIG_DEBUG_LOCK_ALLOC + /** + * rt_mutex_lock - lock a rt_mutex + * +@@ -1473,12 +1496,10 @@ rt_mutex_fastunlock(struct rt_mutex *lock, + */ + void __sched rt_mutex_lock(struct rt_mutex *lock) + { +- might_sleep(); +- +- mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); +- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); ++ __rt_mutex_lock(lock, 0); + } + EXPORT_SYMBOL_GPL(rt_mutex_lock); ++#endif + + /** + * rt_mutex_lock_interruptible - lock a rt_mutex interruptible +diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h +index 2a7d04049af4..0f1898820cba 100644 +--- a/kernel/printk/internal.h ++++ b/kernel/printk/internal.h +@@ -19,11 +19,16 @@ + #ifdef CONFIG_PRINTK + + #define PRINTK_SAFE_CONTEXT_MASK 0x3fffffff +-#define PRINTK_NMI_DEFERRED_CONTEXT_MASK 0x40000000 ++#define PRINTK_NMI_DIRECT_CONTEXT_MASK 0x40000000 + #define PRINTK_NMI_CONTEXT_MASK 0x80000000 + + extern raw_spinlock_t logbuf_lock; + ++__printf(5, 0) ++int vprintk_store(int facility, int level, ++ const char *dict, size_t dictlen, ++ const char *fmt, va_list args); ++ + __printf(1, 0) int vprintk_default(const char *fmt, va_list args); + __printf(1, 0) int vprintk_deferred(const char *fmt, va_list args); + __printf(1, 0) int vprintk_func(const char *fmt, va_list args); +@@ -54,6 +59,8 @@ void __printk_safe_exit(void); + local_irq_enable(); \ + } while (0) + ++void defer_console_output(void); ++ + #else + + __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; } +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c +index 512f7c2baedd..f0223a7d9ed1 100644 +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -1680,28 +1680,16 @@ static size_t log_output(int 
facility, int level, enum log_flags lflags, const c + return log_store(facility, level, lflags, 0, dict, dictlen, text, text_len); + } + +-asmlinkage int vprintk_emit(int facility, int level, +- const char *dict, size_t dictlen, +- const char *fmt, va_list args) ++/* Must be called under logbuf_lock. */ ++int vprintk_store(int facility, int level, ++ const char *dict, size_t dictlen, ++ const char *fmt, va_list args) + { + static char textbuf[LOG_LINE_MAX]; + char *text = textbuf; + size_t text_len; + enum log_flags lflags = 0; +- unsigned long flags; +- int printed_len; +- bool in_sched = false; +- +- if (level == LOGLEVEL_SCHED) { +- level = LOGLEVEL_DEFAULT; +- in_sched = true; +- } +- +- boot_delay_msec(level); +- printk_delay(); + +- /* This stops the holder of console_sem just where we want him */ +- logbuf_lock_irqsave(flags); + /* + * The printf needs to come first; we need the syslog + * prefix which might be passed-in as a parameter. +@@ -1742,8 +1730,29 @@ asmlinkage int vprintk_emit(int facility, int level, + if (dict) + lflags |= LOG_PREFIX|LOG_NEWLINE; + +- printed_len = log_output(facility, level, lflags, dict, dictlen, text, text_len); ++ return log_output(facility, level, lflags, ++ dict, dictlen, text, text_len); ++} + ++asmlinkage int vprintk_emit(int facility, int level, ++ const char *dict, size_t dictlen, ++ const char *fmt, va_list args) ++{ ++ int printed_len; ++ bool in_sched = false; ++ unsigned long flags; ++ ++ if (level == LOGLEVEL_SCHED) { ++ level = LOGLEVEL_DEFAULT; ++ in_sched = true; ++ } ++ ++ boot_delay_msec(level); ++ printk_delay(); ++ ++ /* This stops the holder of console_sem just where we want him */ ++ logbuf_lock_irqsave(flags); ++ printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args); + logbuf_unlock_irqrestore(flags); + + /* If called from the scheduler, we can not call up(). */ +@@ -2714,16 +2723,20 @@ void wake_up_klogd(void) + preempt_enable(); + } + +-int vprintk_deferred(const char *fmt, va_list args) ++void defer_console_output(void) + { +- int r; +- +- r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args); +- + preempt_disable(); + __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT); + irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); + preempt_enable(); ++} ++ ++int vprintk_deferred(const char *fmt, va_list args) ++{ ++ int r; ++ ++ r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args); ++ defer_console_output(); + + return r; + } +diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c +index 64825b2df3a5..d482fd61ac67 100644 +--- a/kernel/printk/printk_safe.c ++++ b/kernel/printk/printk_safe.c +@@ -311,24 +311,33 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args) + + void printk_nmi_enter(void) + { +- /* +- * The size of the extra per-CPU buffer is limited. Use it only when +- * the main one is locked. If this CPU is not in the safe context, +- * the lock must be taken on another CPU and we could wait for it. 
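For orientation: the printk rework above splits message storage out into vprintk_store(), which runs under logbuf_lock, and pushes console handling into defer_console_output(). That split is what lets the NMI changes in the next hunk try the lock and write straight into the main log buffer, falling back to the per-CPU buffer only when the lock is contended. A compilable sketch of the dispatch order vprintk_func() ends up with (the mask values are copied from the patch; everything else is stubbed for illustration):

    #include <stdio.h>

    #define PRINTK_SAFE_CONTEXT_MASK        0x3fffffff
    #define PRINTK_NMI_DIRECT_CONTEXT_MASK  0x40000000
    #define PRINTK_NMI_CONTEXT_MASK         0x80000000

    static unsigned int printk_context;            /* per-CPU in the kernel */
    static int logbuf_trylock(void) { return 1; }  /* raw_spin_trylock stand-in */

    static const char *pick_path(void)
    {
        if ((printk_context & PRINTK_NMI_DIRECT_CONTEXT_MASK) && logbuf_trylock())
            return "vprintk_store() + defer_console_output()";
        if (printk_context & PRINTK_NMI_CONTEXT_MASK)
            return "per-CPU NMI buffer";
        if (printk_context & PRINTK_SAFE_CONTEXT_MASK)
            return "per-CPU safe buffer";
        return "vprintk_default()";
    }

    int main(void)
    {
        /* ftrace_dump() in NMI context: both NMI bits set, the direct path wins */
        printk_context = PRINTK_NMI_CONTEXT_MASK | PRINTK_NMI_DIRECT_CONTEXT_MASK;
        printf("%s\n", pick_path());
        return 0;
    }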
+- */ +- if ((this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK) && +- raw_spin_is_locked(&logbuf_lock)) { +- this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK); +- } else { +- this_cpu_or(printk_context, PRINTK_NMI_DEFERRED_CONTEXT_MASK); +- } ++ this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK); + } + + void printk_nmi_exit(void) + { +- this_cpu_and(printk_context, +- ~(PRINTK_NMI_CONTEXT_MASK | +- PRINTK_NMI_DEFERRED_CONTEXT_MASK)); ++ this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK); ++} ++ ++/* ++ * Marks a code that might produce many messages in NMI context ++ * and the risk of losing them is more critical than eventual ++ * reordering. ++ * ++ * It has effect only when called in NMI context. Then printk() ++ * will try to store the messages into the main logbuf directly ++ * and use the per-CPU buffers only as a fallback when the lock ++ * is not available. ++ */ ++void printk_nmi_direct_enter(void) ++{ ++ if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK) ++ this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK); ++} ++ ++void printk_nmi_direct_exit(void) ++{ ++ this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK); + } + + #else +@@ -366,6 +375,20 @@ void __printk_safe_exit(void) + + __printf(1, 0) int vprintk_func(const char *fmt, va_list args) + { ++ /* ++ * Try to use the main logbuf even in NMI. But avoid calling console ++ * drivers that might have their own locks. ++ */ ++ if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) && ++ raw_spin_trylock(&logbuf_lock)) { ++ int len; ++ ++ len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args); ++ raw_spin_unlock(&logbuf_lock); ++ defer_console_output(); ++ return len; ++ } ++ + /* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */ + if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK) + return vprintk_nmi(fmt, args); +@@ -374,13 +397,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args) + if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK) + return vprintk_safe(fmt, args); + +- /* +- * Use the main logbuf when logbuf_lock is available in NMI. +- * But avoid calling console drivers that might have their own locks. +- */ +- if (this_cpu_read(printk_context) & PRINTK_NMI_DEFERRED_CONTEXT_MASK) +- return vprintk_deferred(fmt, args); +- + /* No obstacles. */ + return vprintk_default(fmt, args); + } +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c +index bba2217652ff..cb9a5b8532fa 100644 +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c +@@ -837,6 +837,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) + * can be time-consuming. Try to avoid it when possible. 
+ */ + raw_spin_lock(&rt_rq->rt_runtime_lock); ++ if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF) ++ rt_rq->rt_runtime = rt_b->rt_runtime; + skip = !rt_rq->rt_time && !rt_rq->rt_nr_running; + raw_spin_unlock(&rt_rq->rt_runtime_lock); + if (skip) +diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c +index e190d1ef3a23..067cb83f37ea 100644 +--- a/kernel/stop_machine.c ++++ b/kernel/stop_machine.c +@@ -81,6 +81,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) + unsigned long flags; + bool enabled; + ++ preempt_disable(); + raw_spin_lock_irqsave(&stopper->lock, flags); + enabled = stopper->enabled; + if (enabled) +@@ -90,6 +91,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) + raw_spin_unlock_irqrestore(&stopper->lock, flags); + + wake_up_q(&wakeq); ++ preempt_enable(); + + return enabled; + } +@@ -236,13 +238,24 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, + struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2); + DEFINE_WAKE_Q(wakeq); + int err; ++ + retry: ++ /* ++ * The waking up of stopper threads has to happen in the same ++ * scheduling context as the queueing. Otherwise, there is a ++ * possibility of one of the above stoppers being woken up by another ++ * CPU, and preempting us. This will cause us to not wake up the other ++ * stopper forever. ++ */ ++ preempt_disable(); + raw_spin_lock_irq(&stopper1->lock); + raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); + +- err = -ENOENT; +- if (!stopper1->enabled || !stopper2->enabled) ++ if (!stopper1->enabled || !stopper2->enabled) { ++ err = -ENOENT; + goto unlock; ++ } ++ + /* + * Ensure that if we race with __stop_cpus() the stoppers won't get + * queued up in reverse order leading to system deadlock. +@@ -253,36 +266,30 @@ retry: + * It can be falsely true but it is safe to spin until it is cleared, + * queue_stop_cpus_work() does everything under preempt_disable(). + */ +- err = -EDEADLK; +- if (unlikely(stop_cpus_in_progress)) +- goto unlock; ++ if (unlikely(stop_cpus_in_progress)) { ++ err = -EDEADLK; ++ goto unlock; ++ } + + err = 0; + __cpu_stop_queue_work(stopper1, work1, &wakeq); + __cpu_stop_queue_work(stopper2, work2, &wakeq); +- /* +- * The waking up of stopper threads has to happen +- * in the same scheduling context as the queueing. +- * Otherwise, there is a possibility of one of the +- * above stoppers being woken up by another CPU, +- * and preempting us. This will cause us to n ot +- * wake up the other stopper forever. 
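The stop_machine hunks move this comment and, more importantly, widen the preempt-disabled region: queueing work on both stoppers and waking them now happen in one non-preemptible section, closing the window in which a freshly woken stopper could preempt the queueing CPU before the second wakeup was issued. The resulting ordering, as a stub-typed sketch (it compiles, but every type and helper here is a placeholder, not the kernel API):

    struct stopper { int lock; };
    struct stop_work { int pending; };
    struct wake_q { int nr; };

    static void preempt_disable(void) { }
    static void preempt_enable(void) { }
    static void spin_lock(int *l) { (void)l; }
    static void spin_unlock(int *l) { (void)l; }
    static void queue_work(struct stopper *s, struct stop_work *w, struct wake_q *q)
    { (void)s; w->pending = 1; q->nr++; }
    static void wake_up_q(struct wake_q *q) { (void)q; }

    void queue_two_works(struct stopper *s1, struct stop_work *w1,
                         struct stopper *s2, struct stop_work *w2)
    {
        struct wake_q wakeq = { 0 };

        preempt_disable();          /* covers queueing AND waking */
        spin_lock(&s1->lock);
        spin_lock(&s2->lock);
        queue_work(s1, w1, &wakeq);
        queue_work(s2, w2, &wakeq);
        spin_unlock(&s2->lock);
        spin_unlock(&s1->lock);
        wake_up_q(&wakeq);          /* woken stoppers cannot preempt us yet */
        preempt_enable();           /* now they can run */
    }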
+- */ +- preempt_disable(); ++ + unlock: + raw_spin_unlock(&stopper2->lock); + raw_spin_unlock_irq(&stopper1->lock); + + if (unlikely(err == -EDEADLK)) { ++ preempt_enable(); ++ + while (stop_cpus_in_progress) + cpu_relax(); ++ + goto retry; + } + +- if (!err) { +- wake_up_q(&wakeq); +- preempt_enable(); +- } ++ wake_up_q(&wakeq); ++ preempt_enable(); + + return err; + } +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index fbc75c84076e..b7302c37c064 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -8187,6 +8187,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) + tracing_off(); + + local_irq_save(flags); ++ printk_nmi_direct_enter(); + + /* Simulate the iterator */ + trace_init_global_iter(&iter); +@@ -8266,7 +8267,8 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) + for_each_tracing_cpu(cpu) { + atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); + } +- atomic_dec(&dump_running); ++ atomic_dec(&dump_running); ++ printk_nmi_direct_exit(); + local_irq_restore(flags); + } + EXPORT_SYMBOL_GPL(ftrace_dump); +diff --git a/kernel/watchdog.c b/kernel/watchdog.c +index c8e06703e44c..087994b23f8b 100644 +--- a/kernel/watchdog.c ++++ b/kernel/watchdog.c +@@ -265,7 +265,7 @@ static void __touch_watchdog(void) + * entering idle state. This should only be used for scheduler events. + * Use touch_softlockup_watchdog() for everything else. + */ +-void touch_softlockup_watchdog_sched(void) ++notrace void touch_softlockup_watchdog_sched(void) + { + /* + * Preemption can be enabled. It doesn't matter which CPU's timestamp +@@ -274,7 +274,7 @@ void touch_softlockup_watchdog_sched(void) + raw_cpu_write(watchdog_touch_ts, 0); + } + +-void touch_softlockup_watchdog(void) ++notrace void touch_softlockup_watchdog(void) + { + touch_softlockup_watchdog_sched(); + wq_watchdog_touch(raw_smp_processor_id()); +diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c +index e449a23e9d59..4ece6028007a 100644 +--- a/kernel/watchdog_hld.c ++++ b/kernel/watchdog_hld.c +@@ -29,7 +29,7 @@ static struct cpumask dead_events_mask; + static unsigned long hardlockup_allcpu_dumped; + static atomic_t watchdog_cpus = ATOMIC_INIT(0); + +-void arch_touch_nmi_watchdog(void) ++notrace void arch_touch_nmi_watchdog(void) + { + /* + * Using __raw here because some code paths have +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index d8a7f8939c81..08bc551976b2 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -5484,7 +5484,7 @@ static void wq_watchdog_timer_fn(unsigned long data) + mod_timer(&wq_watchdog_timer, jiffies + thresh); + } + +-void wq_watchdog_touch(int cpu) ++notrace void wq_watchdog_touch(int cpu) + { + if (cpu >= 0) + per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; +diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c +index 46e4c749e4eb..70b1f9d830cd 100644 +--- a/lib/nmi_backtrace.c ++++ b/lib/nmi_backtrace.c +@@ -87,11 +87,9 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask, + + bool nmi_cpu_backtrace(struct pt_regs *regs) + { +- static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED; + int cpu = smp_processor_id(); + + if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { +- arch_spin_lock(&lock); + if (regs && cpu_in_idle(instruction_pointer(regs))) { + pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n", + cpu, instruction_pointer(regs)); +@@ -102,7 +100,6 @@ bool nmi_cpu_backtrace(struct pt_regs *regs) + else + dump_stack(); + } +- arch_spin_unlock(&lock); + cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); 
+ return true; + } +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index db69d938e9ed..6a9a7e1066ef 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -4110,6 +4110,14 @@ static struct cftype mem_cgroup_legacy_files[] = { + + static DEFINE_IDR(mem_cgroup_idr); + ++static void mem_cgroup_id_remove(struct mem_cgroup *memcg) ++{ ++ if (memcg->id.id > 0) { ++ idr_remove(&mem_cgroup_idr, memcg->id.id); ++ memcg->id.id = 0; ++ } ++} ++ + static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) + { + VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0); +@@ -4120,8 +4128,7 @@ static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) + { + VM_BUG_ON(atomic_read(&memcg->id.ref) < n); + if (atomic_sub_and_test(n, &memcg->id.ref)) { +- idr_remove(&mem_cgroup_idr, memcg->id.id); +- memcg->id.id = 0; ++ mem_cgroup_id_remove(memcg); + + /* Memcg ID pins CSS */ + css_put(&memcg->css); +@@ -4258,8 +4265,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) + idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); + return memcg; + fail: +- if (memcg->id.id > 0) +- idr_remove(&mem_cgroup_idr, memcg->id.id); ++ mem_cgroup_id_remove(memcg); + __mem_cgroup_free(memcg); + return NULL; + } +@@ -4318,6 +4324,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) + + return &memcg->css; + fail: ++ mem_cgroup_id_remove(memcg); + mem_cgroup_free(memcg); + return ERR_PTR(-ENOMEM); + } +diff --git a/mm/memory.c b/mm/memory.c +index 5539b1975091..c9657f013a4d 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -246,9 +246,6 @@ static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) + + tlb_flush(tlb); + mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); +-#ifdef CONFIG_HAVE_RCU_TABLE_FREE +- tlb_table_flush(tlb); +-#endif + __tlb_reset_range(tlb); + } + +@@ -256,6 +253,9 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb) + { + struct mmu_gather_batch *batch; + ++#ifdef CONFIG_HAVE_RCU_TABLE_FREE ++ tlb_table_flush(tlb); ++#endif + for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { + free_pages_and_swap_cache(batch->pages, batch->nr); + batch->nr = 0; +@@ -331,6 +331,21 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_ + * See the comment near struct mmu_table_batch. + */ + ++/* ++ * If we want tlb_remove_table() to imply TLB invalidates. ++ */ ++static inline void tlb_table_invalidate(struct mmu_gather *tlb) ++{ ++#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE ++ /* ++ * Invalidate page-table caches used by hardware walkers. Then we still ++ * need to RCU-sched wait while freeing the pages because software ++ * walkers can still be in-flight. 
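The rule this new mm/memory.c comment describes, stated compactly: on architectures that select HAVE_RCU_TABLE_INVALIDATE, hardware page-walk caches must be invalidated before a batch of page-table pages is handed to RCU for freeing, and likewise on the fallback path when no batch page could be allocated. A stub-typed sketch of that ordering (placeholder names, not the kernel's):

    #define MAX_TABLE_BATCH 8

    struct table_batch { void *tables[MAX_TABLE_BATCH]; int nr; };

    static void hw_invalidate(void) { }          /* tlb_flush_mmu_tlbonly() stand-in */
    static void rcu_free(struct table_batch *b) { (void)b; }

    void table_flush(struct table_batch *b)
    {
        if (b->nr) {
            hw_invalidate();   /* no hardware walker may still see the pages */
            rcu_free(b);       /* software walkers drain via RCU-sched */
            b->nr = 0;
        }
    }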
++ */ ++ tlb_flush_mmu_tlbonly(tlb); ++#endif ++} ++ + static void tlb_remove_table_smp_sync(void *arg) + { + /* Simply deliver the interrupt */ +@@ -367,6 +382,7 @@ void tlb_table_flush(struct mmu_gather *tlb) + struct mmu_table_batch **batch = &tlb->batch; + + if (*batch) { ++ tlb_table_invalidate(tlb); + call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); + *batch = NULL; + } +@@ -388,11 +404,13 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) + if (*batch == NULL) { + *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN); + if (*batch == NULL) { ++ tlb_table_invalidate(tlb); + tlb_remove_table_one(table); + return; + } + (*batch)->nr = 0; + } ++ + (*batch)->tables[(*batch)->nr++] = table; + if ((*batch)->nr == MAX_TABLE_BATCH) + tlb_table_flush(tlb); +@@ -1417,11 +1435,9 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, + do { + next = pmd_addr_end(addr, end); + if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { +- if (next - addr != HPAGE_PMD_SIZE) { +- VM_BUG_ON_VMA(vma_is_anonymous(vma) && +- !rwsem_is_locked(&tlb->mm->mmap_sem), vma); ++ if (next - addr != HPAGE_PMD_SIZE) + __split_huge_pmd(vma, pmd, addr, false, NULL); +- } else if (zap_huge_pmd(tlb, vma, pmd, addr)) ++ else if (zap_huge_pmd(tlb, vma, pmd, addr)) + goto next; + /* fall through */ + } +@@ -4350,6 +4366,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, + return -EINVAL; + + maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); ++ if (!maddr) ++ return -ENOMEM; ++ + if (write) + memcpy_toio(maddr + offset, buf, len); + else +diff --git a/mm/zswap.c b/mm/zswap.c +index 597008a44f70..ebb0bc88c5f7 100644 +--- a/mm/zswap.c ++++ b/mm/zswap.c +@@ -989,6 +989,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, + ret = -ENOMEM; + goto reject; + } ++ ++ /* A second zswap_is_full() check after ++ * zswap_shrink() to make sure it's now ++ * under the max_pool_percent ++ */ ++ if (zswap_is_full()) { ++ ret = -ENOMEM; ++ goto reject; ++ } + } + + /* allocate entry */ +diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c +index 2d38b6e34203..98b62a7990aa 100644 +--- a/net/caif/caif_dev.c ++++ b/net/caif/caif_dev.c +@@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb) + caifd = caif_get(skb->dev); + + WARN_ON(caifd == NULL); +- if (caifd == NULL) ++ if (!caifd) { ++ rcu_read_unlock(); + return; ++ } + + caifd_hold(caifd); + rcu_read_unlock(); +diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c +index 1307731ddfe4..832d69649cb6 100644 +--- a/net/core/lwt_bpf.c ++++ b/net/core/lwt_bpf.c +@@ -217,7 +217,7 @@ static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog, + if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME]) + return -EINVAL; + +- prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_KERNEL); ++ prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC); + if (!prog->name) + return -ENOMEM; + +diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c +index 89910e2c10f4..f112fef79216 100644 +--- a/net/ipv6/esp6.c ++++ b/net/ipv6/esp6.c +@@ -651,8 +651,10 @@ skip_cow: + + sg_init_table(sg, nfrags); + ret = skb_to_sgvec(skb, sg, 0, skb->len); +- if (unlikely(ret < 0)) ++ if (unlikely(ret < 0)) { ++ kfree(tmp); + goto out; ++ } + + skb->ip_summed = CHECKSUM_NONE; + +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c +index 0e0ab90a4334..b9e638cc955f 100644 +--- a/net/ipv6/ip6_vti.c ++++ b/net/ipv6/ip6_vti.c +@@ -480,10 +480,6 @@ vti6_xmit(struct sk_buff *skb, struct net_device 
*dev, struct flowi *fl) + goto tx_err_dst_release; + } + +- skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); +- skb_dst_set(skb, dst); +- skb->dev = skb_dst(skb)->dev; +- + mtu = dst_mtu(dst); + if (!skb->ignore_df && skb->len > mtu) { + skb_dst_update_pmtu(skb, mtu); +@@ -498,9 +494,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) + htonl(mtu)); + } + +- return -EMSGSIZE; ++ err = -EMSGSIZE; ++ goto tx_err_dst_release; + } + ++ skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); ++ skb_dst_set(skb, dst); ++ skb->dev = skb_dst(skb)->dev; ++ + err = dst_output(t->net, skb->sk, skb); + if (net_xmit_eval(err) == 0) { + struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); +diff --git a/net/mac80211/util.c b/net/mac80211/util.c +index 6aef6793d052..81f120466c38 100644 +--- a/net/mac80211/util.c ++++ b/net/mac80211/util.c +@@ -2068,7 +2068,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) + if (!sta->uploaded) + continue; + +- if (sta->sdata->vif.type != NL80211_IFTYPE_AP) ++ if (sta->sdata->vif.type != NL80211_IFTYPE_AP && ++ sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN) + continue; + + for (state = IEEE80211_STA_NOTEXIST; +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index 9a945024a0b6..742aacb317e5 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -1480,7 +1480,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, + struct nft_base_chain *basechain; + struct nft_stats *stats = NULL; + struct nft_chain_hook hook; +- const struct nlattr *name; + struct nf_hook_ops *ops; + struct nft_trans *trans; + int err, i; +@@ -1531,12 +1530,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, + return PTR_ERR(stats); + } + ++ err = -ENOMEM; + trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN, + sizeof(struct nft_trans_chain)); +- if (trans == NULL) { +- free_percpu(stats); +- return -ENOMEM; +- } ++ if (trans == NULL) ++ goto err; + + nft_trans_chain_stats(trans) = stats; + nft_trans_chain_update(trans) = true; +@@ -1546,19 +1544,37 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, + else + nft_trans_chain_policy(trans) = -1; + +- name = nla[NFTA_CHAIN_NAME]; +- if (nla[NFTA_CHAIN_HANDLE] && name) { +- nft_trans_chain_name(trans) = +- nla_strdup(name, GFP_KERNEL); +- if (!nft_trans_chain_name(trans)) { +- kfree(trans); +- free_percpu(stats); +- return -ENOMEM; ++ if (nla[NFTA_CHAIN_HANDLE] && ++ nla[NFTA_CHAIN_NAME]) { ++ struct nft_trans *tmp; ++ char *name; ++ ++ err = -ENOMEM; ++ name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL); ++ if (!name) ++ goto err; ++ ++ err = -EEXIST; ++ list_for_each_entry(tmp, &ctx->net->nft.commit_list, list) { ++ if (tmp->msg_type == NFT_MSG_NEWCHAIN && ++ tmp->ctx.table == table && ++ nft_trans_chain_update(tmp) && ++ nft_trans_chain_name(tmp) && ++ strcmp(name, nft_trans_chain_name(tmp)) == 0) { ++ kfree(name); ++ goto err; ++ } + } ++ ++ nft_trans_chain_name(trans) = name; + } + list_add_tail(&trans->list, &ctx->net->nft.commit_list); + + return 0; ++err: ++ free_percpu(stats); ++ kfree(trans); ++ return err; + } + + static int nf_tables_newchain(struct net *net, struct sock *nlsk, +@@ -5043,6 +5059,9 @@ static void nf_tables_commit_release(struct nft_trans *trans) + case NFT_MSG_DELTABLE: + nf_tables_table_destroy(&trans->ctx); + break; ++ case NFT_MSG_NEWCHAIN: ++ kfree(nft_trans_chain_name(trans)); ++ break; + case NFT_MSG_DELCHAIN: + 
nf_tables_chain_destroy(trans->ctx.chain); + break; +@@ -5100,13 +5119,15 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) + nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE); + break; + case NFT_MSG_NEWCHAIN: +- if (nft_trans_chain_update(trans)) ++ if (nft_trans_chain_update(trans)) { + nft_chain_commit_update(trans); +- else ++ nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); ++ /* trans destroyed after rcu grace period */ ++ } else { + nft_clear(net, trans->ctx.chain); +- +- nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); +- nft_trans_destroy(trans); ++ nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); ++ nft_trans_destroy(trans); ++ } + break; + case NFT_MSG_DELCHAIN: + list_del_rcu(&trans->ctx.chain->list); +@@ -5246,7 +5267,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb) + case NFT_MSG_NEWCHAIN: + if (nft_trans_chain_update(trans)) { + free_percpu(nft_trans_chain_stats(trans)); +- ++ kfree(nft_trans_chain_name(trans)); + nft_trans_destroy(trans); + } else { + trans->ctx.table->use--; +diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c +index 9c0d5a7ce5f9..33aa2ac3a62e 100644 +--- a/net/netfilter/nft_set_hash.c ++++ b/net/netfilter/nft_set_hash.c +@@ -359,6 +359,7 @@ static void nft_rhash_destroy(const struct nft_set *set) + struct nft_rhash *priv = nft_set_priv(set); + + cancel_delayed_work_sync(&priv->gc_work); ++ rcu_barrier(); + rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy, + (void *)set); + } +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index 4cd351b74e48..753f3e73c498 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -4186,6 +4186,7 @@ static int parse_station_flags(struct genl_info *info, + params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) | + BIT(NL80211_STA_FLAG_MFP) | + BIT(NL80211_STA_FLAG_AUTHORIZED); ++ break; + default: + return -EINVAL; + } +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 9c57d6a5816c..a6c0027cadb5 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -2285,6 +2285,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, + if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) + return make_blackhole(net, dst_orig->ops->family, dst_orig); + ++ if (IS_ERR(dst)) ++ dst_release(dst_orig); ++ + return dst; + } + EXPORT_SYMBOL(xfrm_lookup_route); +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c +index dde40f995ac0..5554d28a32eb 100644 +--- a/net/xfrm/xfrm_user.c ++++ b/net/xfrm/xfrm_user.c +@@ -1021,10 +1021,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb, + { + struct sock *nlsk = rcu_dereference(net->xfrm.nlsk); + +- if (nlsk) +- return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); +- else +- return -1; ++ if (!nlsk) { ++ kfree_skb(skb); ++ return -EPIPE; ++ } ++ ++ return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); + } + + static inline size_t xfrm_spdinfo_msgsize(void) +diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h +index ffd1dfaa1cc1..f46750053377 100644 +--- a/scripts/gcc-plugins/gcc-common.h ++++ b/scripts/gcc-plugins/gcc-common.h +@@ -97,6 +97,10 @@ + #include "predict.h" + #include "ipa-utils.h" + ++#if BUILDING_GCC_VERSION >= 8000 ++#include "stringpool.h" ++#endif ++ + #if BUILDING_GCC_VERSION >= 4009 + #include "attribs.h" + #include "varasm.h" +diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c 
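
The single added break in nl80211's parse_station_flags() (a few hunks up) is worth spelling out: without it, the case that had just filled in a valid sta_flags_mask fell straight through into default and returned -EINVAL, rejecting a well-formed request. A toy reproduction of that control flow, with invented values:

#include <stdio.h>

static int parse(int type)
{
    int mask = 0;

    switch (type) {
    case 1:
        mask = 0x7;
        break;  /* without this break, control falls into default
                 * and the valid case is rejected with -EINVAL */
    default:
        return -22;     /* -EINVAL on Linux */
    }
    return mask;
}

int main(void)
{
    printf("%d\n", parse(1));   /* 7 with the break, -22 without it */
    return 0;
}
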
+index 65264960910d..cbe1d6c4b1a5 100644 +--- a/scripts/gcc-plugins/latent_entropy_plugin.c ++++ b/scripts/gcc-plugins/latent_entropy_plugin.c +@@ -255,21 +255,14 @@ static tree handle_latent_entropy_attribute(tree *node, tree name, + return NULL_TREE; + } + +-static struct attribute_spec latent_entropy_attr = { +- .name = "latent_entropy", +- .min_length = 0, +- .max_length = 0, +- .decl_required = true, +- .type_required = false, +- .function_type_required = false, +- .handler = handle_latent_entropy_attribute, +-#if BUILDING_GCC_VERSION >= 4007 +- .affects_type_identity = false +-#endif +-}; ++static struct attribute_spec latent_entropy_attr = { }; + + static void register_attributes(void *event_data __unused, void *data __unused) + { ++ latent_entropy_attr.name = "latent_entropy"; ++ latent_entropy_attr.decl_required = true; ++ latent_entropy_attr.handler = handle_latent_entropy_attribute; ++ + register_attribute(&latent_entropy_attr); + } + +diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c +index 0073af326449..c4a345c3715b 100644 +--- a/scripts/gcc-plugins/randomize_layout_plugin.c ++++ b/scripts/gcc-plugins/randomize_layout_plugin.c +@@ -580,68 +580,35 @@ static void finish_type(void *event_data, void *data) + return; + } + +-static struct attribute_spec randomize_layout_attr = { +- .name = "randomize_layout", +- // related to args +- .min_length = 0, +- .max_length = 0, +- .decl_required = false, +- // need type declaration +- .type_required = true, +- .function_type_required = false, +- .handler = handle_randomize_layout_attr, +-#if BUILDING_GCC_VERSION >= 4007 +- .affects_type_identity = true +-#endif +-}; ++static struct attribute_spec randomize_layout_attr = { }; ++static struct attribute_spec no_randomize_layout_attr = { }; ++static struct attribute_spec randomize_considered_attr = { }; ++static struct attribute_spec randomize_performed_attr = { }; + +-static struct attribute_spec no_randomize_layout_attr = { +- .name = "no_randomize_layout", +- // related to args +- .min_length = 0, +- .max_length = 0, +- .decl_required = false, +- // need type declaration +- .type_required = true, +- .function_type_required = false, +- .handler = handle_randomize_layout_attr, ++static void register_attributes(void *event_data, void *data) ++{ ++ randomize_layout_attr.name = "randomize_layout"; ++ randomize_layout_attr.type_required = true; ++ randomize_layout_attr.handler = handle_randomize_layout_attr; + #if BUILDING_GCC_VERSION >= 4007 +- .affects_type_identity = true ++ randomize_layout_attr.affects_type_identity = true; + #endif +-}; + +-static struct attribute_spec randomize_considered_attr = { +- .name = "randomize_considered", +- // related to args +- .min_length = 0, +- .max_length = 0, +- .decl_required = false, +- // need type declaration +- .type_required = true, +- .function_type_required = false, +- .handler = handle_randomize_considered_attr, ++ no_randomize_layout_attr.name = "no_randomize_layout"; ++ no_randomize_layout_attr.type_required = true; ++ no_randomize_layout_attr.handler = handle_randomize_layout_attr; + #if BUILDING_GCC_VERSION >= 4007 +- .affects_type_identity = false ++ no_randomize_layout_attr.affects_type_identity = true; + #endif +-}; + +-static struct attribute_spec randomize_performed_attr = { +- .name = "randomize_performed", +- // related to args +- .min_length = 0, +- .max_length = 0, +- .decl_required = false, +- // need type declaration +- .type_required = true, +- .function_type_required = 
false, +- .handler = handle_randomize_performed_attr, +-#if BUILDING_GCC_VERSION >= 4007 +- .affects_type_identity = false +-#endif +-}; ++ randomize_considered_attr.name = "randomize_considered"; ++ randomize_considered_attr.type_required = true; ++ randomize_considered_attr.handler = handle_randomize_considered_attr; ++ ++ randomize_performed_attr.name = "randomize_performed"; ++ randomize_performed_attr.type_required = true; ++ randomize_performed_attr.handler = handle_randomize_performed_attr; + +-static void register_attributes(void *event_data, void *data) +-{ + register_attribute(&randomize_layout_attr); + register_attribute(&no_randomize_layout_attr); + register_attribute(&randomize_considered_attr); +diff --git a/scripts/gcc-plugins/structleak_plugin.c b/scripts/gcc-plugins/structleak_plugin.c +index 3f8dd4868178..10292f791e99 100644 +--- a/scripts/gcc-plugins/structleak_plugin.c ++++ b/scripts/gcc-plugins/structleak_plugin.c +@@ -57,21 +57,16 @@ static tree handle_user_attribute(tree *node, tree name, tree args, int flags, b + return NULL_TREE; + } + +-static struct attribute_spec user_attr = { +- .name = "user", +- .min_length = 0, +- .max_length = 0, +- .decl_required = false, +- .type_required = false, +- .function_type_required = false, +- .handler = handle_user_attribute, +-#if BUILDING_GCC_VERSION >= 4007 +- .affects_type_identity = true +-#endif +-}; ++static struct attribute_spec user_attr = { }; + + static void register_attributes(void *event_data, void *data) + { ++ user_attr.name = "user"; ++ user_attr.handler = handle_user_attribute; ++#if BUILDING_GCC_VERSION >= 4007 ++ user_attr.affects_type_identity = true; ++#endif ++ + register_attribute(&user_attr); + } + +diff --git a/sound/soc/sirf/sirf-usp.c b/sound/soc/sirf/sirf-usp.c +index 77e7dcf969d0..d70fcd4a1adf 100644 +--- a/sound/soc/sirf/sirf-usp.c ++++ b/sound/soc/sirf/sirf-usp.c +@@ -370,10 +370,9 @@ static int sirf_usp_pcm_probe(struct platform_device *pdev) + platform_set_drvdata(pdev, usp); + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +- base = devm_ioremap(&pdev->dev, mem_res->start, +- resource_size(mem_res)); +- if (base == NULL) +- return -ENOMEM; ++ base = devm_ioremap_resource(&pdev->dev, mem_res); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); + usp->regmap = devm_regmap_init_mmio(&pdev->dev, base, + &sirf_usp_regmap_config); + if (IS_ERR(usp->regmap)) +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index 3d0dab8282ad..6fc85199ac73 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -1607,6 +1607,14 @@ static u64 dpcm_runtime_base_format(struct snd_pcm_substream *substream) + int i; + + for (i = 0; i < be->num_codecs; i++) { ++ /* ++ * Skip CODECs which don't support the current stream ++ * type. 
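
The sirf-usp hunk above trades devm_ioremap() for devm_ioremap_resource(), which also validates mem_res (including the NULL case the old code would have dereferenced) and reports failure as an encoded error pointer rather than NULL, so the probe path forwards PTR_ERR(base) instead of a blanket -ENOMEM. A self-contained sketch of the error-pointer convention, with deliberately simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() macros:

#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static int fake_reg;    /* stands in for a mapped register window */

static void *ioremap_resource(int resource_present)
{
    if (!resource_present)
        return ERR_PTR(-19);    /* -ENODEV: missing/invalid resource */
    return &fake_reg;
}

int main(void)
{
    void *base = ioremap_resource(0);

    if (IS_ERR(base)) {
        /* Propagate the encoded errno, as the patch does with
         * PTR_ERR(base), instead of guessing -ENOMEM. */
        printf("probe fails with %ld\n", PTR_ERR(base));
        return 1;
    }
    printf("mapped at %p\n", base);
    return 0;
}
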
See soc_pcm_init_runtime_hw() for more details ++ */ ++ if (!snd_soc_dai_stream_valid(be->codec_dais[i], ++ stream)) ++ continue; ++ + codec_dai_drv = be->codec_dais[i]->driver; + if (stream == SNDRV_PCM_STREAM_PLAYBACK) + codec_stream = &codec_dai_drv->playback; +diff --git a/sound/soc/zte/zx-tdm.c b/sound/soc/zte/zx-tdm.c +index dc955272f58b..389272eeba9a 100644 +--- a/sound/soc/zte/zx-tdm.c ++++ b/sound/soc/zte/zx-tdm.c +@@ -144,8 +144,8 @@ static void zx_tdm_rx_dma_en(struct zx_tdm_info *tdm, bool on) + #define ZX_TDM_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000) + + #define ZX_TDM_FMTBIT \ +- (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_MU_LAW | \ +- SNDRV_PCM_FORMAT_A_LAW) ++ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_MU_LAW | \ ++ SNDRV_PCM_FMTBIT_A_LAW) + + static int zx_tdm_dai_probe(struct snd_soc_dai *dai) + { +diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c +index bd9c6b31a504..1512086c8cb8 100644 +--- a/tools/power/x86/turbostat/turbostat.c ++++ b/tools/power/x86/turbostat/turbostat.c +@@ -1038,9 +1038,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_ + if (!printed || !summary_only) + print_header("\t"); + +- if (topo.num_cpus > 1) +- format_counters(&average.threads, &average.cores, +- &average.packages); ++ format_counters(&average.threads, &average.cores, &average.packages); + + printed = 1; + +@@ -4031,7 +4029,9 @@ void process_cpuid() + family = (fms >> 8) & 0xf; + model = (fms >> 4) & 0xf; + stepping = fms & 0xf; +- if (family == 6 || family == 0xf) ++ if (family == 0xf) ++ family += (fms >> 20) & 0xff; ++ if (family >= 6) + model += ((fms >> 16) & 0xf) << 4; + + if (!quiet) { +diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc +new file mode 100644 +index 000000000000..3b1f45e13a2e +--- /dev/null ++++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc +@@ -0,0 +1,28 @@ ++#!/bin/sh ++# description: Snapshot and tracing setting ++# flags: instance ++ ++[ ! -f snapshot ] && exit_unsupported ++ ++echo "Set tracing off" ++echo 0 > tracing_on ++ ++echo "Allocate and take a snapshot" ++echo 1 > snapshot ++ ++# Since trace buffer is empty, snapshot is also empty, but allocated ++grep -q "Snapshot is allocated" snapshot ++ ++echo "Ensure keep tracing off" ++test `cat tracing_on` -eq 0 ++ ++echo "Set tracing on" ++echo 1 > tracing_on ++ ++echo "Take a snapshot again" ++echo 1 > snapshot ++ ++echo "Ensure keep tracing on" ++test `cat tracing_on` -eq 1 ++ ++exit 0 +diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c +index 95dd14648ba5..0f395dfb7774 100644 +--- a/tools/usb/ffs-test.c ++++ b/tools/usb/ffs-test.c +@@ -44,12 +44,25 @@ + + /******************** Little Endian Handling ********************************/ + +-#define cpu_to_le16(x) htole16(x) +-#define cpu_to_le32(x) htole32(x) ++/* ++ * cpu_to_le16/32 are used when initializing structures, a context where a ++ * function call is not allowed. To solve this, we code cpu_to_le16/32 in a way ++ * that allows them to be used when initializing structures. 
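
The turbostat change above corrects how CPUID(1).EAX is decoded: the extended-family bits only contribute when the base family is 0xf, while the extended-model bits apply to any family of 6 or above, not just exactly 6 or 0xf as the old condition had it. Worked through on 0x000806e9, a sample EAX value chosen here purely for illustration:

#include <stdio.h>

int main(void)
{
    unsigned int fms = 0x000806e9;          /* sample CPUID.1 EAX */
    unsigned int family = (fms >> 8) & 0xf; /* 6 */
    unsigned int model = (fms >> 4) & 0xf;  /* 0xe */
    unsigned int stepping = fms & 0xf;      /* 9 */

    if (family == 0xf)
        family += (fms >> 20) & 0xff;       /* extended family */
    if (family >= 6)
        model += ((fms >> 16) & 0xf) << 4;  /* extended model: 0x8e */

    printf("family 0x%x model 0x%x stepping %u\n",
           family, model, stepping);        /* family 0x6 model 0x8e stepping 9 */
    return 0;
}
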
++ */ ++ ++#if __BYTE_ORDER == __LITTLE_ENDIAN ++#define cpu_to_le16(x) (x) ++#define cpu_to_le32(x) (x) ++#else ++#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)) ++#define cpu_to_le32(x) \ ++ ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \ ++ (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24)) ++#endif ++ + #define le32_to_cpu(x) le32toh(x) + #define le16_to_cpu(x) le16toh(x) + +- + /******************** Messages and Errors ***********************************/ + + static const char argv0[] = "ffs-test"; +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c +index b69798a7880e..ec275b8472a9 100644 +--- a/virt/kvm/arm/mmu.c ++++ b/virt/kvm/arm/mmu.c +@@ -901,19 +901,35 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache + pmd = stage2_get_pmd(kvm, cache, addr); + VM_BUG_ON(!pmd); + +- /* +- * Mapping in huge pages should only happen through a fault. If a +- * page is merged into a transparent huge page, the individual +- * subpages of that huge page should be unmapped through MMU +- * notifiers before we get here. +- * +- * Merging of CompoundPages is not supported; they should become +- * splitting first, unmapped, merged, and mapped back in on-demand. +- */ +- VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd)); +- + old_pmd = *pmd; + if (pmd_present(old_pmd)) { ++ /* ++ * Multiple vcpus faulting on the same PMD entry, can ++ * lead to them sequentially updating the PMD with the ++ * same value. Following the break-before-make ++ * (pmd_clear() followed by tlb_flush()) process can ++ * hinder forward progress due to refaults generated ++ * on missing translations. ++ * ++ * Skip updating the page table if the entry is ++ * unchanged. ++ */ ++ if (pmd_val(old_pmd) == pmd_val(*new_pmd)) ++ return 0; ++ ++ /* ++ * Mapping in huge pages should only happen through a ++ * fault. If a page is merged into a transparent huge ++ * page, the individual subpages of that huge page ++ * should be unmapped through MMU notifiers before we ++ * get here. ++ * ++ * Merging of CompoundPages is not supported; they ++ * should become splitting first, unmapped, merged, ++ * and mapped back in on-demand. ++ */ ++ VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); ++ + pmd_clear(pmd); + kvm_tlb_flush_vmid_ipa(kvm, addr); + } else { +@@ -969,6 +985,10 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + /* Create 2nd stage page table mapping - Level 3 */ + old_pte = *pte; + if (pte_present(old_pte)) { ++ /* Skip page table update if there is no change */ ++ if (pte_val(old_pte) == pte_val(*new_pte)) ++ return 0; ++ + kvm_set_pte(pte, __pte(0)); + kvm_tlb_flush_vmid_ipa(kvm, addr); + } else { |
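
The cpu_to_le16/32 rewrite in tools/usb/ffs-test.c above is deliberately pure shift-and-mask arithmetic: unlike the htole16()/htole32() calls it replaces, the macro forms are constant expressions, so they can appear in static initializers of the descriptor structures. A quick standalone check of the 32-bit form (this copies the big-endian branch shown above; on a little-endian host cpu_to_le32() is simply the identity):

#include <stdio.h>

#define swap32(x) \
    ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \
     (((x) & 0x0000ff00u) << 8)  | (((x) & 0x000000ffu) << 24))

/* Legal at file scope precisely because swap32() is a constant
 * expression; a call to htole32() here would not compile. */
static const unsigned int magic = swap32(0x12345678u);

int main(void)
{
    printf("0x%08x -> 0x%08x\n", 0x12345678u, magic);   /* -> 0x78563412 */
    return 0;
}
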