author | 2015-03-28 17:58:56 -0400
committer | 2015-03-28 17:58:56 -0400
commit | d332fb06507d41e7224741bc0a33c63d5506acb8 (patch)
tree | b5ec15bc9e82d6a361bdd1099fbaed4044b1f44e
parent | Add check to saved_root_name for supported filesystem path naming. (diff)
download | linux-patches-d332fb06507d41e7224741bc0a33c63d5506acb8.tar.gz, linux-patches-d332fb06507d41e7224741bc0a33c63d5506acb8.tar.bz2, linux-patches-d332fb06507d41e7224741bc0a33c63d5506acb8.zip
Linux patch 3.12.39 (tag: 3.12-39)
-rw-r--r-- | 0000_README | 4
-rw-r--r-- | 1038_linux-3.12.39.patch | 6134
2 files changed, 6138 insertions, 0 deletions
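The file added by this commit is a plain unified diff meant to be applied on top of the previous genpatches level. As a minimal sketch of applying it by hand — assuming a 3.12 tree that already carries the earlier patches up to 3.12.38; the directory names below are illustrative, and Gentoo users normally get this automatically through the gentoo-sources ebuild:

```
# Illustrative only: apply the new patch level on top of an existing tree.
cd linux-3.12                         # tree currently at SUBLEVEL = 38
patch -p1 < ../1038_linux-3.12.39.patch
grep '^SUBLEVEL' Makefile             # should now print SUBLEVEL = 39
```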
diff --git a/0000_README b/0000_README index 12b42452..a488cd43 100644 --- a/0000_README +++ b/0000_README @@ -194,6 +194,10 @@ Patch: 1037_linux-3.12.38.patch From: http://www.kernel.org Desc: Linux 3.12.38 +Patch: 1038_linux-3.12.39.patch +From: http://www.kernel.org +Desc: Linux 3.12.39 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1038_linux-3.12.39.patch b/1038_linux-3.12.39.patch new file mode 100644 index 00000000..398987d7 --- /dev/null +++ b/1038_linux-3.12.39.patch @@ -0,0 +1,6134 @@ +diff --git a/Makefile b/Makefile +index 0cd1625c4fae..18a1d91bda79 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 12 +-SUBLEVEL = 38 ++SUBLEVEL = 39 + EXTRAVERSION = + NAME = One Giant Leap for Frogkind + +diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c +index 98838a05ba6d..9d0ac091a52a 100644 +--- a/arch/alpha/mm/fault.c ++++ b/arch/alpha/mm/fault.c +@@ -156,6 +156,8 @@ retry: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h +index 6b0b7f7ef783..7670f33b9ce2 100644 +--- a/arch/arc/include/asm/pgtable.h ++++ b/arch/arc/include/asm/pgtable.h +@@ -259,7 +259,8 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) + #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0) + + #define pte_page(x) (mem_map + \ +- (unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT))) ++ (unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \ ++ PAGE_SHIFT))) + + #define mk_pte(page, pgprot) \ + ({ \ +diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h +index 15334ab66b56..fb95aa807215 100644 +--- a/arch/arc/include/asm/processor.h ++++ b/arch/arc/include/asm/processor.h +@@ -69,18 +69,19 @@ unsigned long thread_saved_pc(struct task_struct *t); + #define release_segments(mm) do { } while (0) + + #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret) ++#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) + + /* + * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode. + * Look in process.c for details of kernel stack layout + */ +-#define KSTK_ESP(tsk) (tsk->thread.ksp) ++#define TSK_K_ESP(tsk) (tsk->thread.ksp) + +-#define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \ ++#define TSK_K_REG(tsk, off) (*((unsigned int *)(TSK_K_ESP(tsk) + \ + sizeof(struct callee_regs) + off))) + +-#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4) +-#define KSTK_FP(tsk) KSTK_REG(tsk, 0) ++#define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4) ++#define TSK_K_FP(tsk) TSK_K_REG(tsk, 0) + + /* + * Do necessary setup to start up a newly executed thread. 
+diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c +index f8b7d880304d..9c9e1d3ec5fe 100644 +--- a/arch/arc/kernel/stacktrace.c ++++ b/arch/arc/kernel/stacktrace.c +@@ -64,9 +64,9 @@ static void seed_unwind_frame_info(struct task_struct *tsk, + + frame_info->task = tsk; + +- frame_info->regs.r27 = KSTK_FP(tsk); +- frame_info->regs.r28 = KSTK_ESP(tsk); +- frame_info->regs.r31 = KSTK_BLINK(tsk); ++ frame_info->regs.r27 = TSK_K_FP(tsk); ++ frame_info->regs.r28 = TSK_K_ESP(tsk); ++ frame_info->regs.r31 = TSK_K_BLINK(tsk); + frame_info->regs.r63 = (unsigned int)__switch_to; + + /* In the prologue of __switch_to, first FP is saved on stack +diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c +index 0c14d8a52683..babd9462d2c4 100644 +--- a/arch/arc/mm/fault.c ++++ b/arch/arc/mm/fault.c +@@ -162,6 +162,8 @@ good_area: + /* TBD: switch to pagefault_out_of_memory() */ + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + +diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi +index e6e952e32117..b9d31187d0de 100644 +--- a/arch/arm/boot/dts/am335x-bone-common.dtsi ++++ b/arch/arm/boot/dts/am335x-bone-common.dtsi +@@ -134,6 +134,7 @@ + + usb@47401000 { + status = "okay"; ++ dr_mode = "peripheral"; + }; + + usb@47401800 { +diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +index 18f333c440db..3d41b06a9926 100644 +--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c ++++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +@@ -1669,7 +1669,7 @@ static struct omap_hwmod dra7xx_uart3_hwmod = { + .class = &dra7xx_uart_hwmod_class, + .clkdm_name = "l4per_clkdm", + .main_clk = "uart3_gfclk_mux", +- .flags = HWMOD_SWSUP_SIDLE_ACT, ++ .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP4UART3_FLAGS, + .prcm = { + .omap4 = { + .clkctrl_offs = DRA7XX_CM_L4PER_UART3_CLKCTRL_OFFSET, +diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c +index f162f1b77cd2..82fd9dd17ed1 100644 +--- a/arch/arm/mach-pxa/corgi.c ++++ b/arch/arm/mach-pxa/corgi.c +@@ -26,6 +26,7 @@ + #include <linux/i2c.h> + #include <linux/i2c/pxa-i2c.h> + #include <linux/io.h> ++#include <linux/regulator/machine.h> + #include <linux/spi/spi.h> + #include <linux/spi/ads7846.h> + #include <linux/spi/corgi_lcd.h> +@@ -711,6 +712,8 @@ static void __init corgi_init(void) + sharpsl_nand_partitions[1].size = 53 * 1024 * 1024; + + platform_add_devices(devices, ARRAY_SIZE(devices)); ++ ++ regulator_has_full_constraints(); + } + + static void __init fixup_corgi(struct tag *tags, char **cmdline, +diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c +index 133109ec7332..a07accfb3aec 100644 +--- a/arch/arm/mach-pxa/hx4700.c ++++ b/arch/arm/mach-pxa/hx4700.c +@@ -891,6 +891,8 @@ static void __init hx4700_init(void) + mdelay(10); + gpio_set_value(GPIO71_HX4700_ASIC3_nRESET, 1); + mdelay(10); ++ ++ regulator_has_full_constraints(); + } + + MACHINE_START(H4700, "HP iPAQ HX4700") +diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c +index aedf053a1de5..b4fff2998b8a 100644 +--- a/arch/arm/mach-pxa/poodle.c ++++ b/arch/arm/mach-pxa/poodle.c +@@ -25,6 +25,7 @@ + #include <linux/gpio.h> + #include <linux/i2c.h> + #include <linux/i2c/pxa-i2c.h> ++#include <linux/regulator/machine.h> + #include <linux/spi/spi.h> + #include <linux/spi/ads7846.h> + #include <linux/spi/pxa2xx_spi.h> +@@ -454,6 +455,7 @@ static void __init 
poodle_init(void) + pxa_set_i2c_info(NULL); + i2c_register_board_info(0, ARRAY_AND_SIZE(poodle_i2c_devices)); + poodle_init_spi(); ++ regulator_has_full_constraints(); + } + + static void __init fixup_poodle(struct tag *tags, char **cmdline, +diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c +index 6645d1e31f14..34853d5dfda2 100644 +--- a/arch/arm/mach-sa1100/pm.c ++++ b/arch/arm/mach-sa1100/pm.c +@@ -81,6 +81,7 @@ static int sa11x0_pm_enter(suspend_state_t state) + /* + * Ensure not to come back here if it wasn't intended + */ ++ RCSR = RCSR_SMR; + PSPR = 0; + + /* +diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c +index 66781bf34077..c72412415093 100644 +--- a/arch/arm/mm/hugetlbpage.c ++++ b/arch/arm/mm/hugetlbpage.c +@@ -36,12 +36,6 @@ + * of type casting from pmd_t * to pte_t *. + */ + +-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, +- int write) +-{ +- return ERR_PTR(-EINVAL); +-} +- + int pud_huge(pud_t pud) + { + return 0; +diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c +index e393174fe859..3d478102b1c0 100644 +--- a/arch/arm64/kernel/signal32.c ++++ b/arch/arm64/kernel/signal32.c +@@ -179,8 +179,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) + case __SI_TIMER: + err |= __put_user(from->si_tid, &to->si_tid); + err |= __put_user(from->si_overrun, &to->si_overrun); +- err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, +- &to->si_ptr); ++ err |= __put_user(from->si_int, &to->si_int); + break; + case __SI_POLL: + err |= __put_user(from->si_band, &to->si_band); +@@ -209,7 +208,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) + case __SI_MESGQ: /* But this is */ + err |= __put_user(from->si_pid, &to->si_pid); + err |= __put_user(from->si_uid, &to->si_uid); +- err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr); ++ err |= __put_user(from->si_int, &to->si_int); + break; + default: /* this is just in case for now ... 
*/ + err |= __put_user(from->si_pid, &to->si_pid); +diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c +index 023747bf4dd7..2de9d2e59d96 100644 +--- a/arch/arm64/mm/hugetlbpage.c ++++ b/arch/arm64/mm/hugetlbpage.c +@@ -38,12 +38,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) + } + #endif + +-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, +- int write) +-{ +- return ERR_PTR(-EINVAL); +-} +- + int pmd_huge(pmd_t pmd) + { + return !(pmd_val(pmd) & PMD_TABLE_BIT); +diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c +index 0eca93327195..d223a8b57c1e 100644 +--- a/arch/avr32/mm/fault.c ++++ b/arch/avr32/mm/fault.c +@@ -142,6 +142,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c +index 1790f22e71a2..2686a7aa8ec8 100644 +--- a/arch/cris/mm/fault.c ++++ b/arch/cris/mm/fault.c +@@ -176,6 +176,8 @@ retry: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c +index 9a66372fc7c7..ec4917ddf678 100644 +--- a/arch/frv/mm/fault.c ++++ b/arch/frv/mm/fault.c +@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c +index 7225dad87094..ba5ba7accd0d 100644 +--- a/arch/ia64/mm/fault.c ++++ b/arch/ia64/mm/fault.c +@@ -172,6 +172,8 @@ retry: + */ + if (fault & VM_FAULT_OOM) { + goto out_of_memory; ++ } else if (fault & VM_FAULT_SIGSEGV) { ++ goto bad_area; + } else if (fault & VM_FAULT_SIGBUS) { + signal = SIGBUS; + goto bad_area; +diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c +index 76069c18ee42..52b7604b5215 100644 +--- a/arch/ia64/mm/hugetlbpage.c ++++ b/arch/ia64/mm/hugetlbpage.c +@@ -114,12 +114,6 @@ int pud_huge(pud_t pud) + return 0; + } + +-struct page * +-follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) +-{ +- return NULL; +-} +- + void hugetlb_free_pgd_range(struct mmu_gather *tlb, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c +index e9c6a8014bd6..e3d4d4890104 100644 +--- a/arch/m32r/mm/fault.c ++++ b/arch/m32r/mm/fault.c +@@ -200,6 +200,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c +index eb1d61f68725..f0eef0491f77 100644 +--- a/arch/m68k/mm/fault.c ++++ b/arch/m68k/mm/fault.c +@@ -153,6 +153,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto map_err; + else if (fault & VM_FAULT_SIGBUS) + goto bus_err; + BUG(); +diff --git a/arch/metag/include/asm/processor.h 
b/arch/metag/include/asm/processor.h +index 3be8581af495..ba857382ba65 100644 +--- a/arch/metag/include/asm/processor.h ++++ b/arch/metag/include/asm/processor.h +@@ -149,8 +149,8 @@ extern void exit_thread(void); + + unsigned long get_wchan(struct task_struct *p); + +-#define KSTK_EIP(tsk) ((tsk)->thread.kernel_context->CurrPC) +-#define KSTK_ESP(tsk) ((tsk)->thread.kernel_context->AX[0].U0) ++#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ctx.CurrPC) ++#define KSTK_ESP(tsk) (task_pt_regs(tsk)->ctx.AX[0].U0) + + #define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0) + +diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c +index 332680e5ebf2..2de5dc695a87 100644 +--- a/arch/metag/mm/fault.c ++++ b/arch/metag/mm/fault.c +@@ -141,6 +141,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c +index 3c52fa6d0f8e..745081427659 100644 +--- a/arch/metag/mm/hugetlbpage.c ++++ b/arch/metag/mm/hugetlbpage.c +@@ -94,12 +94,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) + return 0; + } + +-struct page *follow_huge_addr(struct mm_struct *mm, +- unsigned long address, int write) +-{ +- return ERR_PTR(-EINVAL); +-} +- + int pmd_huge(pmd_t pmd) + { + return pmd_page_shift(pmd) > PAGE_SHIFT; +diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c +index fa4cf52aa7a6..d46a5ebb7570 100644 +--- a/arch/microblaze/mm/fault.c ++++ b/arch/microblaze/mm/fault.c +@@ -224,6 +224,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c +index 6e58e97fcd39..cedeb5686eb5 100644 +--- a/arch/mips/kernel/mips_ksyms.c ++++ b/arch/mips/kernel/mips_ksyms.c +@@ -14,6 +14,7 @@ + #include <linux/mm.h> + #include <asm/uaccess.h> + #include <asm/ftrace.h> ++#include <asm/fpu.h> + + extern void *__bzero(void *__s, size_t __count); + extern long __strncpy_from_user_nocheck_asm(char *__to, +@@ -26,6 +27,13 @@ extern long __strnlen_user_nocheck_asm(const char *s); + extern long __strnlen_user_asm(const char *s); + + /* ++ * Core architecture code ++ */ ++#ifdef CONFIG_CPU_R4K_FPU ++EXPORT_SYMBOL_GPL(_save_fp); ++#endif ++ ++/* + * String functions + */ + EXPORT_SYMBOL(memset); +diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S +index bbace092ad0a..03a2db58b22d 100644 +--- a/arch/mips/kvm/kvm_locore.S ++++ b/arch/mips/kvm/kvm_locore.S +@@ -428,7 +428,7 @@ __kvm_mips_return_to_guest: + /* Setup status register for running guest in UM */ + .set at + or v1, v1, (ST0_EXL | KSU_USER | ST0_IE) +- and v1, v1, ~ST0_CU0 ++ and v1, v1, ~(ST0_CU0 | ST0_MX) + .set noat + mtc0 v1, CP0_STATUS + ehb +diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c +index 3f3e5b2b2f38..2cb24788a8a6 100644 +--- a/arch/mips/kvm/kvm_mips.c ++++ b/arch/mips/kvm/kvm_mips.c +@@ -15,6 +15,7 @@ + #include <linux/vmalloc.h> + #include <linux/fs.h> + #include <linux/bootmem.h> ++#include <asm/fpu.h> + #include <asm/page.h> + #include <asm/cacheflush.h> + #include <asm/mmu_context.h> +@@ -417,11 +418,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) + vcpu->mmio_needed = 0; + } + ++ 
lose_fpu(1); ++ ++ local_irq_disable(); + /* Check if we have any exceptions/interrupts pending */ + kvm_mips_deliver_interrupts(vcpu, + kvm_read_c0_guest_cause(vcpu->arch.cop0)); + +- local_irq_disable(); + kvm_guest_enter(); + + r = __kvm_mips_vcpu_run(run, vcpu); +@@ -1021,9 +1024,6 @@ void kvm_mips_set_c0_status(void) + { + uint32_t status = read_c0_status(); + +- if (cpu_has_fpu) +- status |= (ST0_CU1); +- + if (cpu_has_dsp) + status |= (ST0_MX); + +diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h +index bc9e0f406c08..e51621e36152 100644 +--- a/arch/mips/kvm/trace.h ++++ b/arch/mips/kvm/trace.h +@@ -26,18 +26,18 @@ TRACE_EVENT(kvm_exit, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason), + TP_STRUCT__entry( +- __field(struct kvm_vcpu *, vcpu) ++ __field(unsigned long, pc) + __field(unsigned int, reason) + ), + + TP_fast_assign( +- __entry->vcpu = vcpu; ++ __entry->pc = vcpu->arch.pc; + __entry->reason = reason; + ), + + TP_printk("[%s]PC: 0x%08lx", + kvm_mips_exit_types_str[__entry->reason], +- __entry->vcpu->arch.pc) ++ __entry->pc) + ); + + #endif /* _TRACE_KVM_H */ +diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c +index becc42bb1849..70ab5d664332 100644 +--- a/arch/mips/mm/fault.c ++++ b/arch/mips/mm/fault.c +@@ -158,6 +158,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c +index a7fee0dfb7a9..e656e7f61e65 100644 +--- a/arch/mips/mm/hugetlbpage.c ++++ b/arch/mips/mm/hugetlbpage.c +@@ -69,12 +69,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len) + return 0; + } + +-struct page * +-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) +-{ +- return ERR_PTR(-EINVAL); +-} +- + int pmd_huge(pmd_t pmd) + { + return (pmd_val(pmd) & _PAGE_HUGE) != 0; +@@ -84,15 +78,3 @@ int pud_huge(pud_t pud) + { + return (pud_val(pud) & _PAGE_HUGE) != 0; + } +- +-struct page * +-follow_huge_pmd(struct mm_struct *mm, unsigned long address, +- pmd_t *pmd, int write) +-{ +- struct page *page; +- +- page = pte_page(*(pte_t *)pmd); +- if (page) +- page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT); +- return page; +-} +diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c +index 3516cbdf1ee9..0c2cc5d39c8e 100644 +--- a/arch/mn10300/mm/fault.c ++++ b/arch/mn10300/mm/fault.c +@@ -262,6 +262,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c +index 0703acf7d327..230ac20ae794 100644 +--- a/arch/openrisc/mm/fault.c ++++ b/arch/openrisc/mm/fault.c +@@ -171,6 +171,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c +index 0293588d5b8c..0dda59ccc98d 100644 +--- a/arch/parisc/mm/fault.c ++++ b/arch/parisc/mm/fault.c +@@ -226,6 +226,8 @@ good_area: + */ + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto bad_area; + 
BUG(); +diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c +index 51ab9e7e6c39..010fabf3828c 100644 +--- a/arch/powerpc/mm/fault.c ++++ b/arch/powerpc/mm/fault.c +@@ -432,6 +432,8 @@ good_area: + */ + fault = handle_mm_fault(mm, vma, address, flags); + if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { ++ if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + rc = mm_fault_error(regs, address, fault); + if (rc >= MM_FAULT_RETURN) + goto bail; +diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c +index 834ca8eb38f2..fc2427323414 100644 +--- a/arch/powerpc/mm/hugetlbpage.c ++++ b/arch/powerpc/mm/hugetlbpage.c +@@ -706,6 +706,14 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, + return NULL; + } + ++struct page * ++follow_huge_pud(struct mm_struct *mm, unsigned long address, ++ pud_t *pud, int write) ++{ ++ BUG(); ++ return NULL; ++} ++ + static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end, + unsigned long sz) + { +diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c +index 641e7273d75a..62f3e4e48a0b 100644 +--- a/arch/powerpc/platforms/cell/spu_fault.c ++++ b/arch/powerpc/platforms/cell/spu_fault.c +@@ -75,7 +75,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, + if (*flt & VM_FAULT_OOM) { + ret = -ENOMEM; + goto out_unlock; +- } else if (*flt & VM_FAULT_SIGBUS) { ++ } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { + ret = -EFAULT; + goto out_unlock; + } +diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c +index 1c16141c031c..1fea24944ff4 100644 +--- a/arch/powerpc/sysdev/axonram.c ++++ b/arch/powerpc/sysdev/axonram.c +@@ -155,7 +155,7 @@ axon_ram_direct_access(struct block_device *device, sector_t sector, + } + + *kaddr = (void *)(bank->ph_addr + offset); +- *pfn = virt_to_phys(kaddr) >> PAGE_SHIFT; ++ *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT; + + return 0; + } +diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c +index fc6679210d83..b53f37fbe056 100644 +--- a/arch/s390/mm/fault.c ++++ b/arch/s390/mm/fault.c +@@ -244,6 +244,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault) + do_no_context(regs); + else + pagefault_out_of_memory(); ++ } else if (fault & VM_FAULT_SIGSEGV) { ++ /* Kernel mode? Handle exceptions or die */ ++ if (!user_mode(regs)) ++ do_no_context(regs); ++ else ++ do_sigsegv(regs, SEGV_MAPERR); + } else if (fault & VM_FAULT_SIGBUS) { + /* Kernel mode? 
Handle exceptions or die */ + if (!user_mode(regs)) +diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c +index 248445f92604..99a68d579828 100644 +--- a/arch/s390/mm/hugetlbpage.c ++++ b/arch/s390/mm/hugetlbpage.c +@@ -204,12 +204,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) + return 0; + } + +-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, +- int write) +-{ +- return ERR_PTR(-EINVAL); +-} +- + int pmd_huge(pmd_t pmd) + { + if (!MACHINE_HAS_HPAGE) +@@ -222,17 +216,3 @@ int pud_huge(pud_t pud) + { + return 0; + } +- +-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, +- pmd_t *pmdp, int write) +-{ +- struct page *page; +- +- if (!MACHINE_HAS_HPAGE) +- return NULL; +- +- page = pmd_page(*pmdp); +- if (page) +- page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT); +- return page; +-} +diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c +index 52238983527d..6860beb2a280 100644 +--- a/arch/score/mm/fault.c ++++ b/arch/score/mm/fault.c +@@ -114,6 +114,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c +index 541dc6101508..a58fec9b55e0 100644 +--- a/arch/sh/mm/fault.c ++++ b/arch/sh/mm/fault.c +@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, + } else { + if (fault & VM_FAULT_SIGBUS) + do_sigbus(regs, error_code, address); ++ else if (fault & VM_FAULT_SIGSEGV) ++ bad_area(regs, error_code, address); + else + BUG(); + } +diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c +index d7762349ea48..534bc978af8a 100644 +--- a/arch/sh/mm/hugetlbpage.c ++++ b/arch/sh/mm/hugetlbpage.c +@@ -67,12 +67,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) + return 0; + } + +-struct page *follow_huge_addr(struct mm_struct *mm, +- unsigned long address, int write) +-{ +- return ERR_PTR(-EINVAL); +-} +- + int pmd_huge(pmd_t pmd) + { + return 0; +@@ -82,9 +76,3 @@ int pud_huge(pud_t pud) + { + return 0; + } +- +-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, +- pmd_t *pmd, int write) +-{ +- return NULL; +-} +diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c +index 59dbd4645725..163c78712110 100644 +--- a/arch/sparc/mm/fault_32.c ++++ b/arch/sparc/mm/fault_32.c +@@ -252,6 +252,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c +index 603e462a210e..c7009d7762b1 100644 +--- a/arch/sparc/mm/fault_64.c ++++ b/arch/sparc/mm/fault_64.c +@@ -446,6 +446,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c +index 8545f62fa62c..d941cd024f22 100644 +--- a/arch/sparc/mm/hugetlbpage.c ++++ b/arch/sparc/mm/hugetlbpage.c +@@ -216,12 +216,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + return entry; + } + +-struct page *follow_huge_addr(struct mm_struct *mm, +- unsigned long 
address, int write) +-{ +- return ERR_PTR(-EINVAL); +-} +- + int pmd_huge(pmd_t pmd) + { + return 0; +@@ -231,9 +225,3 @@ int pud_huge(pud_t pud) + { + return 0; + } +- +-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, +- pmd_t *pmd, int write) +-{ +- return NULL; +-} +diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c +index 6c0571216a9d..c6d2a76d91a8 100644 +--- a/arch/tile/mm/fault.c ++++ b/arch/tile/mm/fault.c +@@ -444,6 +444,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c +index e514899e1100..8a00c7b7b862 100644 +--- a/arch/tile/mm/hugetlbpage.c ++++ b/arch/tile/mm/hugetlbpage.c +@@ -150,12 +150,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) + return NULL; + } + +-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, +- int write) +-{ +- return ERR_PTR(-EINVAL); +-} +- + int pmd_huge(pmd_t pmd) + { + return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE); +@@ -166,28 +160,6 @@ int pud_huge(pud_t pud) + return !!(pud_val(pud) & _PAGE_HUGE_PAGE); + } + +-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, +- pmd_t *pmd, int write) +-{ +- struct page *page; +- +- page = pte_page(*(pte_t *)pmd); +- if (page) +- page += ((address & ~PMD_MASK) >> PAGE_SHIFT); +- return page; +-} +- +-struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, +- pud_t *pud, int write) +-{ +- struct page *page; +- +- page = pte_page(*(pte_t *)pud); +- if (page) +- page += ((address & ~PUD_MASK) >> PAGE_SHIFT); +- return page; +-} +- + int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) + { + return 0; +diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c +index 5c3aef74237f..06ab0ebe0a0f 100644 +--- a/arch/um/kernel/trap.c ++++ b/arch/um/kernel/trap.c +@@ -80,6 +80,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) { + goto out_of_memory; ++ } else if (fault & VM_FAULT_SIGSEGV) { ++ goto out; + } else if (fault & VM_FAULT_SIGBUS) { + err = -EACCES; + goto out; +diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S +index e96560628571..7b22af265d12 100644 +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -542,11 +542,14 @@ ENTRY(ret_from_fork) + testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? + jz 1f + +- testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET +- jnz int_ret_from_sys_call +- +- RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET +- jmp ret_from_sys_call # go to the SYSRET fastpath ++ /* ++ * By the time we get here, we have no idea whether our pt_regs, ++ * ti flags, and ti status came from the 64-bit SYSCALL fast path, ++ * the slow path, or one of the ia32entry paths. ++ * Use int_ret_from_sys_call to return, since it can safely handle ++ * all of the above. ++ */ ++ jmp int_ret_from_sys_call + + 1: + subq $REST_SKIP, %rsp # leave space for volatiles +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index 8ab43ac68f06..c412bab82d1f 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -4617,7 +4617,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) + if (rc != X86EMUL_CONTINUE) + goto done; + } +- ctxt->dst.orig_val = ctxt->dst.val; ++ /* Copy full 64-bit value for CMPXCHG8B. 
*/ ++ ctxt->dst.orig_val64 = ctxt->dst.val64; + + special_insn: + +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index fabb62bad47c..d3691ab6d6a0 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -1171,21 +1171,22 @@ void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) + { + #ifdef CONFIG_X86_64 + bool vcpus_matched; +- bool do_request = false; + struct kvm_arch *ka = &vcpu->kvm->arch; + struct pvclock_gtod_data *gtod = &pvclock_gtod_data; + + vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == + atomic_read(&vcpu->kvm->online_vcpus)); + +- if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC) +- if (!ka->use_master_clock) +- do_request = 1; +- +- if (!vcpus_matched && ka->use_master_clock) +- do_request = 1; +- +- if (do_request) ++ /* ++ * Once the masterclock is enabled, always perform request in ++ * order to update it. ++ * ++ * In order to enable masterclock, the host clocksource must be TSC ++ * and the vcpus need to have matched TSCs. When that happens, ++ * perform request to enable masterclock. ++ */ ++ if (ka->use_master_clock || ++ (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched)) + kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); + + trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c +index 5b90bbcad9f6..814a25d88738 100644 +--- a/arch/x86/mm/fault.c ++++ b/arch/x86/mm/fault.c +@@ -812,11 +812,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, + unsigned int fault) + { + struct task_struct *tsk = current; +- struct mm_struct *mm = tsk->mm; + int code = BUS_ADRERR; + +- up_read(&mm->mmap_sem); +- + /* Kernel mode? Handle exceptions or die: */ + if (!(error_code & PF_USER)) { + no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); +@@ -847,7 +844,6 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, + unsigned long address, unsigned int fault) + { + if (fatal_signal_pending(current) && !(error_code & PF_USER)) { +- up_read(¤t->mm->mmap_sem); + no_context(regs, error_code, address, 0, 0); + return; + } +@@ -855,14 +851,11 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, + if (fault & VM_FAULT_OOM) { + /* Kernel mode? 
Handle exceptions or die: */ + if (!(error_code & PF_USER)) { +- up_read(¤t->mm->mmap_sem); + no_context(regs, error_code, address, + SIGSEGV, SEGV_MAPERR); + return; + } + +- up_read(¤t->mm->mmap_sem); +- + /* + * We ran out of memory, call the OOM killer, and return the + * userspace (which will retry the fault, or kill us if we got +@@ -873,6 +866,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, + if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| + VM_FAULT_HWPOISON_LARGE)) + do_sigbus(regs, error_code, address, fault); ++ else if (fault & VM_FAULT_SIGSEGV) ++ bad_area_nosemaphore(regs, error_code, address); + else + BUG(); + } +@@ -1193,6 +1188,7 @@ good_area: + return; + + if (unlikely(fault & VM_FAULT_ERROR)) { ++ up_read(&mm->mmap_sem); + mm_fault_error(regs, error_code, address, fault); + return; + } +diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c +index 0596e8e0cc19..5bb7b365c519 100644 +--- a/arch/x86/mm/gup.c ++++ b/arch/x86/mm/gup.c +@@ -172,7 +172,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, + */ + if (pmd_none(pmd) || pmd_trans_splitting(pmd)) + return 0; +- if (unlikely(pmd_large(pmd))) { ++ if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) { + /* + * NUMA hinting faults need to be handled in the GUP + * slowpath for accounting purposes and so that they +diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c +index fa029fb2afae..9d80a1b5dc86 100644 +--- a/arch/x86/mm/hugetlbpage.c ++++ b/arch/x86/mm/hugetlbpage.c +@@ -52,23 +52,17 @@ int pud_huge(pud_t pud) + return 0; + } + +-struct page * +-follow_huge_pmd(struct mm_struct *mm, unsigned long address, +- pmd_t *pmd, int write) +-{ +- return NULL; +-} + #else + +-struct page * +-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) +-{ +- return ERR_PTR(-EINVAL); +-} +- ++/* ++ * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal ++ * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry. ++ * Otherwise, returns 0. 
++ */ + int pmd_huge(pmd_t pmd) + { +- return !!(pmd_val(pmd) & _PAGE_PSE); ++ return !pmd_none(pmd) && ++ (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT; + } + + int pud_huge(pud_t pud) +diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c +index 25e7e1372bb2..3601ff284b92 100644 +--- a/arch/x86/mm/mmap.c ++++ b/arch/x86/mm/mmap.c +@@ -35,12 +35,12 @@ struct __read_mostly va_alignment va_align = { + .flags = -1, + }; + +-static unsigned int stack_maxrandom_size(void) ++static unsigned long stack_maxrandom_size(void) + { +- unsigned int max = 0; ++ unsigned long max = 0; + if ((current->flags & PF_RANDOMIZE) && + !(current->personality & ADDR_NO_RANDOMIZE)) { +- max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT; ++ max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT; + } + + return max; +diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c +index 70fa7bc42b4a..38278337d85e 100644 +--- a/arch/xtensa/mm/fault.c ++++ b/arch/xtensa/mm/fault.c +@@ -117,6 +117,8 @@ good_area: + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; ++ else if (fault & VM_FAULT_SIGSEGV) ++ goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); +diff --git a/block/blk-throttle.c b/block/blk-throttle.c +index 8331aba9426f..ca3794e17755 100644 +--- a/block/blk-throttle.c ++++ b/block/blk-throttle.c +@@ -1282,6 +1282,9 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf, + struct blkg_rwstat rwstat = { }, tmp; + int i, cpu; + ++ if (tg->stats_cpu == NULL) ++ return 0; ++ + for_each_possible_cpu(cpu) { + struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu); + +diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c +index 06c2bab69756..b19c9f391761 100644 +--- a/block/cfq-iosched.c ++++ b/block/cfq-iosched.c +@@ -3575,6 +3575,11 @@ retry: + + blkcg = bio_blkcg(bio); + cfqg = cfq_lookup_create_cfqg(cfqd, blkcg); ++ if (!cfqg) { ++ cfqq = &cfqd->oom_cfqq; ++ goto out; ++ } ++ + cfqq = cic_to_cfqq(cic, is_sync); + + /* +@@ -3611,7 +3616,7 @@ retry: + } else + cfqq = &cfqd->oom_cfqq; + } +- ++out: + if (new_cfqq) + kmem_cache_free(cfq_pool, new_cfqq); + +@@ -3641,12 +3646,17 @@ static struct cfq_queue * + cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, + struct bio *bio, gfp_t gfp_mask) + { +- const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio); +- const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio); ++ int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio); ++ int ioprio = IOPRIO_PRIO_DATA(cic->ioprio); + struct cfq_queue **async_cfqq = NULL; + struct cfq_queue *cfqq = NULL; + + if (!is_sync) { ++ if (!ioprio_valid(cic->ioprio)) { ++ struct task_struct *tsk = current; ++ ioprio = task_nice_ioprio(tsk); ++ ioprio_class = task_nice_ioclass(tsk); ++ } + async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio); + cfqq = *async_cfqq; + } +diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c +index 47e4deb9dfcd..ff5ec8ecc257 100644 +--- a/drivers/acpi/video.c ++++ b/drivers/acpi/video.c +@@ -1857,6 +1857,17 @@ EXPORT_SYMBOL(acpi_video_unregister); + + static int __init acpi_video_init(void) + { ++ /* ++ * Let the module load even if ACPI is disabled (e.g. due to ++ * a broken BIOS) so that i915.ko can still be loaded on such ++ * old systems without an AcpiOpRegion. ++ * ++ * acpi_video_register() will report -ENODEV later as well due ++ * to acpi_disabled when i915.ko tries to register itself afterwards. 
++ */ ++ if (acpi_disabled) ++ return 0; ++ + dmi_check_system(video_dmi_table); + + if (intel_opregion_present()) +diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c +index fa6a79009724..9e925bf9ac57 100644 +--- a/drivers/bluetooth/ath3k.c ++++ b/drivers/bluetooth/ath3k.c +@@ -79,6 +79,7 @@ static struct usb_device_id ath3k_table[] = { + { USB_DEVICE(0x0489, 0xe057) }, + { USB_DEVICE(0x0489, 0xe056) }, + { USB_DEVICE(0x0489, 0xe05f) }, ++ { USB_DEVICE(0x0489, 0xe078) }, + { USB_DEVICE(0x04c5, 0x1330) }, + { USB_DEVICE(0x04CA, 0x3004) }, + { USB_DEVICE(0x04CA, 0x3005) }, +@@ -130,6 +131,7 @@ static struct usb_device_id ath3k_blist_tbl[] = { + { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 64f19159515f..faa9a387f9a5 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -156,6 +156,7 @@ static struct usb_device_id blacklist_table[] = { + { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, +diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c +index 48138b311460..23c71e7a875f 100644 +--- a/drivers/char/tpm/tpm.c ++++ b/drivers/char/tpm/tpm.c +@@ -1583,7 +1583,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, + + /* Make chip available */ + spin_lock(&driver_lock); +- list_add_rcu(&chip->list, &tpm_chip_list); ++ list_add_tail_rcu(&chip->list, &tpm_chip_list); + spin_unlock(&driver_lock); + + return chip; +diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c +index 06af39ca901e..3f9edcd33f65 100644 +--- a/drivers/char/tpm/tpm_i2c_stm_st33.c ++++ b/drivers/char/tpm/tpm_i2c_stm_st33.c +@@ -488,7 +488,7 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf, + if (burstcnt < 0) + return burstcnt; + size = min_t(int, len - i - 1, burstcnt); +- ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf, size); ++ ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf + i, size); + if (ret < 0) + goto out_err; + +diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c +index 56b07c35a13e..538856f3e68a 100644 +--- a/drivers/char/tpm/tpm_ibmvtpm.c ++++ b/drivers/char/tpm/tpm_ibmvtpm.c +@@ -148,7 +148,8 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) + crq.len = (u16)count; + crq.data = ibmvtpm->rtce_dma_handle; + +- rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]); ++ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]), ++ cpu_to_be64(word[1])); + if (rc != H_SUCCESS) { + dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); + rc = 0; +@@ -186,7 +187,8 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm) + crq.valid = (u8)IBMVTPM_VALID_CMD; + crq.msg = 
(u8)VTPM_GET_RTCE_BUFFER_SIZE; + +- rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]); ++ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), ++ cpu_to_be64(buf[1])); + if (rc != H_SUCCESS) + dev_err(ibmvtpm->dev, + "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc); +@@ -212,7 +214,8 @@ static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm) + crq.valid = (u8)IBMVTPM_VALID_CMD; + crq.msg = (u8)VTPM_GET_VERSION; + +- rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]); ++ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), ++ cpu_to_be64(buf[1])); + if (rc != H_SUCCESS) + dev_err(ibmvtpm->dev, + "ibmvtpm_crq_get_version failed rc=%d\n", rc); +@@ -307,6 +310,14 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev) + static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev) + { + struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev); ++ ++ /* ibmvtpm initializes at probe time, so the data we are ++ * asking for may not be set yet. Estimate that 4K required ++ * for TCE-mapped buffer in addition to CRQ. ++ */ ++ if (!ibmvtpm) ++ return CRQ_RES_BUF_SIZE + PAGE_SIZE; ++ + return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size; + } + +@@ -327,7 +338,8 @@ static int tpm_ibmvtpm_suspend(struct device *dev) + crq.valid = (u8)IBMVTPM_VALID_CMD; + crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND; + +- rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]); ++ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), ++ cpu_to_be64(buf[1])); + if (rc != H_SUCCESS) + dev_err(ibmvtpm->dev, + "tpm_ibmvtpm_suspend failed rc=%d\n", rc); +@@ -511,11 +523,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq, + case IBMVTPM_VALID_CMD: + switch (crq->msg) { + case VTPM_GET_RTCE_BUFFER_SIZE_RES: +- if (crq->len <= 0) { ++ if (be16_to_cpu(crq->len) <= 0) { + dev_err(ibmvtpm->dev, "Invalid rtce size\n"); + return; + } +- ibmvtpm->rtce_size = crq->len; ++ ibmvtpm->rtce_size = be16_to_cpu(crq->len); + ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size, + GFP_KERNEL); + if (!ibmvtpm->rtce_buf) { +@@ -536,11 +548,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq, + + return; + case VTPM_GET_VERSION_RES: +- ibmvtpm->vtpm_version = crq->data; ++ ibmvtpm->vtpm_version = be32_to_cpu(crq->data); + return; + case VTPM_TPM_COMMAND_RES: + /* len of the data in rtce buffer */ +- ibmvtpm->res_len = crq->len; ++ ibmvtpm->res_len = be16_to_cpu(crq->len); + wake_up_interruptible(&ibmvtpm->wq); + return; + default: +diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c +index e7b1a0ae4300..7f8598387702 100644 +--- a/drivers/char/tpm/tpm_tis.c ++++ b/drivers/char/tpm/tpm_tis.c +@@ -75,6 +75,10 @@ enum tis_defaults { + #define TPM_DID_VID(l) (0x0F00 | ((l) << 12)) + #define TPM_RID(l) (0x0F04 | ((l) << 12)) + ++struct priv_data { ++ bool irq_tested; ++}; ++ + static LIST_HEAD(tis_chips); + static DEFINE_MUTEX(tis_lock); + +@@ -338,12 +342,27 @@ out_err: + return rc; + } + ++static void disable_interrupts(struct tpm_chip *chip) ++{ ++ u32 intmask; ++ ++ intmask = ++ ioread32(chip->vendor.iobase + ++ TPM_INT_ENABLE(chip->vendor.locality)); ++ intmask &= ~TPM_GLOBAL_INT_ENABLE; ++ iowrite32(intmask, ++ chip->vendor.iobase + ++ TPM_INT_ENABLE(chip->vendor.locality)); ++ free_irq(chip->vendor.irq, chip); ++ chip->vendor.irq = 0; ++} ++ + /* + * If interrupts are used (signaled by an irq set in the vendor structure) + * tpm.c can skip polling for the data to be available as the interrupt is + * waited for here + */ +-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) 
++static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len) + { + int rc; + u32 ordinal; +@@ -373,6 +392,30 @@ out_err: + return rc; + } + ++static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) ++{ ++ int rc, irq; ++ struct priv_data *priv = chip->vendor.priv; ++ ++ if (!chip->vendor.irq || priv->irq_tested) ++ return tpm_tis_send_main(chip, buf, len); ++ ++ /* Verify receipt of the expected IRQ */ ++ irq = chip->vendor.irq; ++ chip->vendor.irq = 0; ++ rc = tpm_tis_send_main(chip, buf, len); ++ chip->vendor.irq = irq; ++ if (!priv->irq_tested) ++ msleep(1); ++ if (!priv->irq_tested) { ++ disable_interrupts(chip); ++ dev_err(chip->dev, ++ FW_BUG "TPM interrupt not working, polling instead\n"); ++ } ++ priv->irq_tested = true; ++ return rc; ++} ++ + struct tis_vendor_timeout_override { + u32 did_vid; + unsigned long timeout_us[4]; +@@ -546,6 +589,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id) + if (interrupt == 0) + return IRQ_NONE; + ++ ((struct priv_data *)chip->vendor.priv)->irq_tested = true; + if (interrupt & TPM_INTF_DATA_AVAIL_INT) + wake_up_interruptible(&chip->vendor.read_queue); + if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT) +@@ -575,9 +619,14 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, + u32 vendor, intfcaps, intmask; + int rc, i, irq_s, irq_e, probe; + struct tpm_chip *chip; ++ struct priv_data *priv; + ++ priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL); ++ if (priv == NULL) ++ return -ENOMEM; + if (!(chip = tpm_register_hardware(dev, &tpm_tis))) + return -ENODEV; ++ chip->vendor.priv = priv; + + chip->vendor.iobase = ioremap(start, len); + if (!chip->vendor.iobase) { +@@ -646,19 +695,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, + if (intfcaps & TPM_INTF_DATA_AVAIL_INT) + dev_dbg(dev, "\tData Avail Int Support\n"); + +- /* get the timeouts before testing for irqs */ +- if (tpm_get_timeouts(chip)) { +- dev_err(dev, "Could not get TPM timeouts and durations\n"); +- rc = -ENODEV; +- goto out_err; +- } +- +- if (tpm_do_selftest(chip)) { +- dev_err(dev, "TPM self test failed\n"); +- rc = -ENODEV; +- goto out_err; +- } +- + /* INTERRUPT Setup */ + init_waitqueue_head(&chip->vendor.read_queue); + init_waitqueue_head(&chip->vendor.int_queue); +@@ -760,6 +796,18 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, + } + } + ++ if (tpm_get_timeouts(chip)) { ++ dev_err(dev, "Could not get TPM timeouts and durations\n"); ++ rc = -ENODEV; ++ goto out_err; ++ } ++ ++ if (tpm_do_selftest(chip)) { ++ dev_err(dev, "TPM self test failed\n"); ++ rc = -ENODEV; ++ goto out_err; ++ } ++ + INIT_LIST_HEAD(&chip->vendor.list); + mutex_lock(&tis_lock); + list_add(&chip->vendor.list, &tis_chips); +diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c +index 4a58c55255bd..797bab97cea6 100644 +--- a/drivers/clk/clk-gate.c ++++ b/drivers/clk/clk-gate.c +@@ -128,7 +128,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name, + struct clk_init_data init; + + if (clk_gate_flags & CLK_GATE_HIWORD_MASK) { +- if (bit_idx > 16) { ++ if (bit_idx > 15) { + pr_err("gate bit exceeds LOWORD field\n"); + return ERR_PTR(-EINVAL); + } +diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c +index 88523f91d9b7..7555793097f2 100644 +--- a/drivers/clk/sunxi/clk-factors.c ++++ b/drivers/clk/sunxi/clk-factors.c +@@ -70,7 +70,7 @@ static unsigned long clk_factors_recalc_rate(struct clk_hw *hw, + p = FACTOR_GET(config->pshift, config->pwidth, reg); + + 
/* Calculate the rate */ +- rate = (parent_rate * n * (k + 1) >> p) / (m + 1); ++ rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1); + + return rate; + } +diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h +index f49851cc4380..441fdc3f5717 100644 +--- a/drivers/clk/sunxi/clk-factors.h ++++ b/drivers/clk/sunxi/clk-factors.h +@@ -15,6 +15,7 @@ struct clk_factors_config { + u8 mwidth; + u8 pshift; + u8 pwidth; ++ u8 n_start; + }; + + struct clk *clk_register_factors(struct device *dev, const char *name, +diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c +index 34ee69f4d50c..2cc7b59b5e4a 100644 +--- a/drivers/clk/sunxi/clk-sunxi.c ++++ b/drivers/clk/sunxi/clk-sunxi.c +@@ -279,6 +279,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = { + .kwidth = 2, + .mshift = 0, + .mwidth = 2, ++ .n_start = 1, + }; + + static struct clk_factors_config sun4i_apb1_config = { +diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c +index cc40fe64f2dc..01eb95cd549e 100644 +--- a/drivers/clk/zynq/clkc.c ++++ b/drivers/clk/zynq/clkc.c +@@ -276,6 +276,7 @@ static void __init zynq_clk_setup(struct device_node *np) + clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x], + "cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL, + 26, 0, &armclk_lock); ++ clk_prepare_enable(clks[cpu_2x]); + + clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1, + 4 + 2 * tmp); +diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c +index 22dcb81ef9d0..e62fb3ffba8a 100644 +--- a/drivers/cpufreq/s3c2416-cpufreq.c ++++ b/drivers/cpufreq/s3c2416-cpufreq.c +@@ -295,7 +295,7 @@ out: + } + + #ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE +-static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) ++static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) + { + int count, v, i, found; + struct cpufreq_frequency_table *freq; +@@ -367,7 +367,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = { + .notifier_call = s3c2416_cpufreq_reboot_notifier_evt, + }; + +-static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) ++static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) + { + struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; + struct cpufreq_frequency_table *freq; +diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c +index b0f343fcb7ee..83b63f2fa51f 100644 +--- a/drivers/cpufreq/s3c24xx-cpufreq.c ++++ b/drivers/cpufreq/s3c24xx-cpufreq.c +@@ -483,7 +483,7 @@ static struct cpufreq_driver s3c24xx_driver = { + }; + + +-int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info) ++int s3c_cpufreq_register(struct s3c_cpufreq_info *info) + { + if (!info || !info->name) { + printk(KERN_ERR "%s: failed to pass valid information\n", +diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c +index 7047821a7f8a..4ab7a2156672 100644 +--- a/drivers/cpufreq/speedstep-lib.c ++++ b/drivers/cpufreq/speedstep-lib.c +@@ -400,6 +400,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, + + pr_debug("previous speed is %u\n", prev_speed); + ++ preempt_disable(); + local_irq_save(flags); + + /* switch to low state */ +@@ -464,6 +465,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, + + out: + local_irq_restore(flags); ++ preempt_enable(); ++ + return ret; + } + EXPORT_SYMBOL_GPL(speedstep_get_freqs); +diff --git a/drivers/cpufreq/speedstep-smi.c 
b/drivers/cpufreq/speedstep-smi.c +index abfba4f731eb..1f6c4adc85d1 100644 +--- a/drivers/cpufreq/speedstep-smi.c ++++ b/drivers/cpufreq/speedstep-smi.c +@@ -188,6 +188,7 @@ static void speedstep_set_state(unsigned int state) + return; + + /* Disable IRQs */ ++ preempt_disable(); + local_irq_save(flags); + + command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); +@@ -198,9 +199,19 @@ static void speedstep_set_state(unsigned int state) + + do { + if (retry) { ++ /* ++ * We need to enable interrupts, otherwise the blockage ++ * won't resolve. ++ * ++ * We disable preemption so that other processes don't ++ * run. If other processes were running, they could ++ * submit more DMA requests, making the blockage worse. ++ */ + pr_debug("retry %u, previous result %u, waiting...\n", + retry, result); ++ local_irq_enable(); + mdelay(retry * 50); ++ local_irq_disable(); + } + retry++; + __asm__ __volatile__( +@@ -217,6 +228,7 @@ static void speedstep_set_state(unsigned int state) + + /* enable IRQs */ + local_irq_restore(flags); ++ preempt_enable(); + + if (new_state == state) + pr_debug("change to %u MHz succeeded after %u tries " +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c +index d43a6202a5c5..10162af430c5 100644 +--- a/drivers/edac/amd64_edac.c ++++ b/drivers/edac/amd64_edac.c +@@ -2043,7 +2043,13 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, + + void amd64_decode_bus_error(int node_id, struct mce *m) + { +- __amd64_decode_bus_error(mcis[node_id], m); ++ struct mem_ctl_info *mci; ++ ++ mci = edac_mc_find(node_id); ++ if (!mci) ++ return; ++ ++ __amd64_decode_bus_error(mci, m); + } + + /* +diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c +index 276a4229b032..a1c47b4bc6d4 100644 +--- a/drivers/gpio/gpio-tps65912.c ++++ b/drivers/gpio/gpio-tps65912.c +@@ -26,9 +26,12 @@ struct tps65912_gpio_data { + struct gpio_chip gpio_chip; + }; + ++#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip) ++ + static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset) + { +- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); ++ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); ++ struct tps65912 *tps65912 = tps65912_gpio->tps65912; + int val; + + val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset); +@@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset) + static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset, + int value) + { +- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); ++ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); ++ struct tps65912 *tps65912 = tps65912_gpio->tps65912; + + if (value) + tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset, +@@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset, + static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset, + int value) + { +- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); ++ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); ++ struct tps65912 *tps65912 = tps65912_gpio->tps65912; + + /* Set the initial value */ + tps65912_gpio_set(gc, offset, value); +@@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset, + + static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset) + { +- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); ++ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); ++ struct tps65912 *tps65912 = 
tps65912_gpio->tps65912; + + return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset, + GPIO_CFG_MASK); +diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c +index 63e7fad69ced..836af49da901 100644 +--- a/drivers/gpio/gpiolib-of.c ++++ b/drivers/gpio/gpiolib-of.c +@@ -44,12 +44,13 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data) + + ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags); + if (ret < 0) { +- /* We've found the gpio chip, but the translation failed. +- * Return true to stop looking and return the translation +- * error via out_gpio ++ /* We've found a gpio chip, but the translation failed. ++ * Store translation error in out_gpio. ++ * Return false to keep looking, as more than one gpio chip ++ * could be registered per of-node. + */ + gg_data->out_gpio = ERR_PTR(ret); +- return true; ++ return false; + } + + gg_data->out_gpio = ret + gc->base; +diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c +index cdc7f408bd18..6e2e4a859047 100644 +--- a/drivers/gpu/drm/radeon/cik.c ++++ b/drivers/gpu/drm/radeon/cik.c +@@ -3027,7 +3027,21 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev, + struct radeon_ring *ring = &rdev->ring[fence->ring]; + u64 addr = rdev->fence_drv[fence->ring].gpu_addr; + +- /* EVENT_WRITE_EOP - flush caches, send int */ ++ /* Workaround for cache flush problems. First send a dummy EOP ++ * event down the pipe with seq one below. ++ */ ++ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); ++ radeon_ring_write(ring, (EOP_TCL1_ACTION_EN | ++ EOP_TC_ACTION_EN | ++ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | ++ EVENT_INDEX(5))); ++ radeon_ring_write(ring, addr & 0xfffffffc); ++ radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | ++ DATA_SEL(1) | INT_SEL(0)); ++ radeon_ring_write(ring, fence->seq - 1); ++ radeon_ring_write(ring, 0); ++ ++ /* Then send the real EOP event down the pipe. 
*/ + radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); + radeon_ring_write(ring, (EOP_TCL1_ACTION_EN | + EOP_TC_ACTION_EN | +@@ -6092,7 +6106,6 @@ int cik_irq_set(struct radeon_device *rdev) + u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; + u32 grbm_int_cntl = 0; + u32 dma_cntl, dma_cntl1; +- u32 thermal_int; + + if (!rdev->irq.installed) { + WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); +@@ -6129,13 +6142,6 @@ int cik_irq_set(struct radeon_device *rdev) + cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; + cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; + +- if (rdev->flags & RADEON_IS_IGP) +- thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) & +- ~(THERM_INTH_MASK | THERM_INTL_MASK); +- else +- thermal_int = RREG32_SMC(CG_THERMAL_INT) & +- ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); +- + /* enable CP interrupts on all rings */ + if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { + DRM_DEBUG("cik_irq_set: sw int gfx\n"); +@@ -6293,14 +6299,6 @@ int cik_irq_set(struct radeon_device *rdev) + hpd6 |= DC_HPDx_INT_EN; + } + +- if (rdev->irq.dpm_thermal) { +- DRM_DEBUG("dpm thermal\n"); +- if (rdev->flags & RADEON_IS_IGP) +- thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; +- else +- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; +- } +- + WREG32(CP_INT_CNTL_RING0, cp_int_cntl); + + WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl); +@@ -6354,11 +6352,6 @@ int cik_irq_set(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, hpd5); + WREG32(DC_HPD6_INT_CONTROL, hpd6); + +- if (rdev->flags & RADEON_IS_IGP) +- WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); +- else +- WREG32_SMC(CG_THERMAL_INT, thermal_int); +- + return 0; + } + +diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c +index b41905573cd2..47a7a34d3b0c 100644 +--- a/drivers/gpu/drm/radeon/kv_dpm.c ++++ b/drivers/gpu/drm/radeon/kv_dpm.c +@@ -1121,6 +1121,19 @@ void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable) + } + } + ++static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable) ++{ ++ u32 thermal_int; ++ ++ thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL); ++ if (enable) ++ thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; ++ else ++ thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK); ++ WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); ++ ++} ++ + int kv_dpm_enable(struct radeon_device *rdev) + { + struct kv_power_info *pi = kv_get_pi(rdev); +@@ -1222,8 +1235,7 @@ int kv_dpm_enable(struct radeon_device *rdev) + DRM_ERROR("kv_set_thermal_temperature_range failed\n"); + return ret; + } +- rdev->irq.dpm_thermal = true; +- radeon_irq_set(rdev); ++ kv_enable_thermal_int(rdev, true); + } + + ret = kv_smc_bapm_enable(rdev, false); +@@ -1269,6 +1281,7 @@ void kv_dpm_disable(struct radeon_device *rdev) + kv_stop_dpm(rdev); + kv_enable_ulv(rdev, false); + kv_reset_am(rdev); ++ kv_enable_thermal_int(rdev, false); + + kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); + } +diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c +index 474343adf262..d5f7e8c14b2e 100644 +--- a/drivers/gpu/drm/radeon/ni.c ++++ b/drivers/gpu/drm/radeon/ni.c +@@ -1077,12 +1077,12 @@ static void cayman_gpu_init(struct radeon_device *rdev) + + if ((rdev->config.cayman.max_backends_per_se == 1) && + (rdev->flags & RADEON_IS_IGP)) { +- if ((disabled_rb_mask & 3) == 1) { +- /* RB0 disabled, RB1 enabled */ +- tmp = 0x11111111; +- } else { ++ if ((disabled_rb_mask & 3) == 2) { + /* RB1 disabled, RB0 enabled */ + tmp = 
0x00000000; ++ } else { ++ /* RB0 disabled, RB1 enabled */ ++ tmp = 0x11111111; + } + } else { + tmp = gb_addr_config & NUM_PIPES_MASK; +diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c +index cc4258a853fd..729ad831886f 100644 +--- a/drivers/gpu/drm/radeon/r600_dpm.c ++++ b/drivers/gpu/drm/radeon/r600_dpm.c +@@ -187,7 +187,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev) + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { +- vrefresh = radeon_crtc->hw_mode.vrefresh; ++ vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode); + break; + } + } +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 62d73264b3e2..b2ee609f77a9 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -702,6 +702,12 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type) + if (((parser->global.usage_page << 16) == HID_UP_SENSOR) && + type == HID_COLLECTION_PHYSICAL) + hid->group = HID_GROUP_SENSOR_HUB; ++ ++ if (hid->vendor == USB_VENDOR_ID_MICROSOFT && ++ (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 || ++ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3_JP) && ++ hid->group == HID_GROUP_MULTITOUCH) ++ hid->group = HID_GROUP_GENERIC; + } + + static int hid_scan_main(struct hid_parser *parser, struct hid_item *item) +@@ -1790,10 +1796,14 @@ static const struct hid_device_id hid_have_special_driver[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, + { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, + { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 60348ec399fc..946b8cbfaa9f 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -291,6 +291,8 @@ + #define USB_VENDOR_ID_ELAN 0x04f3 + #define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089 + #define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b ++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103 0x0103 ++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c 0x010c + #define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f + + #define USB_VENDOR_ID_ELECOM 0x056e +@@ -611,14 +613,18 @@ + + #define USB_VENDOR_ID_MICROSOFT 0x045e + #define USB_DEVICE_ID_SIDEWINDER_GV 0x003b ++#define USB_DEVICE_ID_MS_OFFICE_KB 0x0048 + #define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d + #define USB_DEVICE_ID_MS_NE4K 0x00db + #define USB_DEVICE_ID_MS_NE4K_JP 0x00dc + #define USB_DEVICE_ID_MS_LK6K 0x00f9 + #define USB_DEVICE_ID_MS_PRESENTER_8K_BT 0x0701 + #define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 
++#define USB_DEVICE_ID_MS_NE7K 0x071d + #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 + #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c ++#define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07dc ++#define USB_DEVICE_ID_MS_TYPE_COVER_3_JP 0x07dd + + #define USB_VENDOR_ID_MOJO 0x8282 + #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 +@@ -711,6 +717,8 @@ + #define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff + + #define USB_VENDOR_ID_PIXART 0x093a ++#define USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2 0x0137 ++#define USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE 0x2510 + #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN 0x8001 + #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1 0x8002 + #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2 0x8003 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c +index 6f568b64784b..9dcccbde65fb 100644 +--- a/drivers/hid/hid-input.c ++++ b/drivers/hid/hid-input.c +@@ -306,10 +306,13 @@ static enum power_supply_property hidinput_battery_props[] = { + + static const struct hid_device_id hid_battery_quirks[] = { + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, +- USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), +- HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, ++ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), ++ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, ++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, ++ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), ++ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, +- USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), ++ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), + HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, + USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO), +@@ -1066,6 +1069,23 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct + return; + } + ++ /* ++ * Ignore reports for absolute data if the data didn't change. This is ++ * not only an optimization but also fixes 'dead' key reports. Some ++ * RollOver implementations for localized keys (like BACKSLASH/PIPE; HID ++ * 0x31 and 0x32) report multiple keys, even though a localized keyboard ++ * can only have one of them physically available. The 'dead' keys ++ * report constant 0. As all map to the same keycode, they'd confuse ++ * the input layer. If we filter the 'dead' keys on the HID level, we ++ * skip the keycode translation and only forward real events. 
++ */ ++ if (!(field->flags & (HID_MAIN_ITEM_RELATIVE | ++ HID_MAIN_ITEM_BUFFERED_BYTE)) && ++ (field->flags & HID_MAIN_ITEM_VARIABLE) && ++ usage->usage_index < field->maxusage && ++ value == field->value[usage->usage_index]) ++ return; ++ + /* report the usage code as scancode if the key status has changed */ + if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value) + input_event(input, EV_MSC, MSC_SCAN, usage->hid); +diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c +index 551795b7da1d..7e56e18665da 100644 +--- a/drivers/hid/hid-microsoft.c ++++ b/drivers/hid/hid-microsoft.c +@@ -65,6 +65,26 @@ static int ms_ergonomy_kb_quirk(struct hid_input *hi, struct hid_usage *usage, + switch (usage->hid & HID_USAGE) { + case 0xfd06: ms_map_key_clear(KEY_CHAT); break; + case 0xfd07: ms_map_key_clear(KEY_PHONE); break; ++ case 0xff00: ++ /* Special keypad keys */ ++ ms_map_key_clear(KEY_KPEQUAL); ++ set_bit(KEY_KPLEFTPAREN, input->keybit); ++ set_bit(KEY_KPRIGHTPAREN, input->keybit); ++ break; ++ case 0xff01: ++ /* Scroll wheel */ ++ hid_map_usage_clear(hi, usage, bit, max, EV_REL, REL_WHEEL); ++ break; ++ case 0xff02: ++ /* ++ * This byte contains a copy of the modifier keys byte of a ++ * standard hid keyboard report, as send by interface 0 ++ * (this usage is found on interface 1). ++ * ++ * This byte only gets send when another key in the same report ++ * changes state, and as such is useless, ignore it. ++ */ ++ return -1; + case 0xff05: + set_bit(EV_REP, input->evbit); + ms_map_key_clear(KEY_F13); +@@ -133,14 +153,39 @@ static int ms_event(struct hid_device *hdev, struct hid_field *field, + struct hid_usage *usage, __s32 value) + { + unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); ++ struct input_dev *input; + + if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput || + !usage->type) + return 0; + ++ input = field->hidinput->input; ++ + /* Handling MS keyboards special buttons */ ++ if (quirks & MS_ERGONOMY && usage->hid == (HID_UP_MSVENDOR | 0xff00)) { ++ /* Special keypad keys */ ++ input_report_key(input, KEY_KPEQUAL, value & 0x01); ++ input_report_key(input, KEY_KPLEFTPAREN, value & 0x02); ++ input_report_key(input, KEY_KPRIGHTPAREN, value & 0x04); ++ return 1; ++ } ++ ++ if (quirks & MS_ERGONOMY && usage->hid == (HID_UP_MSVENDOR | 0xff01)) { ++ /* Scroll wheel */ ++ int step = ((value & 0x60) >> 5) + 1; ++ ++ switch (value & 0x1f) { ++ case 0x01: ++ input_report_rel(input, REL_WHEEL, step); ++ break; ++ case 0x1f: ++ input_report_rel(input, REL_WHEEL, -step); ++ break; ++ } ++ return 1; ++ } ++ + if (quirks & MS_ERGONOMY && usage->hid == (HID_UP_MSVENDOR | 0xff05)) { +- struct input_dev *input = field->hidinput->input; + static unsigned int last_key = 0; + unsigned int key = 0; + switch (value) { +@@ -193,10 +238,14 @@ err_free: + static const struct hid_device_id ms_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV), + .driver_data = MS_HIDINPUT }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB), ++ .driver_data = MS_ERGONOMY }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K), + .driver_data = MS_ERGONOMY }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP), + .driver_data = MS_ERGONOMY }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K), ++ .driver_data = MS_ERGONOMY }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K), + .driver_data = MS_ERGONOMY | MS_RDESC }, + { 
HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB), +@@ -207,6 +256,10 @@ static const struct hid_device_id ms_devices[] = { + .driver_data = MS_NOGET }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), + .driver_data = MS_DUPLICATE_USAGES }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3), ++ .driver_data = MS_HIDINPUT }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP), ++ .driver_data = MS_HIDINPUT }, + + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT), + .driver_data = MS_PRESENTER }, +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c +index e29d8a0feb5f..f62c65ec117e 100644 +--- a/drivers/hid/i2c-hid/i2c-hid.c ++++ b/drivers/hid/i2c-hid/i2c-hid.c +@@ -356,7 +356,10 @@ static int i2c_hid_hwreset(struct i2c_client *client) + static void i2c_hid_get_input(struct i2c_hid *ihid) + { + int ret, ret_size; +- int size = ihid->bufsize; ++ int size = le16_to_cpu(ihid->hdesc.wMaxInputLength); ++ ++ if (size > ihid->bufsize) ++ size = ihid->bufsize; + + ret = i2c_master_recv(ihid->client, ihid->inbuf, size); + if (ret != size) { +diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c +index 10b616702780..0b531c6a76a5 100644 +--- a/drivers/hid/usbhid/hid-pidff.c ++++ b/drivers/hid/usbhid/hid-pidff.c +@@ -1252,6 +1252,8 @@ int hid_pidff_init(struct hid_device *hid) + + pidff->hid = hid; + ++ hid_device_io_start(hid); ++ + pidff_find_reports(hid, HID_OUTPUT_REPORT, pidff); + pidff_find_reports(hid, HID_FEATURE_REPORT, pidff); + +@@ -1315,9 +1317,13 @@ int hid_pidff_init(struct hid_device *hid) + + hid_info(dev, "Force feedback for USB HID PID devices by Anssi Hannula <anssi.hannula@gmail.com>\n"); + ++ hid_device_io_stop(hid); ++ + return 0; + + fail: ++ hid_device_io_stop(hid); ++ + kfree(pidff); + return error; + } +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c +index 3554496bacf8..25484ee3c51e 100644 +--- a/drivers/hid/usbhid/hid-quirks.c ++++ b/drivers/hid/usbhid/hid-quirks.c +@@ -72,16 +72,22 @@ static const struct hid_blacklist { + { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL }, ++ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL }, ++ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, ++ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, ++ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_PENMOUNT, 
USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET }, ++ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, ++ { USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS }, +diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c +index 57825ead7db2..7248147fbf2b 100644 +--- a/drivers/iio/dac/ad5686.c ++++ b/drivers/iio/dac/ad5686.c +@@ -321,7 +321,7 @@ static int ad5686_probe(struct spi_device *spi) + st = iio_priv(indio_dev); + spi_set_drvdata(spi, indio_dev); + +- st->reg = devm_regulator_get(&spi->dev, "vcc"); ++ st->reg = devm_regulator_get_optional(&spi->dev, "vcc"); + if (!IS_ERR(st->reg)) { + ret = regulator_enable(st->reg); + if (ret) +diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c +index 7c582f7ae34e..70753bf23a86 100644 +--- a/drivers/iio/imu/adis16400_core.c ++++ b/drivers/iio/imu/adis16400_core.c +@@ -26,6 +26,7 @@ + #include <linux/list.h> + #include <linux/module.h> + #include <linux/debugfs.h> ++#include <linux/bitops.h> + + #include <linux/iio/iio.h> + #include <linux/iio/sysfs.h> +@@ -447,7 +448,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, + mutex_unlock(&indio_dev->mlock); + if (ret) + return ret; +- val16 = ((val16 & 0xFFF) << 4) >> 4; ++ val16 = sign_extend32(val16, 11); + *val = val16; + return IIO_VAL_INT; + case IIO_CHAN_INFO_OFFSET: +diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h +index 1946101419a3..675d3c796b9f 100644 +--- a/drivers/infiniband/hw/qib/qib.h ++++ b/drivers/infiniband/hw/qib/qib.h +@@ -1080,12 +1080,6 @@ struct qib_devdata { + /* control high-level access to EEPROM */ + struct mutex eep_lock; + uint64_t traffic_wds; +- /* active time is kept in seconds, but logged in hours */ +- atomic_t active_time; +- /* Below are nominal shadow of EEPROM, new since last EEPROM update */ +- uint8_t eep_st_errs[QIB_EEP_LOG_CNT]; +- uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT]; +- uint16_t eep_hrs; + /* + * masks for which bits of errs, hwerrs that cause + * each of the counters to increment. 
+@@ -1307,8 +1301,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer, + int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, + const void *buffer, int len); + void qib_get_eeprom_info(struct qib_devdata *); +-int qib_update_eeprom_log(struct qib_devdata *dd); +-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr); ++#define qib_inc_eeprom_err(dd, eidx, incr) + void qib_dump_lookup_output_queue(struct qib_devdata *); + void qib_force_pio_avail_update(struct qib_devdata *); + void qib_clear_symerror_on_linkup(unsigned long opaque); +diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c +index 4d5d71aaa2b4..e2280b07df02 100644 +--- a/drivers/infiniband/hw/qib/qib_eeprom.c ++++ b/drivers/infiniband/hw/qib/qib_eeprom.c +@@ -267,190 +267,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd) + "Board SN %s did not pass functional test: %s\n", + dd->serial, ifp->if_comment); + +- memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT); +- /* +- * Power-on (actually "active") hours are kept as little-endian value +- * in EEPROM, but as seconds in a (possibly as small as 24-bit) +- * atomic_t while running. +- */ +- atomic_set(&dd->active_time, 0); +- dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8); +- + done: + vfree(buf); + + bail:; + } + +-/** +- * qib_update_eeprom_log - copy active-time and error counters to eeprom +- * @dd: the qlogic_ib device +- * +- * Although the time is kept as seconds in the qib_devdata struct, it is +- * rounded to hours for re-write, as we have only 16 bits in EEPROM. +- * First-cut code reads whole (expected) struct qib_flash, modifies, +- * re-writes. Future direction: read/write only what we need, assuming +- * that the EEPROM had to have been "good enough" for driver init, and +- * if not, we aren't making it worse. +- * +- */ +-int qib_update_eeprom_log(struct qib_devdata *dd) +-{ +- void *buf; +- struct qib_flash *ifp; +- int len, hi_water; +- uint32_t new_time, new_hrs; +- u8 csum; +- int ret, idx; +- unsigned long flags; +- +- /* first, check if we actually need to do anything. */ +- ret = 0; +- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { +- if (dd->eep_st_new_errs[idx]) { +- ret = 1; +- break; +- } +- } +- new_time = atomic_read(&dd->active_time); +- +- if (ret == 0 && new_time < 3600) +- goto bail; +- +- /* +- * The quick-check above determined that there is something worthy +- * of logging, so get current contents and do a more detailed idea. +- * read full flash, not just currently used part, since it may have +- * been written with a newer definition +- */ +- len = sizeof(struct qib_flash); +- buf = vmalloc(len); +- ret = 1; +- if (!buf) { +- qib_dev_err(dd, +- "Couldn't allocate memory to read %u bytes from eeprom for logging\n", +- len); +- goto bail; +- } +- +- /* Grab semaphore and read current EEPROM. If we get an +- * error, let go, but if not, keep it until we finish write. 
+- */ +- ret = mutex_lock_interruptible(&dd->eep_lock); +- if (ret) { +- qib_dev_err(dd, "Unable to acquire EEPROM for logging\n"); +- goto free_bail; +- } +- ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len); +- if (ret) { +- mutex_unlock(&dd->eep_lock); +- qib_dev_err(dd, "Unable read EEPROM for logging\n"); +- goto free_bail; +- } +- ifp = (struct qib_flash *)buf; +- +- csum = flash_csum(ifp, 0); +- if (csum != ifp->if_csum) { +- mutex_unlock(&dd->eep_lock); +- qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n", +- csum, ifp->if_csum); +- ret = 1; +- goto free_bail; +- } +- hi_water = 0; +- spin_lock_irqsave(&dd->eep_st_lock, flags); +- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { +- int new_val = dd->eep_st_new_errs[idx]; +- if (new_val) { +- /* +- * If we have seen any errors, add to EEPROM values +- * We need to saturate at 0xFF (255) and we also +- * would need to adjust the checksum if we were +- * trying to minimize EEPROM traffic +- * Note that we add to actual current count in EEPROM, +- * in case it was altered while we were running. +- */ +- new_val += ifp->if_errcntp[idx]; +- if (new_val > 0xFF) +- new_val = 0xFF; +- if (ifp->if_errcntp[idx] != new_val) { +- ifp->if_errcntp[idx] = new_val; +- hi_water = offsetof(struct qib_flash, +- if_errcntp) + idx; +- } +- /* +- * update our shadow (used to minimize EEPROM +- * traffic), to match what we are about to write. +- */ +- dd->eep_st_errs[idx] = new_val; +- dd->eep_st_new_errs[idx] = 0; +- } +- } +- /* +- * Now update active-time. We would like to round to the nearest hour +- * but unless atomic_t are sure to be proper signed ints we cannot, +- * because we need to account for what we "transfer" to EEPROM and +- * if we log an hour at 31 minutes, then we would need to set +- * active_time to -29 to accurately count the _next_ hour. +- */ +- if (new_time >= 3600) { +- new_hrs = new_time / 3600; +- atomic_sub((new_hrs * 3600), &dd->active_time); +- new_hrs += dd->eep_hrs; +- if (new_hrs > 0xFFFF) +- new_hrs = 0xFFFF; +- dd->eep_hrs = new_hrs; +- if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) { +- ifp->if_powerhour[0] = new_hrs & 0xFF; +- hi_water = offsetof(struct qib_flash, if_powerhour); +- } +- if ((new_hrs >> 8) != ifp->if_powerhour[1]) { +- ifp->if_powerhour[1] = new_hrs >> 8; +- hi_water = offsetof(struct qib_flash, if_powerhour) + 1; +- } +- } +- /* +- * There is a tiny possibility that we could somehow fail to write +- * the EEPROM after updating our shadows, but problems from holding +- * the spinlock too long are a much bigger issue. +- */ +- spin_unlock_irqrestore(&dd->eep_st_lock, flags); +- if (hi_water) { +- /* we made some change to the data, uopdate cksum and write */ +- csum = flash_csum(ifp, 1); +- ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1); +- } +- mutex_unlock(&dd->eep_lock); +- if (ret) +- qib_dev_err(dd, "Failed updating EEPROM\n"); +- +-free_bail: +- vfree(buf); +-bail: +- return ret; +-} +- +-/** +- * qib_inc_eeprom_err - increment one of the four error counters +- * that are logged to EEPROM. +- * @dd: the qlogic_ib device +- * @eidx: 0..3, the counter to increment +- * @incr: how much to add +- * +- * Each counter is 8-bits, and saturates at 255 (0xFF). They +- * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log() +- * is called, but it can only be called in a context that allows sleep. +- * This function can be called even at interrupt level. 
+- */ +-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr) +-{ +- uint new_val; +- unsigned long flags; +- +- spin_lock_irqsave(&dd->eep_st_lock, flags); +- new_val = dd->eep_st_new_errs[eidx] + incr; +- if (new_val > 255) +- new_val = 255; +- dd->eep_st_new_errs[eidx] = new_val; +- spin_unlock_irqrestore(&dd->eep_st_lock, flags); +-} +diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c +index 84e593d6007b..295f6312e6a9 100644 +--- a/drivers/infiniband/hw/qib/qib_iba6120.c ++++ b/drivers/infiniband/hw/qib/qib_iba6120.c +@@ -2682,8 +2682,6 @@ static void qib_get_6120_faststats(unsigned long opaque) + spin_lock_irqsave(&dd->eep_st_lock, flags); + traffic_wds -= dd->traffic_wds; + dd->traffic_wds += traffic_wds; +- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) +- atomic_add(5, &dd->active_time); /* S/B #define */ + spin_unlock_irqrestore(&dd->eep_st_lock, flags); + + qib_chk_6120_errormask(dd); +diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c +index 454c2e7668fe..c86e71b9e160 100644 +--- a/drivers/infiniband/hw/qib/qib_iba7220.c ++++ b/drivers/infiniband/hw/qib/qib_iba7220.c +@@ -3299,8 +3299,6 @@ static void qib_get_7220_faststats(unsigned long opaque) + spin_lock_irqsave(&dd->eep_st_lock, flags); + traffic_wds -= dd->traffic_wds; + dd->traffic_wds += traffic_wds; +- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) +- atomic_add(5, &dd->active_time); /* S/B #define */ + spin_unlock_irqrestore(&dd->eep_st_lock, flags); + done: + mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); +diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c +index d1bd21319d7d..0f8d1f0bd929 100644 +--- a/drivers/infiniband/hw/qib/qib_iba7322.c ++++ b/drivers/infiniband/hw/qib/qib_iba7322.c +@@ -5191,8 +5191,6 @@ static void qib_get_7322_faststats(unsigned long opaque) + spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); + traffic_wds -= ppd->dd->traffic_wds; + ppd->dd->traffic_wds += traffic_wds; +- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) +- atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time); + spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); + if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active & + QIB_IB_QDR) && +diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c +index 76c3e177164d..8c9bb6c35838 100644 +--- a/drivers/infiniband/hw/qib/qib_init.c ++++ b/drivers/infiniband/hw/qib/qib_init.c +@@ -922,7 +922,6 @@ static void qib_shutdown_device(struct qib_devdata *dd) + } + } + +- qib_update_eeprom_log(dd); + } + + /** +diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c +index 3c8e4e3caca6..b9ccbda7817d 100644 +--- a/drivers/infiniband/hw/qib/qib_sysfs.c ++++ b/drivers/infiniband/hw/qib/qib_sysfs.c +@@ -611,28 +611,6 @@ bail: + return ret < 0 ? ret : count; + } + +-static ssize_t show_logged_errs(struct device *device, +- struct device_attribute *attr, char *buf) +-{ +- struct qib_ibdev *dev = +- container_of(device, struct qib_ibdev, ibdev.dev); +- struct qib_devdata *dd = dd_from_dev(dev); +- int idx, count; +- +- /* force consistency with actual EEPROM */ +- if (qib_update_eeprom_log(dd) != 0) +- return -ENXIO; +- +- count = 0; +- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { +- count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c", +- dd->eep_st_errs[idx], +- idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' '); +- } +- +- return count; +-} +- + /* + * Dump tempsense regs. 
in decimal, to ease shell-scripts. + */ +@@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL); + static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL); + static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); + static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); +-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL); + static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); + static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL); + static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); +@@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = { + &dev_attr_nfreectxts, + &dev_attr_serial, + &dev_attr_boardversion, +- &dev_attr_logged_errors, + &dev_attr_tempsense, + &dev_attr_localbus_info, + &dev_attr_chip_reset, +diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c +index e60c2eaea7bb..951addc80fcc 100644 +--- a/drivers/md/dm-io.c ++++ b/drivers/md/dm-io.c +@@ -290,6 +290,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, + unsigned short logical_block_size = queue_logical_block_size(q); + sector_t num_sectors; + ++ /* Reject unsupported discard requests */ ++ if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) { ++ dec_count(io, region, -EOPNOTSUPP); ++ return; ++ } ++ + /* + * where->count may be zero if rw holds a flush and we need to + * send a zero-sized flush. +diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c +index 9584443c5614..9388c3654f0a 100644 +--- a/drivers/md/dm-raid1.c ++++ b/drivers/md/dm-raid1.c +@@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context) + return; + } + ++ /* ++ * If the bio is discard, return an error, but do not ++ * degrade the array. ++ */ ++ if (bio->bi_rw & REQ_DISCARD) { ++ bio_endio(bio, -EOPNOTSUPP); ++ return; ++ } ++ + for (i = 0; i < ms->nr_mirrors; i++) + if (test_bit(i, &error)) + fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR); +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c +index 944690bafd93..d892a05c84f4 100644 +--- a/drivers/md/dm-snap.c ++++ b/drivers/md/dm-snap.c +@@ -1439,8 +1439,6 @@ out: + full_bio->bi_end_io = pe->full_bio_end_io; + full_bio->bi_private = pe->full_bio_private; + } +- free_pending_exception(pe); +- + increment_pending_exceptions_done_count(); + + up_write(&s->lock); +@@ -1457,6 +1455,8 @@ out: + } + + retry_origin_bios(s, origin_bios); ++ ++ free_pending_exception(pe); + } + + static void commit_callback(void *context, int success) +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index 2f03e8e10c24..93f3fe443657 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -2375,7 +2375,7 @@ int dm_setup_md_queue(struct mapped_device *md) + return 0; + } + +-static struct mapped_device *dm_find_md(dev_t dev) ++struct mapped_device *dm_get_md(dev_t dev) + { + struct mapped_device *md; + unsigned minor = MINOR(dev); +@@ -2386,12 +2386,15 @@ static struct mapped_device *dm_find_md(dev_t dev) + spin_lock(&_minor_lock); + + md = idr_find(&_minor_idr, minor); +- if (md && (md == MINOR_ALLOCED || +- (MINOR(disk_devt(dm_disk(md))) != minor) || +- dm_deleting_md(md) || +- test_bit(DMF_FREEING, &md->flags))) { +- md = NULL; +- goto out; ++ if (md) { ++ if ((md == MINOR_ALLOCED || ++ (MINOR(disk_devt(dm_disk(md))) != minor) || ++ dm_deleting_md(md) || ++ test_bit(DMF_FREEING, &md->flags))) { ++ md = NULL; ++ goto out; ++ } ++ dm_get(md); + } + + out: +@@ -2399,16 +2402,6 @@ out: + + return md; + } +- +-struct mapped_device *dm_get_md(dev_t dev) +-{ +- 
struct mapped_device *md = dm_find_md(dev); +- +- if (md) +- dm_get(md); +- +- return md; +-} + EXPORT_SYMBOL_GPL(dm_get_md); + + void *dm_get_mdptr(struct mapped_device *md) +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index 6564eebbdf0e..633b6e1e7d4d 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -557,7 +557,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect + if (test_bit(WriteMostly, &rdev->flags)) { + /* Don't balance among write-mostly, just + * use the first as a last resort */ +- if (best_disk < 0) { ++ if (best_dist_disk < 0) { + if (is_badblock(rdev, this_sector, sectors, + &first_bad, &bad_sectors)) { + if (first_bad < this_sector) +@@ -566,7 +566,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect + best_good_sectors = first_bad - this_sector; + } else + best_good_sectors = sectors; +- best_disk = disk; ++ best_dist_disk = disk; ++ best_pending_disk = disk; + } + continue; + } +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 7b54c3bf9f8f..09c18062bbc2 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -2964,7 +2964,8 @@ static void handle_stripe_dirtying(struct r5conf *conf, + * generate correct data from the parity. + */ + if (conf->max_degraded == 2 || +- (recovery_cp < MaxSector && sh->sector >= recovery_cp)) { ++ (recovery_cp < MaxSector && sh->sector >= recovery_cp && ++ s->failed == 0)) { + /* Calculate the real rcw later - for now make it + * look like rcw is cheaper + */ +diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c +index f674dc024d06..d2a4e6d40bf0 100644 +--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c ++++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c +@@ -350,6 +350,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap) + { + struct dvb_usb_device *d = adap_to_d(adap); + struct lme2510_state *lme_int = adap_to_priv(adap); ++ struct usb_host_endpoint *ep; + + lme_int->lme_urb = usb_alloc_urb(0, GFP_ATOMIC); + +@@ -371,6 +372,12 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap) + adap, + 8); + ++ /* Quirk of pipe reporting PIPE_BULK but behaves as interrupt */ ++ ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe); ++ ++ if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) ++ lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa), ++ + lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC); +diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c +index 3fd43b0e69d1..65bcebb89260 100644 +--- a/drivers/misc/mei/init.c ++++ b/drivers/misc/mei/init.c +@@ -228,6 +228,8 @@ void mei_stop(struct mei_device *dev) + + dev->dev_state = MEI_DEV_POWER_DOWN; + mei_reset(dev, 0); ++ /* move device to disabled state unconditionally */ ++ dev->dev_state = MEI_DEV_DISABLED; + + mutex_unlock(&dev->device_lock); + +diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c +index 793dacd3b841..561c6b4907a1 100644 +--- a/drivers/mmc/host/sdhci-pxav3.c ++++ b/drivers/mmc/host/sdhci-pxav3.c +@@ -201,8 +201,8 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev) + if (!pdata) + return NULL; + +- of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles); +- if (clk_delay_cycles > 0) ++ if (!of_property_read_u32(np, "mrvl,clk-delay-cycles", ++ &clk_delay_cycles)) + pdata->clk_delay_cycles = clk_delay_cycles; + + return pdata; +diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c +index 
89d21fc47a16..393873fb792e 100644 +--- a/drivers/net/macvtap.c ++++ b/drivers/net/macvtap.c +@@ -635,12 +635,15 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb, + return 0; + } + ++/* Neighbour code has some assumptions on HH_DATA_MOD alignment */ ++#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN) ++ + /* Get packet from user space buffer */ + static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, + const struct iovec *iv, unsigned long total_len, + size_t count, int noblock) + { +- int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); ++ int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE); + struct sk_buff *skb; + struct macvlan_dev *vlan; + unsigned long len = total_len; +@@ -699,7 +702,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, + linear = vnet_hdr.hdr_len; + } + +- skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, ++ skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen, + linear, noblock, &err); + if (!skb) + goto err; +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c +index 36c6994436b7..0bc73f2c24ba 100644 +--- a/drivers/net/phy/phy.c ++++ b/drivers/net/phy/phy.c +@@ -203,6 +203,25 @@ static inline int phy_find_valid(int idx, u32 features) + } + + /** ++ * phy_check_valid - check if there is a valid PHY setting which matches ++ * speed, duplex, and feature mask ++ * @speed: speed to match ++ * @duplex: duplex to match ++ * @features: A mask of the valid settings ++ * ++ * Description: Returns true if there is a valid setting, false otherwise. ++ */ ++static inline bool phy_check_valid(int speed, int duplex, u32 features) ++{ ++ unsigned int idx; ++ ++ idx = phy_find_valid(phy_find_setting(speed, duplex), features); ++ ++ return settings[idx].speed == speed && settings[idx].duplex == duplex && ++ (settings[idx].setting & features); ++} ++ ++/** + * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex + * @phydev: the target phy_device struct + * +@@ -1018,7 +1037,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) + (phydev->interface == PHY_INTERFACE_MODE_RGMII))) { + int eee_lp, eee_cap, eee_adv; + u32 lp, cap, adv; +- int idx, status; ++ int status; + + /* Read phy status to properly get the right settings */ + status = phy_read_status(phydev); +@@ -1050,8 +1069,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) + + adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); + lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); +- idx = phy_find_setting(phydev->speed, phydev->duplex); +- if (!(lp & adv & settings[idx].setting)) ++ if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv)) + goto eee_exit; + + if (clk_stop_enable) { +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c +index 258f65ba733f..020581ddfdd3 100644 +--- a/drivers/net/team/team.c ++++ b/drivers/net/team/team.c +@@ -42,9 +42,7 @@ + + static struct team_port *team_port_get_rcu(const struct net_device *dev) + { +- struct team_port *port = rcu_dereference(dev->rx_handler_data); +- +- return team_port_exists(dev) ? 
port : NULL; ++ return rcu_dereference(dev->rx_handler_data); + } + + static struct team_port *team_port_get_rtnl(const struct net_device *dev) +@@ -1718,11 +1716,11 @@ static int team_set_mac_address(struct net_device *dev, void *p) + if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); +- rcu_read_lock(); +- list_for_each_entry_rcu(port, &team->port_list, list) ++ mutex_lock(&team->lock); ++ list_for_each_entry(port, &team->port_list, list) + if (team->ops.port_change_dev_addr) + team->ops.port_change_dev_addr(team, port); +- rcu_read_unlock(); ++ mutex_unlock(&team->lock); + return 0; + } + +diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c +index 0fcc8e65a068..74323e9d9004 100644 +--- a/drivers/net/usb/plusb.c ++++ b/drivers/net/usb/plusb.c +@@ -136,6 +136,11 @@ static const struct usb_device_id products [] = { + }, { + USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */ + .driver_info = (unsigned long) &prolific_info, ++}, { ++ USB_DEVICE(0x3923, 0x7825), /* National Instruments USB ++ * Host-to-Host Cable ++ */ ++ .driver_info = (unsigned long) &prolific_info, + }, + + { }, // END +diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c +index a3399c4f13a9..b9b651ea9851 100644 +--- a/drivers/net/wireless/ath/ath5k/reset.c ++++ b/drivers/net/wireless/ath/ath5k/reset.c +@@ -478,7 +478,7 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags) + regval = ioread32(reg); + iowrite32(regval | val, reg); + regval = ioread32(reg); +- usleep_range(100, 150); ++ udelay(100); /* NB: should be atomic */ + + /* Bring BB/MAC out of reset */ + iowrite32(regval & ~val, reg); +diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h +index 61f6b21fb0ae..dc6bd8cd9b83 100644 +--- a/drivers/net/wireless/ath/ath6kl/hif.h ++++ b/drivers/net/wireless/ath/ath6kl/hif.h +@@ -197,9 +197,9 @@ struct hif_scatter_req { + /* bounce buffer for upper layers to copy to/from */ + u8 *virt_dma_buf; + +- struct hif_scatter_item scat_list[1]; +- + u32 scat_q_depth; ++ ++ struct hif_scatter_item scat_list[0]; + }; + + struct ath6kl_irq_proc_registers { +diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c +index 7126bdd4236c..6bf15a331714 100644 +--- a/drivers/net/wireless/ath/ath6kl/sdio.c ++++ b/drivers/net/wireless/ath/ath6kl/sdio.c +@@ -348,7 +348,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio, + int i, scat_req_sz, scat_list_sz, size; + u8 *virt_buf; + +- scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); ++ scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item); + scat_req_sz = sizeof(*s_req) + scat_list_sz; + + if (!virt_scat) +diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c +index 5f6fd44e72f1..c34b011769b7 100644 +--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c +@@ -379,9 +379,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, + mvmvif->uploaded = false; + mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + +- /* does this make sense at all? 
*/ +- mvmvif->color++; +- + spin_lock_bh(&mvm->time_event_lock); + iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); + spin_unlock_bh(&mvm->time_event_lock); +diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c +index f41add9c8093..c95b4aac1317 100644 +--- a/drivers/net/wireless/iwlwifi/mvm/tx.c ++++ b/drivers/net/wireless/iwlwifi/mvm/tx.c +@@ -832,6 +832,11 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + sta_id = ba_notif->sta_id; + tid = ba_notif->tid; + ++ if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT || ++ tid >= IWL_MAX_TID_COUNT, ++ "sta_id %d tid %d", sta_id, tid)) ++ return 0; ++ + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); +diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c +index 1424335163b9..911a15074ffb 100644 +--- a/drivers/net/wireless/iwlwifi/pcie/tx.c ++++ b/drivers/net/wireless/iwlwifi/pcie/tx.c +@@ -729,7 +729,12 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) + iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, + trans_pcie->kw.dma >> 4); + +- iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr); ++ /* ++ * Send 0 as the scd_base_addr since the device may have be reset ++ * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will ++ * contain garbage. ++ */ ++ iwl_pcie_tx_start(trans, 0); + } + + /* +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c +index 53dc57127ca3..150170bb53e6 100644 +--- a/drivers/pci/pci-driver.c ++++ b/drivers/pci/pci-driver.c +@@ -1306,7 +1306,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env) + if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev))) + return -ENOMEM; + +- if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x", ++ if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X", + pdev->vendor, pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device, + (u8)(pdev->class >> 16), (u8)(pdev->class >> 8), +diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c +index c5d0a08a8747..d6d499782fb4 100644 +--- a/drivers/pci/rom.c ++++ b/drivers/pci/rom.c +@@ -69,6 +69,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) + { + void __iomem *image; + int last_image; ++ unsigned length; + + image = rom; + do { +@@ -91,9 +92,9 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) + if (readb(pds + 3) != 'R') + break; + last_image = readb(pds + 21) & 0x80; +- /* this length is reliable */ +- image += readw(pds + 16) * 512; +- } while (!last_image); ++ length = readw(pds + 16); ++ image += length * 512; ++ } while (length && !last_image); + + /* never return a size larger than the PCI resource window */ + /* there are known ROMs that get the size wrong */ +diff --git a/drivers/power/88pm860x_charger.c b/drivers/power/88pm860x_charger.c +index de029bbc1cc1..5ccca8743ce6 100644 +--- a/drivers/power/88pm860x_charger.c ++++ b/drivers/power/88pm860x_charger.c +@@ -711,6 +711,7 @@ static int pm860x_charger_probe(struct platform_device *pdev) + return 0; + + out_irq: ++ power_supply_unregister(&info->usb); + while (--i >= 0) + free_irq(info->irq[i], info); + out: +diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c +index ad3ff8fbfbbb..e4c95e1a6733 100644 +--- a/drivers/power/bq24190_charger.c ++++ b/drivers/power/bq24190_charger.c +@@ -929,7 +929,7 @@ static void bq24190_charger_init(struct power_supply *charger) + 
charger->properties = bq24190_charger_properties; + charger->num_properties = ARRAY_SIZE(bq24190_charger_properties); + charger->supplied_to = bq24190_charger_supplied_to; +- charger->num_supplies = ARRAY_SIZE(bq24190_charger_supplied_to); ++ charger->num_supplicants = ARRAY_SIZE(bq24190_charger_supplied_to); + charger->get_property = bq24190_charger_get_property; + charger->set_property = bq24190_charger_set_property; + charger->property_is_writeable = bq24190_charger_property_is_writeable; +diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c +index a1f5ac7a9806..b19dee79e1c4 100644 +--- a/drivers/scsi/be2iscsi/be_main.c ++++ b/drivers/scsi/be2iscsi/be_main.c +@@ -564,7 +564,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) + "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); + return NULL; + } +- shost->dma_boundary = pcidev->dma_mask; + shost->max_id = BE2_MAX_SESSIONS; + shost->max_channel = 0; + shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c +index f6555921fd7a..a1f04e3b2a8f 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c +@@ -92,6 +92,8 @@ megasas_enable_intr_fusion(struct megasas_instance *instance) + { + struct megasas_register_set __iomem *regs; + regs = instance->reg_set; ++ ++ instance->mask_interrupts = 0; + /* For Thunderbolt/Invader also clear intr on enable */ + writel(~0, ®s->outbound_intr_status); + readl(®s->outbound_intr_status); +@@ -100,7 +102,6 @@ megasas_enable_intr_fusion(struct megasas_instance *instance) + + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_mask); +- instance->mask_interrupts = 0; + } + + /** +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index df5e961484e1..eb81c98386b9 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -522,7 +522,7 @@ static ssize_t + sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) + { + sg_io_hdr_t *hp = &srp->header; +- int err = 0; ++ int err = 0, err2; + int len; + + if (count < SZ_SG_IO_HDR) { +@@ -551,8 +551,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) + goto err_out; + } + err_out: +- err = sg_finish_rem_req(srp); +- return (0 == err) ? count : err; ++ err2 = sg_finish_rem_req(srp); ++ return err ? : err2 ? : count; + } + + static ssize_t +diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c +index 2dfb06aedb15..b1c264e3a7b4 100644 +--- a/drivers/staging/comedi/comedi_compat32.c ++++ b/drivers/staging/comedi/comedi_compat32.c +@@ -265,7 +265,7 @@ static int compat_cmd(struct file *file, unsigned long arg) + { + struct comedi_cmd __user *cmd; + struct comedi32_cmd_struct __user *cmd32; +- int rc; ++ int rc, err; + + cmd32 = compat_ptr(arg); + cmd = compat_alloc_user_space(sizeof(*cmd)); +@@ -274,7 +274,15 @@ static int compat_cmd(struct file *file, unsigned long arg) + if (rc) + return rc; + +- return translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd); ++ rc = translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd); ++ if (rc == -EAGAIN) { ++ /* Special case: copy cmd back to user. */ ++ err = put_compat_cmd(cmd32, cmd); ++ if (err) ++ rc = err; ++ } ++ ++ return rc; + } + + /* Handle 32-bit COMEDI_CMDTEST ioctl. 
*/ +diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c +index 388dbd7a5d27..21e5bc541417 100644 +--- a/drivers/staging/comedi/drivers/cb_pcidas64.c ++++ b/drivers/staging/comedi/drivers/cb_pcidas64.c +@@ -451,6 +451,29 @@ static const struct comedi_lrange ai_ranges_64xx = { + } + }; + ++static const uint8_t ai_range_code_64xx[8] = { ++ 0x0, 0x1, 0x2, 0x3, /* bipolar 10, 5, 2,5, 1.25 */ ++ 0x8, 0x9, 0xa, 0xb /* unipolar 10, 5, 2.5, 1.25 */ ++}; ++ ++/* analog input ranges for 64-Mx boards */ ++static const struct comedi_lrange ai_ranges_64_mx = { ++ 7, { ++ BIP_RANGE(5), ++ BIP_RANGE(2.5), ++ BIP_RANGE(1.25), ++ BIP_RANGE(0.625), ++ UNI_RANGE(5), ++ UNI_RANGE(2.5), ++ UNI_RANGE(1.25) ++ } ++}; ++ ++static const uint8_t ai_range_code_64_mx[7] = { ++ 0x0, 0x1, 0x2, 0x3, /* bipolar 5, 2.5, 1.25, 0.625 */ ++ 0x9, 0xa, 0xb /* unipolar 5, 2.5, 1.25 */ ++}; ++ + /* analog input ranges for 60xx boards */ + static const struct comedi_lrange ai_ranges_60xx = { + 4, +@@ -462,6 +485,10 @@ static const struct comedi_lrange ai_ranges_60xx = { + } + }; + ++static const uint8_t ai_range_code_60xx[4] = { ++ 0x0, 0x1, 0x4, 0x7 /* bipolar 10, 5, 0.5, 0.05 */ ++}; ++ + /* analog input ranges for 6030, etc boards */ + static const struct comedi_lrange ai_ranges_6030 = { + 14, +@@ -483,6 +510,11 @@ static const struct comedi_lrange ai_ranges_6030 = { + } + }; + ++static const uint8_t ai_range_code_6030[14] = { ++ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, /* bip 10, 5, 2, 1, 0.5, 0.2, 0.1 */ ++ 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf /* uni 10, 5, 2, 1, 0.5, 0.2, 0.1 */ ++}; ++ + /* analog input ranges for 6052, etc boards */ + static const struct comedi_lrange ai_ranges_6052 = { + 15, +@@ -505,6 +537,11 @@ static const struct comedi_lrange ai_ranges_6052 = { + } + }; + ++static const uint8_t ai_range_code_6052[15] = { ++ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, /* bipolar 10 ... 0.05 */ ++ 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf /* unipolar 10 ... 
0.1 */ ++}; ++ + /* analog input ranges for 4020 board */ + static const struct comedi_lrange ai_ranges_4020 = { + 2, +@@ -612,6 +649,7 @@ struct pcidas64_board { + int ai_bits; /* analog input resolution */ + int ai_speed; /* fastest conversion period in ns */ + const struct comedi_lrange *ai_range_table; ++ const uint8_t *ai_range_code; + int ao_nchan; /* number of analog out channels */ + int ao_bits; /* analog output resolution */ + int ao_scan_speed; /* analog output scan speed */ +@@ -670,6 +708,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, + .ai_range_table = &ai_ranges_64xx, ++ .ai_range_code = ai_range_code_64xx, + .ao_range_table = &ao_ranges_64xx, + .ao_range_code = ao_range_code_64xx, + .ai_fifo = &ai_fifo_64xx, +@@ -685,6 +724,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, + .ai_range_table = &ai_ranges_64xx, ++ .ai_range_code = ai_range_code_64xx, + .ao_range_table = &ao_ranges_64xx, + .ao_range_code = ao_range_code_64xx, + .ai_fifo = &ai_fifo_64xx, +@@ -699,7 +739,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_bits = 16, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ao_range_table = &ao_ranges_64xx, + .ao_range_code = ao_range_code_64xx, + .ai_fifo = &ai_fifo_64xx, +@@ -714,7 +755,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_bits = 16, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ao_range_table = &ao_ranges_64xx, + .ao_range_code = ao_range_code_64xx, + .ai_fifo = &ai_fifo_64xx, +@@ -729,7 +771,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_bits = 16, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ao_range_table = &ao_ranges_64xx, + .ao_range_code = ao_range_code_64xx, + .ai_fifo = &ai_fifo_64xx, +@@ -744,6 +787,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_bits = 16, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_60xx, ++ .ai_range_code = ai_range_code_60xx, + .ao_range_table = &range_bipolar10, + .ao_range_code = ao_range_code_60xx, + .ai_fifo = &ai_fifo_60xx, +@@ -759,6 +803,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 100000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_60xx, ++ .ai_range_code = ai_range_code_60xx, + .ao_range_table = &range_bipolar10, + .ao_range_code = ao_range_code_60xx, + .ai_fifo = &ai_fifo_60xx, +@@ -773,6 +818,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 100000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_60xx, ++ .ai_range_code = ai_range_code_60xx, + .ao_range_table = &range_bipolar10, + .ao_range_code = ao_range_code_60xx, + .ai_fifo = &ai_fifo_60xx, +@@ -788,6 +834,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 100000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_60xx, ++ .ai_range_code = ai_range_code_60xx, + .ao_range_table = &range_bipolar10, + .ao_range_code = ao_range_code_60xx, + .ai_fifo = &ai_fifo_60xx, +@@ -803,6 +850,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 
10000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6030, ++ .ai_range_code = ai_range_code_6030, + .ao_range_table = &ao_ranges_6030, + .ao_range_code = ao_range_code_6030, + .ai_fifo = &ai_fifo_60xx, +@@ -818,6 +866,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 10000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6030, ++ .ai_range_code = ai_range_code_6030, + .ao_range_table = &ao_ranges_6030, + .ao_range_code = ao_range_code_6030, + .ai_fifo = &ai_fifo_60xx, +@@ -831,6 +880,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 0, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6030, ++ .ai_range_code = ai_range_code_6030, + .ai_fifo = &ai_fifo_60xx, + .has_8255 = 0, + }, +@@ -842,6 +892,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 0, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6030, ++ .ai_range_code = ai_range_code_6030, + .ai_fifo = &ai_fifo_60xx, + .has_8255 = 0, + }, +@@ -854,6 +905,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 0, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_60xx, ++ .ai_range_code = ai_range_code_60xx, + .ai_fifo = &ai_fifo_60xx, + .has_8255 = 0, + }, +@@ -867,6 +919,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 100000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_60xx, ++ .ai_range_code = ai_range_code_60xx, + .ao_range_table = &range_bipolar10, + .ao_range_code = ao_range_code_60xx, + .ai_fifo = &ai_fifo_60xx, +@@ -882,6 +935,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 100000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_60xx, ++ .ai_range_code = ai_range_code_60xx, + .ao_range_table = &range_bipolar10, + .ao_range_code = ao_range_code_60xx, + .ai_fifo = &ai_fifo_60xx, +@@ -897,6 +951,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 1000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6052, ++ .ai_range_code = ai_range_code_6052, + .ao_range_table = &ao_ranges_6030, + .ao_range_code = ao_range_code_6030, + .ai_fifo = &ai_fifo_60xx, +@@ -912,6 +967,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 3333, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6052, ++ .ai_range_code = ai_range_code_6052, + .ao_range_table = &ao_ranges_6030, + .ao_range_code = ao_range_code_6030, + .ai_fifo = &ai_fifo_60xx, +@@ -927,6 +983,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 1000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6052, ++ .ai_range_code = ai_range_code_6052, + .ao_range_table = &ao_ranges_6030, + .ao_range_code = ao_range_code_6030, + .ai_fifo = &ai_fifo_60xx, +@@ -942,6 +999,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 1000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6052, ++ .ai_range_code = ai_range_code_6052, + .ao_range_table = &ao_ranges_6030, + .ao_range_code = ao_range_code_6030, + .ai_fifo = &ai_fifo_60xx, +@@ -976,6 +1034,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, + .ai_range_table = &ai_ranges_64xx, ++ .ai_range_code = ai_range_code_64xx, + .ai_fifo = ai_fifo_64xx, + .has_8255 = 1, + }, +@@ -987,7 +1046,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 0, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = 
&ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ai_fifo = ai_fifo_64xx, + .has_8255 = 1, + }, +@@ -999,7 +1059,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 0, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ai_fifo = ai_fifo_64xx, + .has_8255 = 1, + }, +@@ -1011,7 +1072,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 0, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ai_fifo = ai_fifo_64xx, + .has_8255 = 1, + }, +@@ -1023,7 +1085,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 2, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ai_fifo = ai_fifo_64xx, + .has_8255 = 1, + }, +@@ -1035,7 +1098,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 2, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ai_fifo = ai_fifo_64xx, + .has_8255 = 1, + }, +@@ -1047,7 +1111,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 2, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ai_fifo = ai_fifo_64xx, + .has_8255 = 1, + }, +@@ -1144,45 +1209,8 @@ static unsigned int ai_range_bits_6xxx(const struct comedi_device *dev, + unsigned int range_index) + { + const struct pcidas64_board *thisboard = comedi_board(dev); +- const struct comedi_krange *range = +- &thisboard->ai_range_table->range[range_index]; +- unsigned int bits = 0; + +- switch (range->max) { +- case 10000000: +- bits = 0x000; +- break; +- case 5000000: +- bits = 0x100; +- break; +- case 2000000: +- case 2500000: +- bits = 0x200; +- break; +- case 1000000: +- case 1250000: +- bits = 0x300; +- break; +- case 500000: +- bits = 0x400; +- break; +- case 200000: +- case 250000: +- bits = 0x500; +- break; +- case 100000: +- bits = 0x600; +- break; +- case 50000: +- bits = 0x700; +- break; +- default: +- comedi_error(dev, "bug! 
in ai_range_bits_6xxx"); +- break; +- } +- if (range->min == 0) +- bits += 0x900; +- return bits; ++ return thisboard->ai_range_code[range_index] << 8; + } + + static unsigned int hw_revision(const struct comedi_device *dev, +diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c +index 3ff664ce7503..37b14f39551e 100644 +--- a/drivers/staging/lustre/lustre/llite/vvp_io.c ++++ b/drivers/staging/lustre/lustre/llite/vvp_io.c +@@ -601,7 +601,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) + return 0; + } + +- if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) { ++ if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { + CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); + return -EFAULT; + } +diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c +index 601e9cc61e98..bb2890e79ca0 100644 +--- a/drivers/target/iscsi/iscsi_target_tq.c ++++ b/drivers/target/iscsi/iscsi_target_tq.c +@@ -24,36 +24,22 @@ + #include "iscsi_target_tq.h" + #include "iscsi_target.h" + +-static LIST_HEAD(active_ts_list); + static LIST_HEAD(inactive_ts_list); +-static DEFINE_SPINLOCK(active_ts_lock); + static DEFINE_SPINLOCK(inactive_ts_lock); + static DEFINE_SPINLOCK(ts_bitmap_lock); + +-static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts) +-{ +- spin_lock(&active_ts_lock); +- list_add_tail(&ts->ts_list, &active_ts_list); +- iscsit_global->active_ts++; +- spin_unlock(&active_ts_lock); +-} +- + static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts) + { ++ if (!list_empty(&ts->ts_list)) { ++ WARN_ON(1); ++ return; ++ } + spin_lock(&inactive_ts_lock); + list_add_tail(&ts->ts_list, &inactive_ts_list); + iscsit_global->inactive_ts++; + spin_unlock(&inactive_ts_lock); + } + +-static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts) +-{ +- spin_lock(&active_ts_lock); +- list_del(&ts->ts_list); +- iscsit_global->active_ts--; +- spin_unlock(&active_ts_lock); +-} +- + static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void) + { + struct iscsi_thread_set *ts; +@@ -66,7 +52,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void) + + ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list); + +- list_del(&ts->ts_list); ++ list_del_init(&ts->ts_list); + iscsit_global->inactive_ts--; + spin_unlock(&inactive_ts_lock); + +@@ -204,8 +190,6 @@ static void iscsi_deallocate_extra_thread_sets(void) + + void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts) + { +- iscsi_add_ts_to_active_list(ts); +- + spin_lock_bh(&ts->ts_state_lock); + conn->thread_set = ts; + ts->conn = conn; +@@ -397,7 +381,6 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts) + + if (ts->delay_inactive && (--ts->thread_count == 0)) { + spin_unlock_bh(&ts->ts_state_lock); +- iscsi_del_ts_from_active_list(ts); + + if (!iscsit_global->in_shutdown) + iscsi_deallocate_extra_thread_sets(); +@@ -452,7 +435,6 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts) + + if (ts->delay_inactive && (--ts->thread_count == 0)) { + spin_unlock_bh(&ts->ts_state_lock); +- iscsi_del_ts_from_active_list(ts); + + if (!iscsit_global->in_shutdown) + iscsi_deallocate_extra_thread_sets(); +diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c +index a1e1ecdab86c..36c507c1b4fd 100644 +--- a/drivers/target/target_core_pr.c ++++ b/drivers/target/target_core_pr.c +@@ -1877,8 +1877,8 @@ 
static int core_scsi3_update_aptpl_buf( + } + + if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { +- pr_err("Unable to update renaming" +- " APTPL metadata\n"); ++ pr_err("Unable to update renaming APTPL metadata," ++ " reallocating larger buffer\n"); + ret = -EMSGSIZE; + goto out; + } +@@ -1895,8 +1895,8 @@ static int core_scsi3_update_aptpl_buf( + lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); + + if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { +- pr_err("Unable to update renaming" +- " APTPL metadata\n"); ++ pr_err("Unable to update renaming APTPL metadata," ++ " reallocating larger buffer\n"); + ret = -EMSGSIZE; + goto out; + } +@@ -1959,7 +1959,7 @@ static int __core_scsi3_write_aptpl_to_file( + static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl) + { + unsigned char *buf; +- int rc; ++ int rc, len = PR_APTPL_BUF_LEN; + + if (!aptpl) { + char *null_buf = "No Registrations or Reservations\n"; +@@ -1973,25 +1973,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b + + return 0; + } +- +- buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL); ++retry: ++ buf = vzalloc(len); + if (!buf) + return TCM_OUT_OF_RESOURCES; + +- rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN); ++ rc = core_scsi3_update_aptpl_buf(dev, buf, len); + if (rc < 0) { +- kfree(buf); +- return TCM_OUT_OF_RESOURCES; ++ vfree(buf); ++ len *= 2; ++ goto retry; + } + + rc = __core_scsi3_write_aptpl_to_file(dev, buf); + if (rc != 0) { + pr_err("SPC-3 PR: Could not update APTPL\n"); +- kfree(buf); ++ vfree(buf); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + dev->t10_pr.pr_aptpl_active = 1; +- kfree(buf); ++ vfree(buf); + pr_debug("SPC-3 PR: Set APTPL Bit Activated\n"); + return 0; + } +diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c +index d83aea80d83c..63d56cda2b96 100644 +--- a/drivers/target/target_core_sbc.c ++++ b/drivers/target/target_core_sbc.c +@@ -250,6 +250,8 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) + static sense_reason_t + sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) + { ++ struct se_device *dev = cmd->se_dev; ++ sector_t end_lba = dev->transport->get_blocks(dev) + 1; + unsigned int sectors = sbc_get_write_same_sectors(cmd); + + if ((flags[0] & 0x04) || (flags[0] & 0x02)) { +@@ -263,6 +265,16 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o + sectors, cmd->se_dev->dev_attrib.max_write_same_len); + return TCM_INVALID_CDB_FIELD; + } ++ /* ++ * Sanity check for LBA wrap and request past end of device. 
++ */ ++ if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || ++ ((cmd->t_task_lba + sectors) > end_lba)) { ++ pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n", ++ (unsigned long long)end_lba, cmd->t_task_lba, sectors); ++ return TCM_ADDRESS_OUT_OF_RANGE; ++ } ++ + /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ + if (flags[0] & 0x10) { + pr_warn("WRITE SAME with ANCHOR not supported\n"); +@@ -830,7 +842,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) + unsigned long long end_lba; + + end_lba = dev->transport->get_blocks(dev) + 1; +- if (cmd->t_task_lba + sectors > end_lba) { ++ if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || ++ ((cmd->t_task_lba + sectors) > end_lba)) { + pr_err("cmd exceeds last lba %llu " + "(lba %llu, sectors %u)\n", + end_lba, cmd->t_task_lba, sectors); +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c +index 25c9bc783722..e49616eeb1cc 100644 +--- a/drivers/tty/pty.c ++++ b/drivers/tty/pty.c +@@ -209,6 +209,9 @@ static int pty_signal(struct tty_struct *tty, int sig) + unsigned long flags; + struct pid *pgrp; + ++ if (sig != SIGINT && sig != SIGQUIT && sig != SIGTSTP) ++ return -EINVAL; ++ + if (tty->link) { + spin_lock_irqsave(&tty->link->ctrl_lock, flags); + pgrp = get_pid(tty->link->pgrp); +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c +index 3b301a7ec662..ebdc00f184a1 100644 +--- a/drivers/tty/serial/atmel_serial.c ++++ b/drivers/tty/serial/atmel_serial.c +@@ -2375,7 +2375,7 @@ static int atmel_serial_probe(struct platform_device *pdev) + + ret = atmel_init_port(port, pdev); + if (ret) +- goto err; ++ goto err_clear_bit; + + if (!atmel_use_pdc_rx(&port->uart)) { + ret = -ENOMEM; +@@ -2424,6 +2424,8 @@ err_alloc_ring: + clk_put(port->clk); + port->clk = NULL; + } ++err_clear_bit: ++ clear_bit(port->uart.line, atmel_ports_in_use); + err: + return ret; + } +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c +index 25d07412e08e..39988fa91294 100644 +--- a/drivers/tty/tty_io.c ++++ b/drivers/tty/tty_io.c +@@ -996,8 +996,8 @@ EXPORT_SYMBOL(start_tty); + /* We limit tty time update visibility to every 8 seconds or so. 
*/ + static void tty_update_time(struct timespec *time) + { +- unsigned long sec = get_seconds() & ~7; +- if ((long)(sec - time->tv_sec) > 0) ++ unsigned long sec = get_seconds(); ++ if (abs(sec - time->tv_sec) & ~7) + time->tv_sec = sec; + } + +diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c +index 6fd60fece6b4..22da05d27009 100644 +--- a/drivers/tty/tty_ioctl.c ++++ b/drivers/tty/tty_ioctl.c +@@ -217,11 +217,17 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout) + #endif + if (!timeout) + timeout = MAX_SCHEDULE_TIMEOUT; ++ + if (wait_event_interruptible_timeout(tty->write_wait, +- !tty_chars_in_buffer(tty), timeout) >= 0) { +- if (tty->ops->wait_until_sent) +- tty->ops->wait_until_sent(tty, timeout); ++ !tty_chars_in_buffer(tty), timeout) < 0) { ++ return; + } ++ ++ if (timeout == MAX_SCHEDULE_TIMEOUT) ++ timeout = 0; ++ ++ if (tty->ops->wait_until_sent) ++ tty->ops->wait_until_sent(tty, timeout); + } + EXPORT_SYMBOL(tty_wait_until_sent); + +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index 239eae55600a..e341fd52a80d 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -498,6 +498,7 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed) + #endif + if (DO_UPDATE(vc)) + do_update_region(vc, (unsigned long) p, count); ++ notify_update(vc); + } + + /* used by selection: complement pointer position */ +@@ -514,6 +515,7 @@ void complement_pos(struct vc_data *vc, int offset) + scr_writew(old, screenpos(vc, old_offset, 1)); + if (DO_UPDATE(vc)) + vc->vc_sw->con_putc(vc, old, oldy, oldx); ++ notify_update(vc); + } + + old_offset = offset; +@@ -531,8 +533,8 @@ void complement_pos(struct vc_data *vc, int offset) + oldy = (offset >> 1) / vc->vc_cols; + vc->vc_sw->con_putc(vc, new, oldy, oldx); + } ++ notify_update(vc); + } +- + } + + static void insert_char(struct vc_data *vc, unsigned int nr) +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 2574b24d70c0..e2b4ea7fb2b1 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1542,6 +1542,8 @@ static int acm_reset_resume(struct usb_interface *intf) + + static const struct usb_device_id acm_ids[] = { + /* quirky and broken devices */ ++ { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */ ++ .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ + { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ + .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ + { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ +diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c +index 23559746be92..4596f7e34d20 100644 +--- a/drivers/usb/core/buffer.c ++++ b/drivers/usb/core/buffer.c +@@ -22,17 +22,25 @@ + */ + + /* FIXME tune these based on pool statistics ... */ +-static const size_t pool_max[HCD_BUFFER_POOLS] = { +- /* platforms without dma-friendly caches might need to +- * prevent cacheline sharing... +- */ +- 32, +- 128, +- 512, +- PAGE_SIZE / 2 +- /* bigger --> allocate pages */ ++static size_t pool_max[HCD_BUFFER_POOLS] = { ++ 32, 128, 512, 2048, + }; + ++void __init usb_init_pool_max(void) ++{ ++ /* ++ * The pool_max values must never be smaller than ++ * ARCH_KMALLOC_MINALIGN. 
++ */ ++ if (ARCH_KMALLOC_MINALIGN <= 32) ++ ; /* Original value is okay */ ++ else if (ARCH_KMALLOC_MINALIGN <= 64) ++ pool_max[0] = 64; ++ else if (ARCH_KMALLOC_MINALIGN <= 128) ++ pool_max[0] = 0; /* Don't use this pool */ ++ else ++ BUILD_BUG(); /* We don't allow this */ ++} + + /* SETUP primitives */ + +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c +index 31ffd8459456..0b2de7d68a7a 100644 +--- a/drivers/usb/core/devio.c ++++ b/drivers/usb/core/devio.c +@@ -501,6 +501,7 @@ static void async_completed(struct urb *urb) + as->status = urb->status; + signr = as->signr; + if (signr) { ++ memset(&sinfo, 0, sizeof(sinfo)); + sinfo.si_signo = as->signr; + sinfo.si_errno = as->status; + sinfo.si_code = SI_ASYNCIO; +@@ -2229,6 +2230,7 @@ static void usbdev_remove(struct usb_device *udev) + wake_up_all(&ps->wait); + list_del_init(&ps->list); + if (ps->discsignr) { ++ memset(&sinfo, 0, sizeof(sinfo)); + sinfo.si_signo = ps->discsignr; + sinfo.si_errno = EPIPE; + sinfo.si_code = SI_ASYNCIO; +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c +index 830063cb4343..d32755e0c3b1 100644 +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -1618,6 +1618,7 @@ static int unlink1(struct usb_hcd *hcd, struct urb *urb, int status) + int usb_hcd_unlink_urb (struct urb *urb, int status) + { + struct usb_hcd *hcd; ++ struct usb_device *udev = urb->dev; + int retval = -EIDRM; + unsigned long flags; + +@@ -1629,20 +1630,19 @@ int usb_hcd_unlink_urb (struct urb *urb, int status) + spin_lock_irqsave(&hcd_urb_unlink_lock, flags); + if (atomic_read(&urb->use_count) > 0) { + retval = 0; +- usb_get_dev(urb->dev); ++ usb_get_dev(udev); + } + spin_unlock_irqrestore(&hcd_urb_unlink_lock, flags); + if (retval == 0) { + hcd = bus_to_hcd(urb->dev->bus); + retval = unlink1(hcd, urb, status); +- usb_put_dev(urb->dev); ++ if (retval == 0) ++ retval = -EINPROGRESS; ++ else if (retval != -EIDRM && retval != -EBUSY) ++ dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n", ++ urb, retval); ++ usb_put_dev(udev); + } +- +- if (retval == 0) +- retval = -EINPROGRESS; +- else if (retval != -EIDRM && retval != -EBUSY) +- dev_dbg(&urb->dev->dev, "hcd_unlink_urb %p fail %d\n", +- urb, retval); + return retval; + } + +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index aa7759583c73..f2121b56e681 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -102,6 +102,9 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x04f3, 0x009b), .driver_info = + USB_QUIRK_DEVICE_QUALIFIER }, + ++ { USB_DEVICE(0x04f3, 0x010c), .driver_info = ++ USB_QUIRK_DEVICE_QUALIFIER }, ++ + { USB_DEVICE(0x04f3, 0x016f), .driver_info = + USB_QUIRK_DEVICE_QUALIFIER }, + +diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c +index 0a6ee2e70b25..eea7a1214a9a 100644 +--- a/drivers/usb/core/usb.c ++++ b/drivers/usb/core/usb.c +@@ -1050,6 +1050,7 @@ static int __init usb_init(void) + pr_info("%s: USB support disabled\n", usbcore_name); + return 0; + } ++ usb_init_pool_max(); + + retval = usb_debugfs_init(); + if (retval) +diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c +index 2a0422b7c42f..662441bebd1b 100644 +--- a/drivers/usb/dwc3/dwc3-omap.c ++++ b/drivers/usb/dwc3/dwc3-omap.c +@@ -215,6 +215,18 @@ static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value) + omap->irq0_offset, value); + } + ++static void dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value) ++{ ++ dwc3_omap_writel(omap->base, 
USBOTGSS_IRQENABLE_CLR_MISC + ++ omap->irqmisc_offset, value); ++} ++ ++static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 value) ++{ ++ dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 - ++ omap->irq0_offset, value); ++} ++ + static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, + enum omap_dwc3_vbus_id_status status) + { +@@ -359,9 +371,23 @@ static void dwc3_omap_enable_irqs(struct dwc3_omap *omap) + + static void dwc3_omap_disable_irqs(struct dwc3_omap *omap) + { ++ u32 reg; ++ + /* disable all IRQs */ +- dwc3_omap_write_irqmisc_set(omap, 0x00); +- dwc3_omap_write_irq0_set(omap, 0x00); ++ reg = USBOTGSS_IRQO_COREIRQ_ST; ++ dwc3_omap_write_irq0_clr(omap, reg); ++ ++ reg = (USBOTGSS_IRQMISC_OEVT | ++ USBOTGSS_IRQMISC_DRVVBUS_RISE | ++ USBOTGSS_IRQMISC_CHRGVBUS_RISE | ++ USBOTGSS_IRQMISC_DISCHRGVBUS_RISE | ++ USBOTGSS_IRQMISC_IDPULLUP_RISE | ++ USBOTGSS_IRQMISC_DRVVBUS_FALL | ++ USBOTGSS_IRQMISC_CHRGVBUS_FALL | ++ USBOTGSS_IRQMISC_DISCHRGVBUS_FALL | ++ USBOTGSS_IRQMISC_IDPULLUP_FALL); ++ ++ dwc3_omap_write_irqmisc_clr(omap, reg); + } + + static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32); +diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c +index eb3aa817a662..74ff54141416 100644 +--- a/drivers/usb/gadget/f_phonet.c ++++ b/drivers/usb/gadget/f_phonet.c +@@ -417,7 +417,10 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt) + return -EINVAL; + + spin_lock(&port->lock); +- __pn_reset(f); ++ ++ if (fp->in_ep->driver_data) ++ __pn_reset(f); ++ + if (alt == 1) { + int i; + +diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c +index 8ecf164f0318..a70e4579623c 100644 +--- a/drivers/usb/host/ehci-sched.c ++++ b/drivers/usb/host/ehci-sched.c +@@ -1409,12 +1409,12 @@ iso_stream_schedule ( + next = (next - base) & (mod - 1); + start = (stream->next_uframe - base) & (mod - 1); + +- /* Is the schedule already full? */ ++ /* Is the schedule about to wrap around? 
*/ + if (unlikely(start < period)) { +- ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n", ++ ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n", + urb, stream->next_uframe, base, + period, mod); +- status = -ENOSPC; ++ status = -EFBIG; + goto fail; + } + +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index 7dad9e5ad2f3..2a2e1de244d8 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -126,20 +126,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + xhci->quirks |= XHCI_AVOID_BEI; + } + if (pdev->vendor == PCI_VENDOR_ID_INTEL && +- (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI || +- pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) { +- /* Workaround for occasional spurious wakeups from S5 (or +- * any other sleep) on Haswell machines with LPT and LPT-LP +- * with the new Intel BIOS +- */ +- /* Limit the quirk to only known vendors, as this triggers +- * yet another BIOS bug on some other machines +- * https://bugzilla.kernel.org/show_bug.cgi?id=66171 +- */ +- if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) +- xhci->quirks |= XHCI_SPURIOUS_WAKEUP; +- } +- if (pdev->vendor == PCI_VENDOR_ID_INTEL && + pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { + xhci->quirks |= XHCI_SPURIOUS_REBOOT; + } +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 6f052daed694..6bf308798a2d 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -2100,7 +2100,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, + if (event_trb != ep_ring->dequeue) { + /* The event was for the status stage */ + if (event_trb == td->last_trb) { +- if (td->urb->actual_length != 0) { ++ if (td->urb_length_set) { + /* Don't overwrite a previously set error code + */ + if ((*status == -EINPROGRESS || *status == 0) && +@@ -2114,7 +2114,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, + td->urb->transfer_buffer_length; + } + } else { +- /* Maybe the event was for the data stage? */ ++ /* ++ * Maybe the event was for the data stage? If so, update ++ * already the actual_length of the URB and flag it as ++ * set, so that it is not overwritten in the event for ++ * the last TRB. 
++ */ ++ td->urb_length_set = true; + td->urb->actual_length = + td->urb->transfer_buffer_length - + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index 1703de9f0509..d14b3e17b906 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1,3 +1,4 @@ ++ + /* + * xHCI host controller driver + * +@@ -88,9 +89,10 @@ struct xhci_cap_regs { + #define HCS_IST(p) (((p) >> 0) & 0xf) + /* bits 4:7, max number of Event Ring segments */ + #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) ++/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */ + /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ +-/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ +-#define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f) ++/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */ ++#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f)) + + /* HCSPARAMS3 - hcs_params3 - bitmasks */ + /* bits 0:7, Max U1 to U0 latency for the roothub ports */ +@@ -1283,6 +1285,8 @@ struct xhci_td { + struct xhci_segment *start_seg; + union xhci_trb *first_trb; + union xhci_trb *last_trb; ++ /* actual_length of the URB has already been set */ ++ bool urb_length_set; + }; + + /* xHCI command default timeout value */ +diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c +index 2cca870d9762..7c0c9335a0d9 100644 +--- a/drivers/usb/musb/musb_core.c ++++ b/drivers/usb/musb/musb_core.c +@@ -1843,16 +1843,18 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) + goto fail0; + } + +- pm_runtime_use_autosuspend(musb->controller); +- pm_runtime_set_autosuspend_delay(musb->controller, 200); +- pm_runtime_enable(musb->controller); +- + spin_lock_init(&musb->lock); + musb->board_set_power = plat->set_power; + musb->min_power = plat->min_power; + musb->ops = plat->platform_ops; + musb->port_mode = plat->mode; + ++ /* We need musb_read/write functions initialized for PM */ ++ pm_runtime_use_autosuspend(musb->controller); ++ pm_runtime_set_autosuspend_delay(musb->controller, 200); ++ pm_runtime_irq_safe(musb->controller); ++ pm_runtime_enable(musb->controller); ++ + /* The musb_platform_init() call: + * - adjusts musb->mregs + * - sets the musb->isr +diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig +index ddb9c51f2c99..a9435cd819f8 100644 +--- a/drivers/usb/serial/Kconfig ++++ b/drivers/usb/serial/Kconfig +@@ -59,6 +59,7 @@ config USB_SERIAL_SIMPLE + driver. Specifically, it supports: + - Suunto ANT+ USB device. + - Fundamental Software dongle. ++ - Google USB serial devices + - HP4x calculators + - a number of Motorola phones + - Siemens USB/MPI adapter. 
+diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c +index 74fc63b2e7fc..a5500cf12e0e 100644 +--- a/drivers/usb/serial/bus.c ++++ b/drivers/usb/serial/bus.c +@@ -51,6 +51,7 @@ static int usb_serial_device_probe(struct device *dev) + { + struct usb_serial_driver *driver; + struct usb_serial_port *port; ++ struct device *tty_dev; + int retval = 0; + int minor; + +@@ -75,12 +76,20 @@ static int usb_serial_device_probe(struct device *dev) + retval = device_create_file(dev, &dev_attr_port_number); + if (retval) { + if (driver->port_remove) +- retval = driver->port_remove(port); ++ driver->port_remove(port); + goto exit_with_autopm; + } + + minor = port->minor; +- tty_register_device(usb_serial_tty_driver, minor, dev); ++ tty_dev = tty_register_device(usb_serial_tty_driver, minor, dev); ++ if (IS_ERR(tty_dev)) { ++ retval = PTR_ERR(tty_dev); ++ device_remove_file(dev, &dev_attr_port_number); ++ if (driver->port_remove) ++ driver->port_remove(port); ++ goto exit_with_autopm; ++ } ++ + dev_info(&port->serial->dev->dev, + "%s converter now attached to ttyUSB%d\n", + driver->description, minor); +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index b5fa609def53..622d349fd7da 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -56,6 +56,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */ + { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ + { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ ++ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ + { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ +@@ -146,6 +147,8 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */ + { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */ + { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */ ++ { USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */ ++ { USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */ + { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ + { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ + { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index 00710ff5ebb8..97abe6bef2f9 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -813,6 +813,8 @@ static struct usb_device_id id_table_combined [] = { + { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, ++ { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID), ++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), +@@ -992,6 +994,23 @@ static struct usb_device_id id_table_combined [] = { + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, + /* GE Healthcare devices */ + { 
USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, ++ /* Active Research (Actisense) devices */ ++ { USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) }, ++ { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) }, ++ { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) }, ++ { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) }, ++ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) }, ++ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) }, ++ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) }, ++ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) }, + { } /* Terminating entry */ + }; + +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index e52409c9be99..56b1b55c4751 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -38,6 +38,9 @@ + + #define FTDI_LUMEL_PD12_PID 0x6002 + ++/* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */ ++#define CYBER_CORTEX_AV_PID 0x8698 ++ + /* + * Marvell OpenRD Base, Client + * http://www.open-rd.org +@@ -1438,3 +1441,23 @@ + */ + #define GE_HEALTHCARE_VID 0x1901 + #define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015 ++ ++/* ++ * Active Research (Actisense) devices ++ */ ++#define ACTISENSE_NDC_PID 0xD9A8 /* NDC USB Serial Adapter */ ++#define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */ ++#define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */ ++#define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */ ++#define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */ ++#define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */ ++#define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */ ++#define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */ ++#define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */ ++#define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */ ++#define CHETCO_SEASMART_NMEA2000_PID 0xA54A /* SeaSmart NMEA2000 Gateway */ ++#define CHETCO_SEASMART_ETHERNET_PID 0xA54B /* SeaSmart Ethernet Gateway */ ++#define CHETCO_SEASMART_WIFI_PID 0xA5AC /* SeaSmart Wifi Gateway */ ++#define CHETCO_SEASMART_DISPLAY_PID 0xA5AD /* SeaSmart NMEA2000 Display */ ++#define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */ ++#define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */ +diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c +index dc97744489b0..6e66b5f84f78 100644 +--- a/drivers/usb/serial/generic.c ++++ b/drivers/usb/serial/generic.c +@@ -261,7 +261,8 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout) + * character or at least one jiffy. 
+ */ + period = max_t(unsigned long, (10 * HZ / bps), 1); +- period = min_t(unsigned long, period, timeout); ++ if (timeout) ++ period = min_t(unsigned long, period, timeout); + + dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n", + __func__, jiffies_to_msecs(timeout), +@@ -271,7 +272,7 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout) + schedule_timeout_interruptible(period); + if (signal_pending(current)) + break; +- if (time_after(jiffies, expire)) ++ if (timeout && time_after(jiffies, expire)) + break; + } + } +diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c +index 147f01971c39..cc61d3781c21 100644 +--- a/drivers/usb/serial/usb-serial-simple.c ++++ b/drivers/usb/serial/usb-serial-simple.c +@@ -51,6 +51,14 @@ DEVICE(funsoft, FUNSOFT_IDS); + { USB_DEVICE(0x8087, 0x0716) } + DEVICE(flashloader, FLASHLOADER_IDS); + ++/* Google Serial USB SubClass */ ++#define GOOGLE_IDS() \ ++ { USB_VENDOR_AND_INTERFACE_INFO(0x18d1, \ ++ USB_CLASS_VENDOR_SPEC, \ ++ 0x50, \ ++ 0x01) } ++DEVICE(google, GOOGLE_IDS); ++ + /* ViVOpay USB Serial Driver */ + #define VIVOPAY_IDS() \ + { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */ +@@ -86,6 +94,7 @@ static struct usb_serial_driver * const serial_drivers[] = { + &zio_device, + &funsoft_device, + &flashloader_device, ++ &google_device, + &vivopay_device, + &moto_modem_device, + &hp4x_device, +@@ -98,6 +107,7 @@ static const struct usb_device_id id_table[] = { + ZIO_IDS(), + FUNSOFT_IDS(), + FLASHLOADER_IDS(), ++ GOOGLE_IDS(), + VIVOPAY_IDS(), + MOTO_IDS(), + HP4X_IDS(), +diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c +index 602913d7ae03..edfd797db341 100644 +--- a/drivers/xen/manage.c ++++ b/drivers/xen/manage.c +@@ -113,10 +113,16 @@ static void do_suspend(void) + + err = freeze_processes(); + if (err) { +- pr_err("%s: freeze failed %d\n", __func__, err); ++ pr_err("%s: freeze processes failed %d\n", __func__, err); + goto out; + } + ++ err = freeze_kernel_threads(); ++ if (err) { ++ pr_err("%s: freeze kernel threads failed %d\n", __func__, err); ++ goto out_thaw; ++ } ++ + err = dpm_suspend_start(PMSG_FREEZE); + if (err) { + pr_err("%s: dpm_suspend_start %d\n", __func__, err); +diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c +index 0f00da329e71..792234f15b9f 100644 +--- a/fs/autofs4/dev-ioctl.c ++++ b/fs/autofs4/dev-ioctl.c +@@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param) + */ + static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in) + { +- struct autofs_dev_ioctl tmp; ++ struct autofs_dev_ioctl tmp, *res; + + if (copy_from_user(&tmp, in, sizeof(tmp))) + return ERR_PTR(-EFAULT); +@@ -103,7 +103,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i + if (tmp.size < sizeof(tmp)) + return ERR_PTR(-EINVAL); + +- return memdup_user(in, tmp.size); ++ res = memdup_user(in, tmp.size); ++ if (!IS_ERR(res)) ++ res->size = tmp.size; ++ ++ return res; + } + + static inline void free_dev_ioctl(struct autofs_dev_ioctl *param) +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c +index 4c94a79991bb..c757a131bb4a 100644 +--- a/fs/binfmt_elf.c ++++ b/fs/binfmt_elf.c +@@ -552,11 +552,12 @@ out: + + static unsigned long randomize_stack_top(unsigned long stack_top) + { +- unsigned int random_variable = 0; ++ unsigned long random_variable = 0; + + if ((current->flags & PF_RANDOMIZE) && + !(current->personality & ADDR_NO_RANDOMIZE)) { +- random_variable = get_random_int() & 
STACK_RND_MASK; ++ random_variable = (unsigned long) get_random_int(); ++ random_variable &= STACK_RND_MASK; + random_variable <<= PAGE_SHIFT; + } + #ifdef CONFIG_STACK_GROWSUP +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index f46ad53626be..3ec1cb0808c3 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -2432,7 +2432,7 @@ int open_ctree(struct super_block *sb, + features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; + + if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) +- printk(KERN_ERR "btrfs: has skinny extents\n"); ++ printk(KERN_INFO "btrfs: has skinny extents\n"); + + /* + * flag our filesystem as having big metadata blocks if +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index ad80dfa6cf91..9663f6600973 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -1697,22 +1697,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, + mutex_unlock(&inode->i_mutex); + + /* +- * we want to make sure fsync finds this change +- * but we haven't joined a transaction running right now. +- * +- * Later on, someone is sure to update the inode and get the +- * real transid recorded. +- * +- * We set last_trans now to the fs_info generation + 1, +- * this will either be one more than the running transaction +- * or the generation used for the next transaction if there isn't +- * one running right now. +- * + * We also have to set last_sub_trans to the current log transid, + * otherwise subsequent syncs to a file that's been synced in this + * transaction will appear to have already occured. + */ +- BTRFS_I(inode)->last_trans = root->fs_info->generation + 1; + BTRFS_I(inode)->last_sub_trans = root->log_transid; + if (num_written > 0) { + err = generic_write_sync(file, pos, num_written); +@@ -1810,25 +1798,37 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) + atomic_inc(&root->log_batch); + + /* +- * check the transaction that last modified this inode +- * and see if its already been committed +- */ +- if (!BTRFS_I(inode)->last_trans) { +- mutex_unlock(&inode->i_mutex); +- goto out; +- } +- +- /* +- * if the last transaction that changed this file was before +- * the current transaction, we can bail out now without any +- * syncing ++ * If the last transaction that changed this file was before the current ++ * transaction and we have the full sync flag set in our inode, we can ++ * bail out now without any syncing. ++ * ++ * Note that we can't bail out if the full sync flag isn't set. This is ++ * because when the full sync flag is set we start all ordered extents ++ * and wait for them to fully complete - when they complete they update ++ * the inode's last_trans field through: ++ * ++ * btrfs_finish_ordered_io() -> ++ * btrfs_update_inode_fallback() -> ++ * btrfs_update_inode() -> ++ * btrfs_set_inode_last_trans() ++ * ++ * So we are sure that last_trans is up to date and can do this check to ++ * bail out safely. 
For the fast path, when the full sync flag is not ++ * set in our inode, we can not do it because we start only our ordered ++ * extents and don't wait for them to complete (that is when ++ * btrfs_finish_ordered_io runs), so here at this point their last_trans ++ * value might be less than or equals to fs_info->last_trans_committed, ++ * and setting a speculative last_trans for an inode when a buffered ++ * write is made (such as fs_info->generation + 1 for example) would not ++ * be reliable since after setting the value and before fsync is called ++ * any number of transactions can start and commit (transaction kthread ++ * commits the current transaction periodically), and a transaction ++ * commit does not start nor waits for ordered extents to complete. + */ + smp_mb(); + if (btrfs_inode_in_log(inode, root->fs_info->generation) || +- BTRFS_I(inode)->last_trans <= +- root->fs_info->last_trans_committed) { +- BTRFS_I(inode)->last_trans = 0; +- ++ (full_sync && BTRFS_I(inode)->last_trans <= ++ root->fs_info->last_trans_committed)) { + /* + * We'v had everything committed since the last time we were + * modified so clear this flag in case it was set for whatever +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 68f7a1ff104a..904ed6d7e4bb 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -6703,7 +6703,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, + ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && + em->block_start != EXTENT_MAP_HOLE)) { + int type; +- int ret; + u64 block_start, orig_start, orig_block_len, ram_bytes; + + if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index e14e1f7748e5..be3bf0be13c7 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -980,7 +980,7 @@ again: + base = btrfs_item_ptr_offset(leaf, path->slots[0]); + + while (cur_offset < item_size) { +- extref = (struct btrfs_inode_extref *)base + cur_offset; ++ extref = (struct btrfs_inode_extref *)(base + cur_offset); + + victim_name_len = btrfs_inode_extref_name_len(leaf, extref); + +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c +index f3784dd57353..eb6918b70be1 100644 +--- a/fs/debugfs/inode.c ++++ b/fs/debugfs/inode.c +@@ -245,10 +245,19 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root) + return 0; + } + ++static void debugfs_evict_inode(struct inode *inode) ++{ ++ truncate_inode_pages(&inode->i_data, 0); ++ clear_inode(inode); ++ if (S_ISLNK(inode->i_mode)) ++ kfree(inode->i_private); ++} ++ + static const struct super_operations debugfs_super_operations = { + .statfs = simple_statfs, + .remount_fs = debugfs_remount, + .show_options = debugfs_show_options, ++ .evict_inode = debugfs_evict_inode, + }; + + static int debug_fill_super(struct super_block *sb, void *data, int silent) +@@ -465,23 +474,14 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent) + int ret = 0; + + if (debugfs_positive(dentry)) { +- if (dentry->d_inode) { +- dget(dentry); +- switch (dentry->d_inode->i_mode & S_IFMT) { +- case S_IFDIR: +- ret = simple_rmdir(parent->d_inode, dentry); +- break; +- case S_IFLNK: +- kfree(dentry->d_inode->i_private); +- /* fall through */ +- default: +- simple_unlink(parent->d_inode, dentry); +- break; +- } +- if (!ret) +- d_delete(dentry); +- dput(dentry); +- } ++ dget(dentry); ++ if (S_ISDIR(dentry->d_inode->i_mode)) ++ ret = simple_rmdir(parent->d_inode, dentry); ++ else ++ simple_unlink(parent->d_inode, dentry); ++ if (!ret) ++ 
d_delete(dentry); ++ dput(dentry); + } + return ret; + } +diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c +index 7654e87b0428..9ad5ba4b299b 100644 +--- a/fs/jffs2/scan.c ++++ b/fs/jffs2/scan.c +@@ -510,6 +510,10 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo + sumlen = c->sector_size - je32_to_cpu(sm->offset); + sumptr = buf + buf_size - sumlen; + ++ /* sm->offset maybe wrong but MAGIC maybe right */ ++ if (sumlen > c->sector_size) ++ goto full_scan; ++ + /* Now, make sure the summary itself is available */ + if (sumlen > buf_size) { + /* Need to kmalloc for this. */ +@@ -544,6 +548,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo + } + } + ++full_scan: + buf_ofs = jeb->offset; + + if (!buf_size) { +diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c +index 67cd73213168..f4cac2b06ac3 100644 +--- a/fs/nfs/callback.c ++++ b/fs/nfs/callback.c +@@ -128,22 +128,24 @@ nfs41_callback_svc(void *vrqstp) + if (try_to_freeze()) + continue; + +- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); ++ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE); + spin_lock_bh(&serv->sv_cb_lock); + if (!list_empty(&serv->sv_cb_list)) { + req = list_first_entry(&serv->sv_cb_list, + struct rpc_rqst, rq_bc_list); + list_del(&req->rq_bc_list); + spin_unlock_bh(&serv->sv_cb_lock); ++ finish_wait(&serv->sv_cb_waitq, &wq); + dprintk("Invoking bc_svc_process()\n"); + error = bc_svc_process(serv, req, rqstp); + dprintk("bc_svc_process() returned w/ error code= %d\n", + error); + } else { + spin_unlock_bh(&serv->sv_cb_lock); +- schedule(); ++ /* schedule_timeout to game the hung task watchdog */ ++ schedule_timeout(60 * HZ); ++ finish_wait(&serv->sv_cb_waitq, &wq); + } +- finish_wait(&serv->sv_cb_waitq, &wq); + } + return 0; + } +diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c +index f4ccfe6521ec..02f8d09e119f 100644 +--- a/fs/nfs/callback_xdr.c ++++ b/fs/nfs/callback_xdr.c +@@ -464,8 +464,10 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp, + + for (i = 0; i < args->csa_nrclists; i++) { + status = decode_rc_list(xdr, &args->csa_rclists[i]); +- if (status) ++ if (status) { ++ args->csa_nrclists = i; + goto out_free; ++ } + } + } + status = 0; +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c +index 3ed1be9aade3..2ea3537b8bde 100644 +--- a/fs/nfs/delegation.c ++++ b/fs/nfs/delegation.c +@@ -161,8 +161,8 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, + &delegation->flags); + NFS_I(inode)->delegation_state = delegation->type; + spin_unlock(&delegation->lock); +- put_rpccred(oldcred); + rcu_read_unlock(); ++ put_rpccred(oldcred); + trace_nfs4_reclaim_delegation(inode, res->delegation_type); + } else { + /* We appear to have raced with a delegation return. */ +diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c +index b2e3ff347620..ecdbae19a766 100644 +--- a/fs/nilfs2/btree.c ++++ b/fs/nilfs2/btree.c +@@ -31,6 +31,8 @@ + #include "alloc.h" + #include "dat.h" + ++static void __nilfs_btree_init(struct nilfs_bmap *bmap); ++ + static struct nilfs_btree_path *nilfs_btree_alloc_path(void) + { + struct nilfs_btree_path *path; +@@ -368,6 +370,34 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, + return ret; + } + ++/** ++ * nilfs_btree_root_broken - verify consistency of btree root node ++ * @node: btree root node to be examined ++ * @ino: inode number ++ * ++ * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. 
++ */ ++static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, ++ unsigned long ino) ++{ ++ int level, flags, nchildren; ++ int ret = 0; ++ ++ level = nilfs_btree_node_get_level(node); ++ flags = nilfs_btree_node_get_flags(node); ++ nchildren = nilfs_btree_node_get_nchildren(node); ++ ++ if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || ++ level > NILFS_BTREE_LEVEL_MAX || ++ nchildren < 0 || ++ nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { ++ pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n", ++ ino, level, flags, nchildren); ++ ret = 1; ++ } ++ return ret; ++} ++ + int nilfs_btree_broken_node_block(struct buffer_head *bh) + { + int ret; +@@ -1713,7 +1743,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, + + /* convert and insert */ + dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL; +- nilfs_btree_init(btree); ++ __nilfs_btree_init(btree); + if (nreq != NULL) { + nilfs_bmap_commit_alloc_ptr(btree, dreq, dat); + nilfs_bmap_commit_alloc_ptr(btree, nreq, dat); +@@ -2294,12 +2324,23 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = { + .bop_gather_data = NULL, + }; + +-int nilfs_btree_init(struct nilfs_bmap *bmap) ++static void __nilfs_btree_init(struct nilfs_bmap *bmap) + { + bmap->b_ops = &nilfs_btree_ops; + bmap->b_nchildren_per_block = + NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap)); +- return 0; ++} ++ ++int nilfs_btree_init(struct nilfs_bmap *bmap) ++{ ++ int ret = 0; ++ ++ __nilfs_btree_init(bmap); ++ ++ if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), ++ bmap->b_inode->i_ino)) ++ ret = -EIO; ++ return ret; + } + + void nilfs_btree_init_gc(struct nilfs_bmap *bmap) +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c +index cc6e925749de..8add05c84ae5 100644 +--- a/fs/ocfs2/file.c ++++ b/fs/ocfs2/file.c +@@ -2448,9 +2448,7 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe, + struct address_space *mapping = out->f_mapping; + struct inode *inode = mapping->host; + struct splice_desc sd = { +- .total_len = len, + .flags = flags, +- .pos = *ppos, + .u.file = out, + }; + +@@ -2460,6 +2458,12 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe, + out->f_path.dentry->d_name.len, + out->f_path.dentry->d_name.name, len); + ++ ret = generic_write_checks(out, ppos, &len, 0); ++ if (ret) ++ return ret; ++ sd.total_len = len; ++ sd.pos = *ppos; ++ + pipe_lock(pipe); + + splice_from_pipe_begin(&sd); +diff --git a/fs/proc/generic.c b/fs/proc/generic.c +index 737e15615b04..9638eec27691 100644 +--- a/fs/proc/generic.c ++++ b/fs/proc/generic.c +@@ -19,7 +19,6 @@ + #include <linux/mount.h> + #include <linux/init.h> + #include <linux/idr.h> +-#include <linux/namei.h> + #include <linux/bitops.h> + #include <linux/spinlock.h> + #include <linux/completion.h> +@@ -163,17 +162,6 @@ void proc_free_inum(unsigned int inum) + spin_unlock_irqrestore(&proc_inum_lock, flags); + } + +-static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd) +-{ +- nd_set_link(nd, __PDE_DATA(dentry->d_inode)); +- return NULL; +-} +- +-static const struct inode_operations proc_link_inode_operations = { +- .readlink = generic_readlink, +- .follow_link = proc_follow_link, +-}; +- + /* + * As some entries in /proc are volatile, we want to + * get rid of unused dentries. 
This could be made +diff --git a/fs/proc/inode.c b/fs/proc/inode.c +index 8eaa1ba793fc..a5def0c492c4 100644 +--- a/fs/proc/inode.c ++++ b/fs/proc/inode.c +@@ -23,6 +23,7 @@ + #include <linux/slab.h> + #include <linux/mount.h> + #include <linux/magic.h> ++#include <linux/namei.h> + + #include <asm/uaccess.h> + +@@ -393,6 +394,26 @@ static const struct file_operations proc_reg_file_ops_no_compat = { + }; + #endif + ++static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd) ++{ ++ struct proc_dir_entry *pde = PDE(dentry->d_inode); ++ if (unlikely(!use_pde(pde))) ++ return ERR_PTR(-EINVAL); ++ nd_set_link(nd, pde->data); ++ return pde; ++} ++ ++static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p) ++{ ++ unuse_pde(p); ++} ++ ++const struct inode_operations proc_link_inode_operations = { ++ .readlink = generic_readlink, ++ .follow_link = proc_follow_link, ++ .put_link = proc_put_link, ++}; ++ + struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) + { + struct inode *inode = new_inode_pseudo(sb); +diff --git a/fs/proc/internal.h b/fs/proc/internal.h +index 651d09a11dde..8b8ca1db6316 100644 +--- a/fs/proc/internal.h ++++ b/fs/proc/internal.h +@@ -202,6 +202,7 @@ struct pde_opener { + int closing; + struct completion *c; + }; ++extern const struct inode_operations proc_link_inode_operations; + + extern const struct inode_operations proc_pid_link_inode_operations; + +diff --git a/fs/splice.c b/fs/splice.c +index 84f810d63c37..c915e215a50e 100644 +--- a/fs/splice.c ++++ b/fs/splice.c +@@ -1012,13 +1012,17 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, + struct address_space *mapping = out->f_mapping; + struct inode *inode = mapping->host; + struct splice_desc sd = { +- .total_len = len, + .flags = flags, +- .pos = *ppos, + .u.file = out, + }; + ssize_t ret; + ++ ret = generic_write_checks(out, ppos, &len, S_ISBLK(inode->i_mode)); ++ if (ret) ++ return ret; ++ sd.total_len = len; ++ sd.pos = *ppos; ++ + pipe_lock(pipe); + + splice_from_pipe_begin(&sd); +diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c +index f1d85cfc0a54..a0726e475380 100644 +--- a/fs/xfs/xfs_buf_item.c ++++ b/fs/xfs/xfs_buf_item.c +@@ -319,6 +319,10 @@ xfs_buf_item_format( + ASSERT(atomic_read(&bip->bli_refcount) > 0); + ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || + (bip->bli_flags & XFS_BLI_STALE)); ++ ASSERT((bip->bli_flags & XFS_BLI_STALE) || ++ (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF ++ && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF)); ++ + + /* + * If it is an inode buffer, transfer the in-memory state to the +diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c +index 7a460d8ad06e..e3606f26f82d 100644 +--- a/fs/xfs/xfs_inode.c ++++ b/fs/xfs/xfs_inode.c +@@ -1916,6 +1916,7 @@ xfs_iunlink( + agi->agi_unlinked[bucket_index] = cpu_to_be32(agino); + offset = offsetof(xfs_agi_t, agi_unlinked) + + (sizeof(xfs_agino_t) * bucket_index); ++ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF); + xfs_trans_log_buf(tp, agibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + return 0; +@@ -2007,6 +2008,7 @@ xfs_iunlink_remove( + agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino); + offset = offsetof(xfs_agi_t, agi_unlinked) + + (sizeof(xfs_agino_t) * bucket_index); ++ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF); + xfs_trans_log_buf(tp, agibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + } else { +diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c +index 
794aa2fb9c69..3868c0aaa724 100644 +--- a/fs/xfs/xfs_qm.c ++++ b/fs/xfs/xfs_qm.c +@@ -1125,6 +1125,11 @@ xfs_qm_reset_dqcounts( + */ + (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR, + "xfs_quotacheck"); ++ /* ++ * Reset type in case we are reusing group quota file for ++ * project quotas or vice versa ++ */ ++ ddq->d_flags = type; + ddq->d_bcount = 0; + ddq->d_icount = 0; + ddq->d_rtbcount = 0; +diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c +index 5411e01ab452..b4152a18a99f 100644 +--- a/fs/xfs/xfs_trans.c ++++ b/fs/xfs/xfs_trans.c +@@ -485,6 +485,7 @@ xfs_trans_apply_sb_deltas( + whole = 1; + } + ++ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF); + if (whole) + /* + * Log the whole thing, the fields are noncontiguous. +diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h +index 1c804b057fb1..7ee1774edee5 100644 +--- a/include/linux/fsnotify.h ++++ b/include/linux/fsnotify.h +@@ -101,8 +101,10 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, + new_dir_mask |= FS_ISDIR; + } + +- fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie); +- fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie); ++ fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name, ++ fs_cookie); ++ fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name, ++ fs_cookie); + + if (target) + fsnotify_link_count(target); +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h +index 511b1a0d6cc2..e492c34439c3 100644 +--- a/include/linux/hugetlb.h ++++ b/include/linux/hugetlb.h +@@ -90,9 +90,9 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); + struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, + int write); + struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, +- pmd_t *pmd, int write); ++ pmd_t *pmd, int flags); + struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, +- pud_t *pud, int write); ++ pud_t *pud, int flags); + int pmd_huge(pmd_t pmd); + int pud_huge(pud_t pmd); + unsigned long hugetlb_change_protection(struct vm_area_struct *vma, +@@ -129,8 +129,8 @@ static inline void hugetlb_report_meminfo(struct seq_file *m) + static inline void hugetlb_show_meminfo(void) + { + } +-#define follow_huge_pmd(mm, addr, pmd, write) NULL +-#define follow_huge_pud(mm, addr, pud, write) NULL ++#define follow_huge_pmd(mm, addr, pmd, flags) NULL ++#define follow_huge_pud(mm, addr, pud, flags) NULL + #define prepare_hugepage_range(file, addr, len) (-EINVAL) + #define pmd_huge(x) 0 + #define pud_huge(x) 0 +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 306f0d4ce7e3..f5965a923d44 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -886,6 +886,7 @@ static inline int page_mapped(struct page *page) + #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ + #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */ + #define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. 
Index encoded in upper bits */ ++#define VM_FAULT_SIGSEGV 0x0040 + + #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ + #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ +@@ -894,8 +895,9 @@ static inline int page_mapped(struct page *page) + + #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ + +-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \ +- VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE) ++#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \ ++ VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \ ++ VM_FAULT_FALLBACK) + + /* Encode hstate index for a hwpoisoned large page */ + #define VM_FAULT_SET_HINDEX(x) ((x) << 12) +diff --git a/include/linux/swapops.h b/include/linux/swapops.h +index 8d4fa82bfb91..08a158dbe502 100644 +--- a/include/linux/swapops.h ++++ b/include/linux/swapops.h +@@ -137,6 +137,8 @@ static inline void make_migration_entry_read(swp_entry_t *entry) + *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry)); + } + ++extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, ++ spinlock_t *ptl); + extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, + unsigned long address); + extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte); +@@ -149,6 +151,8 @@ static inline int is_migration_entry(swp_entry_t swp) + } + #define migration_entry_to_page(swp) NULL + static inline void make_migration_entry_read(swp_entry_t *entryp) { } ++static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, ++ spinlock_t *ptl) { } + static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, + unsigned long address) { } + static inline void migration_entry_wait_huge(struct mm_struct *mm, +diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h +index 75efc45eaa2f..d8ee9fd7ca4e 100644 +--- a/include/linux/usb/hcd.h ++++ b/include/linux/usb/hcd.h +@@ -434,6 +434,7 @@ extern const struct dev_pm_ops usb_hcd_pci_pm_ops; + #endif /* CONFIG_PCI */ + + /* pci-ish (pdev null is ok) buffer alloc/mapping support */ ++void usb_init_pool_max(void); + int hcd_buffer_create(struct usb_hcd *hcd); + void hcd_buffer_destroy(struct usb_hcd *hcd); + +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index 23bfd1028457..38647a3441c9 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -378,7 +378,7 @@ struct t10_reservation { + /* Activate Persistence across Target Power Loss enabled + * for SCSI device */ + int pr_aptpl_active; +-#define PR_APTPL_BUF_LEN 8192 ++#define PR_APTPL_BUF_LEN 262144 + u32 pr_generation; + spinlock_t registration_lock; + spinlock_t aptpl_reg_lock; +diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c +index 00eb8f7fbf41..545241de23bf 100644 +--- a/kernel/debug/kdb/kdb_main.c ++++ b/kernel/debug/kdb/kdb_main.c +@@ -2532,7 +2532,7 @@ static int kdb_summary(int argc, const char **argv) + #define K(x) ((x) << (PAGE_SHIFT - 10)) + kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n" + "Buffers: %8lu kB\n", +- val.totalram, val.freeram, val.bufferram); ++ K(val.totalram), K(val.freeram), K(val.bufferram)); + return 0; + } + +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 691a8ea6f472..1b51436db225 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -4599,7 +4599,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, + *fpos += written; + + 
out_unlock: +- for (i = 0; i < nr_pages; i++){ ++ for (i = nr_pages - 1; i >= 0; i--) { + kunmap_atomic(map_page[i]); + put_page(pages[i]); + } +diff --git a/mm/compaction.c b/mm/compaction.c +index adb6d0560e96..ddcdbe0e42d9 100644 +--- a/mm/compaction.c ++++ b/mm/compaction.c +@@ -935,7 +935,7 @@ static int compact_finished(struct zone *zone, + return COMPACT_PARTIAL; + + /* Job done if allocation would set block type */ +- if (cc->order >= pageblock_order && area->nr_free) ++ if (order >= pageblock_order && area->nr_free) + return COMPACT_PARTIAL; + } + +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index c33d8a65298c..ed00a70fb052 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -2517,9 +2517,10 @@ again: + continue; + + /* +- * HWPoisoned hugepage is already unmapped and dropped reference ++ * Migrating hugepage or HWPoisoned hugepage is already ++ * unmapped and its refcount is dropped, so just clear pte here. + */ +- if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { ++ if (unlikely(!pte_present(pte))) { + huge_pte_clear(mm, address, ptep); + continue; + } +@@ -3175,7 +3176,24 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, + pages++; + continue; + } +- if (!huge_pte_none(huge_ptep_get(ptep))) { ++ pte = huge_ptep_get(ptep); ++ if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { ++ continue; ++ } ++ if (unlikely(is_hugetlb_entry_migration(pte))) { ++ swp_entry_t entry = pte_to_swp_entry(pte); ++ ++ if (is_write_migration_entry(entry)) { ++ pte_t newpte; ++ ++ make_migration_entry_read(&entry); ++ newpte = swp_entry_to_pte(entry); ++ set_huge_pte_at(mm, address, ptep, newpte); ++ pages++; ++ } ++ continue; ++ } ++ if (!huge_pte_none(pte)) { + pte = huge_ptep_get_and_clear(mm, address, ptep); + pte = pte_mkhuge(huge_pte_modify(pte, newprot)); + pte = arch_make_huge_pte(pte, vma, NULL, 0); +@@ -3460,42 +3478,64 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) + return (pte_t *) pmd; + } + +-struct page * +-follow_huge_pmd(struct mm_struct *mm, unsigned long address, +- pmd_t *pmd, int write) +-{ +- struct page *page; ++#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ + +- page = pte_page(*(pte_t *)pmd); +- if (page) +- page += ((address & ~PMD_MASK) >> PAGE_SHIFT); +- return page; ++/* ++ * These functions are overwritable if your architecture needs its own ++ * behavior. ++ */ ++struct page * __weak ++follow_huge_addr(struct mm_struct *mm, unsigned long address, ++ int write) ++{ ++ return ERR_PTR(-EINVAL); + } + +-struct page * +-follow_huge_pud(struct mm_struct *mm, unsigned long address, +- pud_t *pud, int write) ++struct page * __weak ++follow_huge_pmd(struct mm_struct *mm, unsigned long address, ++ pmd_t *pmd, int flags) + { +- struct page *page; +- +- page = pte_page(*(pte_t *)pud); +- if (page) +- page += ((address & ~PUD_MASK) >> PAGE_SHIFT); ++ struct page *page = NULL; ++ spinlock_t *ptl; ++retry: ++ ptl = &mm->page_table_lock; ++ spin_lock(ptl); ++ /* ++ * make sure that the address range covered by this pmd is not ++ * unmapped from other threads. ++ */ ++ if (!pmd_huge(*pmd)) ++ goto out; ++ if (pmd_present(*pmd)) { ++ page = pte_page(*(pte_t *)pmd) + ++ ((address & ~PMD_MASK) >> PAGE_SHIFT); ++ if (flags & FOLL_GET) ++ get_page(page); ++ } else { ++ if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) { ++ spin_unlock(ptl); ++ __migration_entry_wait(mm, (pte_t *)pmd, ptl); ++ goto retry; ++ } ++ /* ++ * hwpoisoned entry is treated as no_page_table in ++ * follow_page_mask(). 
++ */ ++ } ++out: ++ spin_unlock(ptl); + return page; + } + +-#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */ +- +-/* Can be overriden by architectures */ +-__attribute__((weak)) struct page * ++struct page * __weak + follow_huge_pud(struct mm_struct *mm, unsigned long address, +- pud_t *pud, int write) ++ pud_t *pud, int flags) + { +- BUG(); +- return NULL; +-} ++ if (flags & FOLL_GET) ++ return NULL; + +-#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ ++ return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT); ++} + + #ifdef CONFIG_MEMORY_FAILURE + +diff --git a/mm/ksm.c b/mm/ksm.c +index 29cbd06c4884..b61ad555184f 100644 +--- a/mm/ksm.c ++++ b/mm/ksm.c +@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) + else + ret = VM_FAULT_WRITE; + put_page(page); +- } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM))); ++ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM))); + /* + * We must loop because handle_mm_fault() may back out if there's + * any difficulty e.g. if pte accessed bit gets updated concurrently. +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index 4ab233d4714a..532b4661985c 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -1653,8 +1653,6 @@ static int __soft_offline_page(struct page *page, int flags) + * setting PG_hwpoison. + */ + if (!is_free_buddy_page(page)) +- lru_add_drain_all(); +- if (!is_free_buddy_page(page)) + drain_all_pages(); + SetPageHWPoison(page); + if (!is_free_buddy_page(page)) +diff --git a/mm/memory.c b/mm/memory.c +index db2916f5f378..38617f049b9f 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -1483,10 +1483,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma, + if (pud_none(*pud)) + goto no_page_table; + if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { +- if (flags & FOLL_GET) ++ page = follow_huge_pud(mm, address, pud, flags); ++ if (page) + goto out; +- page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE); +- goto out; ++ goto no_page_table; + } + if (unlikely(pud_bad(*pud))) + goto no_page_table; +@@ -1495,21 +1495,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma, + if (pmd_none(*pmd)) + goto no_page_table; + if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) { +- page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE); +- if (flags & FOLL_GET) { +- /* +- * Refcount on tail pages are not well-defined and +- * shouldn't be taken. The caller should handle a NULL +- * return when trying to follow tail pages. +- */ +- if (PageHead(page)) +- get_page(page); +- else { +- page = NULL; +- goto out; +- } +- } +- goto out; ++ page = follow_huge_pmd(mm, address, pmd, flags); ++ if (page) ++ goto out; ++ goto no_page_table; + } + if ((flags & FOLL_NUMA) && pmd_numa(*pmd)) + goto no_page_table; +@@ -1836,7 +1825,8 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + else + return -EFAULT; + } +- if (ret & VM_FAULT_SIGBUS) ++ if (ret & (VM_FAULT_SIGBUS | ++ VM_FAULT_SIGSEGV)) + return i ? 
i : -EFAULT; + BUG(); + } +@@ -1946,7 +1936,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, + return -ENOMEM; + if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) + return -EHWPOISON; +- if (ret & VM_FAULT_SIGBUS) ++ if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) + return -EFAULT; + BUG(); + } +@@ -3225,7 +3215,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, + + /* Check if we need to add a guard page to the stack */ + if (check_stack_guard_page(vma, address) < 0) +- return VM_FAULT_SIGBUS; ++ return VM_FAULT_SIGSEGV; + + /* Use the zero-page for reads */ + if (!(flags & FAULT_FLAG_WRITE)) { +@@ -4079,7 +4069,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, + if (follow_phys(vma, addr, write, &prot, &phys_addr)) + return -EINVAL; + +- maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot); ++ maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); + if (write) + memcpy_toio(maddr + offset, buf, len); + else +diff --git a/mm/migrate.c b/mm/migrate.c +index fac5fa0813c4..66ca0c494b90 100644 +--- a/mm/migrate.c ++++ b/mm/migrate.c +@@ -208,7 +208,7 @@ static void remove_migration_ptes(struct page *old, struct page *new) + * get to the page and wait until migration is finished. + * When we return from this function the fault will be retried. + */ +-static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, ++void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, + spinlock_t *ptl) + { + pte_t pte; +@@ -1195,7 +1195,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm, + goto put_and_set; + + if (PageHuge(page)) { +- isolate_huge_page(page, &pagelist); ++ if (PageHead(page)) ++ isolate_huge_page(page, &pagelist); + goto put_and_set; + } + +diff --git a/mm/mmap.c b/mm/mmap.c +index 441602d7259a..c3ed083cfb59 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -128,7 +128,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed); + */ + int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) + { +- unsigned long free, allowed, reserve; ++ long free, allowed, reserve; + + vm_acct_memory(pages); + +@@ -194,7 +194,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) + */ + if (mm) { + reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10); +- allowed -= min(mm->total_vm / 32, reserve); ++ allowed -= min_t(long, mm->total_vm / 32, reserve); + } + + if (percpu_counter_read_positive(&vm_committed_as) < allowed) +diff --git a/mm/nommu.c b/mm/nommu.c +index 1221d2b66e97..97d19be38233 100644 +--- a/mm/nommu.c ++++ b/mm/nommu.c +@@ -1904,7 +1904,7 @@ EXPORT_SYMBOL(unmap_mapping_range); + */ + int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) + { +- unsigned long free, allowed, reserve; ++ long free, allowed, reserve; + + vm_acct_memory(pages); + +@@ -1969,7 +1969,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) + */ + if (mm) { + reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10); +- allowed -= min(mm->total_vm / 32, reserve); ++ allowed -= min_t(long, mm->total_vm / 32, reserve); + } + + if (percpu_counter_read_positive(&vm_committed_as) < allowed) +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c +index e6b2db68b4fa..aab733629265 100644 +--- a/net/ceph/osd_client.c ++++ b/net/ceph/osd_client.c +@@ -974,12 +974,24 @@ static void put_osd(struct ceph_osd *osd) + */ + static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) + { +- dout("__remove_osd %p\n", 
osd); +- BUG_ON(!list_empty(&osd->o_requests)); +- rb_erase(&osd->o_node, &osdc->osds); ++ dout("%s %p osd%d\n", __func__, osd, osd->o_osd); ++ WARN_ON(!list_empty(&osd->o_requests)); ++ WARN_ON(!list_empty(&osd->o_linger_requests)); ++ + list_del_init(&osd->o_osd_lru); +- ceph_con_close(&osd->o_con); +- put_osd(osd); ++ rb_erase(&osd->o_node, &osdc->osds); ++ RB_CLEAR_NODE(&osd->o_node); ++} ++ ++static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) ++{ ++ dout("%s %p osd%d\n", __func__, osd, osd->o_osd); ++ ++ if (!RB_EMPTY_NODE(&osd->o_node)) { ++ ceph_con_close(&osd->o_con); ++ __remove_osd(osdc, osd); ++ put_osd(osd); ++ } + } + + static void remove_all_osds(struct ceph_osd_client *osdc) +@@ -989,7 +1001,7 @@ static void remove_all_osds(struct ceph_osd_client *osdc) + while (!RB_EMPTY_ROOT(&osdc->osds)) { + struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), + struct ceph_osd, o_node); +- __remove_osd(osdc, osd); ++ remove_osd(osdc, osd); + } + mutex_unlock(&osdc->request_mutex); + } +@@ -1019,7 +1031,7 @@ static void remove_old_osds(struct ceph_osd_client *osdc) + list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { + if (time_before(jiffies, osd->lru_ttl)) + break; +- __remove_osd(osdc, osd); ++ remove_osd(osdc, osd); + } + mutex_unlock(&osdc->request_mutex); + } +@@ -1034,8 +1046,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) + dout("__reset_osd %p osd%d\n", osd, osd->o_osd); + if (list_empty(&osd->o_requests) && + list_empty(&osd->o_linger_requests)) { +- __remove_osd(osdc, osd); +- ++ remove_osd(osdc, osd); + return -ENODEV; + } + +@@ -1617,6 +1628,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc) + { + struct rb_node *p, *n; + ++ dout("%s %p\n", __func__, osdc); + for (p = rb_first(&osdc->osds); p; p = n) { + struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node); + +diff --git a/net/compat.c b/net/compat.c +index cbc1a2a26587..275af79c131b 100644 +--- a/net/compat.c ++++ b/net/compat.c +@@ -738,24 +738,18 @@ static unsigned char nas[21] = { + + asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) + { +- if (flags & MSG_CMSG_COMPAT) +- return -EINVAL; + return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); + } + + asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, + unsigned int vlen, unsigned int flags) + { +- if (flags & MSG_CMSG_COMPAT) +- return -EINVAL; + return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, + flags | MSG_CMSG_COMPAT); + } + + asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) + { +- if (flags & MSG_CMSG_COMPAT) +- return -EINVAL; + return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); + } + +@@ -778,9 +772,6 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, + int datagrams; + struct timespec ktspec; + +- if (flags & MSG_CMSG_COMPAT) +- return -EINVAL; +- + if (timeout == NULL) + return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, + flags | MSG_CMSG_COMPAT, NULL); +diff --git a/net/core/dev.c b/net/core/dev.c +index 249ab7d67254..3ca487e14080 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -935,7 +935,7 @@ bool dev_valid_name(const char *name) + return false; + + while (*name) { +- if (*name == '/' || isspace(*name)) ++ if (*name == '/' || *name == ':' || isspace(*name)) + return false; + name++; + } +diff --git a/net/core/gen_stats.c 
b/net/core/gen_stats.c +index 9d3d9e78397b..372ac662adf9 100644 +--- a/net/core/gen_stats.c ++++ b/net/core/gen_stats.c +@@ -32,6 +32,9 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size) + return 0; + + nla_put_failure: ++ kfree(d->xstats); ++ d->xstats = NULL; ++ d->xstats_len = 0; + spin_unlock_bh(d->lock); + return -1; + } +@@ -217,7 +220,9 @@ int + gnet_stats_copy_app(struct gnet_dump *d, void *st, int len) + { + if (d->compat_xstats) { +- d->xstats = st; ++ d->xstats = kmemdup(st, len, GFP_ATOMIC); ++ if (!d->xstats) ++ goto err_out; + d->xstats_len = len; + } + +@@ -225,6 +230,11 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len) + return gnet_stats_copy(d, TCA_STATS_APP, st, len); + + return 0; ++ ++err_out: ++ d->xstats_len = 0; ++ spin_unlock_bh(d->lock); ++ return -1; + } + EXPORT_SYMBOL(gnet_stats_copy_app); + +@@ -257,6 +267,9 @@ gnet_stats_finish_copy(struct gnet_dump *d) + return -1; + } + ++ kfree(d->xstats); ++ d->xstats = NULL; ++ d->xstats_len = 0; + spin_unlock_bh(d->lock); + return 0; + } +diff --git a/net/core/pktgen.c b/net/core/pktgen.c +index a797fff7f222..a104ba3c5768 100644 +--- a/net/core/pktgen.c ++++ b/net/core/pktgen.c +@@ -2771,25 +2771,25 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, + skb->dev = odev; + skb->pkt_type = PACKET_HOST; + ++ pktgen_finalize_skb(pkt_dev, skb, datalen); ++ + if (!(pkt_dev->flags & F_UDPCSUM)) { + skb->ip_summed = CHECKSUM_NONE; + } else if (odev->features & NETIF_F_V4_CSUM) { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum = 0; +- udp4_hwcsum(skb, udph->source, udph->dest); ++ udp4_hwcsum(skb, iph->saddr, iph->daddr); + } else { +- __wsum csum = udp_csum(skb); ++ __wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0); + + /* add protocol-dependent pseudo-header */ +- udph->check = csum_tcpudp_magic(udph->source, udph->dest, ++ udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, + datalen + 8, IPPROTO_UDP, csum); + + if (udph->check == 0) + udph->check = CSUM_MANGLED_0; + } + +- pktgen_finalize_skb(pkt_dev, skb, datalen); +- + #ifdef CONFIG_XFRM + if (!process_ipsec(pkt_dev, skb, protocol)) + return NULL; +@@ -2905,6 +2905,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, + skb->dev = odev; + skb->pkt_type = PACKET_HOST; + ++ pktgen_finalize_skb(pkt_dev, skb, datalen); ++ + if (!(pkt_dev->flags & F_UDPCSUM)) { + skb->ip_summed = CHECKSUM_NONE; + } else if (odev->features & NETIF_F_V6_CSUM) { +@@ -2913,7 +2915,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, + skb->csum_offset = offsetof(struct udphdr, check); + udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0); + } else { +- __wsum csum = udp_csum(skb); ++ __wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0); + + /* add protocol-dependent pseudo-header */ + udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum); +@@ -2922,8 +2924,6 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, + udph->check = CSUM_MANGLED_0; + } + +- pktgen_finalize_skb(pkt_dev, skb, datalen); +- + return skb; + } + +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index 5874dfbb8d90..76cc27f3f991 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -1202,14 +1202,10 @@ static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = { + }; + + static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { +- [IFLA_VF_MAC] = { .type = NLA_BINARY, +- .len = sizeof(struct 
ifla_vf_mac) }, +- [IFLA_VF_VLAN] = { .type = NLA_BINARY, +- .len = sizeof(struct ifla_vf_vlan) }, +- [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, +- .len = sizeof(struct ifla_vf_tx_rate) }, +- [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY, +- .len = sizeof(struct ifla_vf_spoofchk) }, ++ [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, ++ [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, ++ [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) }, ++ [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) }, + }; + + static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { +@@ -1928,8 +1924,16 @@ replay: + } + + err = rtnl_configure_link(dev, ifm); +- if (err < 0) +- unregister_netdevice(dev); ++ if (err < 0) { ++ if (ops->newlink) { ++ LIST_HEAD(list_kill); ++ ++ ops->dellink(dev, &list_kill); ++ unregister_netdevice_many(&list_kill); ++ } else { ++ unregister_netdevice(dev); ++ } ++ } + out: + put_net(dest_net); + return err; +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c +index b66910aaef4d..4c1884fed548 100644 +--- a/net/ipv4/ip_fragment.c ++++ b/net/ipv4/ip_fragment.c +@@ -678,27 +678,30 @@ EXPORT_SYMBOL(ip_defrag); + struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) + { + struct iphdr iph; ++ int netoff; + u32 len; + + if (skb->protocol != htons(ETH_P_IP)) + return skb; + +- if (!skb_copy_bits(skb, 0, &iph, sizeof(iph))) ++ netoff = skb_network_offset(skb); ++ ++ if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0) + return skb; + + if (iph.ihl < 5 || iph.version != 4) + return skb; + + len = ntohs(iph.tot_len); +- if (skb->len < len || len < (iph.ihl * 4)) ++ if (skb->len < netoff + len || len < (iph.ihl * 4)) + return skb; + + if (ip_is_fragment(&iph)) { + skb = skb_share_check(skb, GFP_ATOMIC); + if (skb) { +- if (!pskb_may_pull(skb, iph.ihl*4)) ++ if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) + return skb; +- if (pskb_trim_rcsum(skb, len)) ++ if (pskb_trim_rcsum(skb, netoff + len)) + return skb; + memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); + if (ip_defrag(skb, user)) +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index 52e82e1709e6..b4cdc79a7fc8 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -842,7 +842,8 @@ static int __ip_append_data(struct sock *sk, + cork->length += length; + if (((length > mtu) || (skb && skb_is_gso(skb))) && + (sk->sk_protocol == IPPROTO_UDP) && +- (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) { ++ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && ++ (sk->sk_type == SOCK_DGRAM)) { + err = ip_ufo_append_data(sk, queue, getfrag, from, length, + hh_len, fragheaderlen, transhdrlen, + maxfraglen, flags); +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c +index 81c92f61d77c..a9f8e66f6dad 100644 +--- a/net/ipv4/ping.c ++++ b/net/ipv4/ping.c +@@ -256,6 +256,10 @@ int ping_init_sock(struct sock *sk) + kgid_t low, high; + int ret = 0; + ++#if IS_ENABLED(CONFIG_IPV6) ++ if (sk->sk_family == AF_INET6) ++ inet6_sk(sk)->ipv6only = 1; ++#endif + inet_get_ping_group_range_net(net, &low, &high); + if (gid_lte(low, group) && gid_lte(group, high)) + return 0; +@@ -302,6 +306,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, + if (addr_len < sizeof(*addr)) + return -EINVAL; + ++ if (addr->sin_family != AF_INET && ++ !(addr->sin_family == AF_UNSPEC && ++ addr->sin_addr.s_addr == htonl(INADDR_ANY))) ++ return -EAFNOSUPPORT; ++ + pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", + sk, &addr->sin_addr.s_addr, 
ntohs(addr->sin_port)); + +@@ -326,6 +335,9 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, + if (addr_len < sizeof(*addr)) + return -EINVAL; + ++ if (addr->sin6_family != AF_INET6) ++ return -EAFNOSUPPORT; ++ + pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", + sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); + +@@ -708,7 +720,7 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + if (msg->msg_namelen < sizeof(*usin)) + return -EINVAL; + if (usin->sin_family != AF_INET) +- return -EINVAL; ++ return -EAFNOSUPPORT; + daddr = usin->sin_addr.s_addr; + /* no remote port */ + } else { +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 602533d9cb97..855957271830 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -1266,7 +1266,8 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, + if (((length > mtu) || + (skb && skb_is_gso(skb))) && + (sk->sk_protocol == IPPROTO_UDP) && +- (rt->dst.dev->features & NETIF_F_UFO)) { ++ (rt->dst.dev->features & NETIF_F_UFO) && ++ (sk->sk_type == SOCK_DGRAM)) { + err = ip6_ufo_append_data(sk, getfrag, from, length, + hh_len, fragheaderlen, + transhdrlen, mtu, flags, rt); +diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c +index 6acab0bce9d8..f414af6cda43 100644 +--- a/net/ipv6/ping.c ++++ b/net/ipv6/ping.c +@@ -104,9 +104,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + + if (msg->msg_name) { + struct sockaddr_in6 *u = (struct sockaddr_in6 *) msg->msg_name; +- if (msg->msg_namelen < sizeof(struct sockaddr_in6) || +- u->sin6_family != AF_INET6) { ++ if (msg->msg_namelen < sizeof(*u)) + return -EINVAL; ++ if (u->sin6_family != AF_INET6) { ++ return -EAFNOSUPPORT; + } + if (sk->sk_bound_dev_if && + sk->sk_bound_dev_if != u->sin6_scope_id) { +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 1d0c5d66d637..0464f9a9d2dc 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -110,7 +110,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old) + u32 *p = NULL; + + if (!(rt->dst.flags & DST_HOST)) +- return NULL; ++ return dst_cow_metrics_generic(dst, old); + + peer = rt6_get_peer_create(rt); + if (peer) { +diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c +index 41ac7938268b..2ee29ed13bd4 100644 +--- a/net/irda/ircomm/ircomm_tty.c ++++ b/net/irda/ircomm/ircomm_tty.c +@@ -820,7 +820,9 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) + orig_jiffies = jiffies; + + /* Set poll time to 200 ms */ +- poll_time = IRDA_MIN(timeout, msecs_to_jiffies(200)); ++ poll_time = msecs_to_jiffies(200); ++ if (timeout) ++ poll_time = min_t(unsigned long, timeout, poll_time); + + spin_lock_irqsave(&self->spinlock, flags); + while (self->tx_skb && self->tx_skb->len) { +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c +index c2785b2af97c..d36e0977f44a 100644 +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -560,6 +560,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx) + if (tx->sdata->control_port_no_encrypt) + info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; ++ info->flags |= IEEE80211_TX_CTL_USE_MINRATE; + } + + return TX_CONTINUE; +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c +index f7a758fae8e5..d1d6b82d2250 100644 +--- a/net/netfilter/ipvs/ip_vs_core.c ++++ b/net/netfilter/ipvs/ip_vs_core.c +@@ -658,16 +658,24 @@ static inline 
int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user) + return err; + } + +-static int ip_vs_route_me_harder(int af, struct sk_buff *skb) ++static int ip_vs_route_me_harder(int af, struct sk_buff *skb, ++ unsigned int hooknum) + { ++ if (!sysctl_snat_reroute(skb)) ++ return 0; ++ /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */ ++ if (NF_INET_LOCAL_IN == hooknum) ++ return 0; + #ifdef CONFIG_IP_VS_IPV6 + if (af == AF_INET6) { +- if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0) ++ struct dst_entry *dst = skb_dst(skb); ++ ++ if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) && ++ ip6_route_me_harder(skb) != 0) + return 1; + } else + #endif +- if ((sysctl_snat_reroute(skb) || +- skb_rtable(skb)->rt_flags & RTCF_LOCAL) && ++ if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) && + ip_route_me_harder(skb, RTN_LOCAL) != 0) + return 1; + +@@ -790,7 +798,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb, + union nf_inet_addr *snet, + __u8 protocol, struct ip_vs_conn *cp, + struct ip_vs_protocol *pp, +- unsigned int offset, unsigned int ihl) ++ unsigned int offset, unsigned int ihl, ++ unsigned int hooknum) + { + unsigned int verdict = NF_DROP; + +@@ -820,7 +829,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb, + #endif + ip_vs_nat_icmp(skb, pp, cp, 1); + +- if (ip_vs_route_me_harder(af, skb)) ++ if (ip_vs_route_me_harder(af, skb, hooknum)) + goto out; + + /* do the statistics and put it back */ +@@ -915,7 +924,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related, + + snet.ip = iph->saddr; + return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp, +- pp, ciph.len, ihl); ++ pp, ciph.len, ihl, hooknum); + } + + #ifdef CONFIG_IP_VS_IPV6 +@@ -980,7 +989,8 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related, + snet.in6 = ciph.saddr.in6; + writable = ciph.len; + return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp, +- pp, writable, sizeof(struct ipv6hdr)); ++ pp, writable, sizeof(struct ipv6hdr), ++ hooknum); + } + #endif + +@@ -1039,7 +1049,8 @@ static inline bool is_new_conn(const struct sk_buff *skb, + */ + static unsigned int + handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, +- struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) ++ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph, ++ unsigned int hooknum) + { + struct ip_vs_protocol *pp = pd->pp; + +@@ -1077,7 +1088,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, + * if it came from this machine itself. So re-compute + * the routing information. + */ +- if (ip_vs_route_me_harder(af, skb)) ++ if (ip_vs_route_me_harder(af, skb, hooknum)) + goto drop; + + IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT"); +@@ -1180,7 +1191,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) + cp = pp->conn_out_get(af, skb, &iph, 0); + + if (likely(cp)) +- return handle_response(af, skb, pd, cp, &iph); ++ return handle_response(af, skb, pd, cp, &iph, hooknum); + if (sysctl_nat_icmp_send(net) && + (pp->protocol == IPPROTO_TCP || + pp->protocol == IPPROTO_UDP || +diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c +index f4484719f3e6..6d91d760a896 100644 +--- a/net/netfilter/ipvs/ip_vs_sync.c ++++ b/net/netfilter/ipvs/ip_vs_sync.c +@@ -891,6 +891,8 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, + IP_VS_DBG(2, "BACKUP, add new conn. 
failed\n"); + return; + } ++ if (!(flags & IP_VS_CONN_F_TEMPLATE)) ++ kfree(param->pe_data); + } + + if (opt) +@@ -1164,6 +1166,7 @@ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end) + (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) + ); + #endif ++ ip_vs_pe_put(param.pe); + return 0; + /* Error exit */ + out: +diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c +index 06df2b9110f5..fcea773971ca 100644 +--- a/net/netfilter/xt_socket.c ++++ b/net/netfilter/xt_socket.c +@@ -252,12 +252,13 @@ static int + extract_icmp6_fields(const struct sk_buff *skb, + unsigned int outside_hdrlen, + int *protocol, +- struct in6_addr **raddr, +- struct in6_addr **laddr, ++ const struct in6_addr **raddr, ++ const struct in6_addr **laddr, + __be16 *rport, +- __be16 *lport) ++ __be16 *lport, ++ struct ipv6hdr *ipv6_var) + { +- struct ipv6hdr *inside_iph, _inside_iph; ++ const struct ipv6hdr *inside_iph; + struct icmp6hdr *icmph, _icmph; + __be16 *ports, _ports[2]; + u8 inside_nexthdr; +@@ -272,12 +273,14 @@ extract_icmp6_fields(const struct sk_buff *skb, + if (icmph->icmp6_type & ICMPV6_INFOMSG_MASK) + return 1; + +- inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph), sizeof(_inside_iph), &_inside_iph); ++ inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph), ++ sizeof(*ipv6_var), ipv6_var); + if (inside_iph == NULL) + return 1; + inside_nexthdr = inside_iph->nexthdr; + +- inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph), ++ inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + ++ sizeof(*ipv6_var), + &inside_nexthdr, &inside_fragoff); + if (inside_hdrlen < 0) + return 1; /* hjm: Packet has no/incomplete transport layer headers. */ +@@ -324,10 +327,10 @@ xt_socket_get_sock_v6(struct net *net, const u8 protocol, + static bool + socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par) + { +- struct ipv6hdr *iph = ipv6_hdr(skb); ++ struct ipv6hdr ipv6_var, *iph = ipv6_hdr(skb); + struct udphdr _hdr, *hp = NULL; + struct sock *sk = skb->sk; +- struct in6_addr *daddr = NULL, *saddr = NULL; ++ const struct in6_addr *daddr = NULL, *saddr = NULL; + __be16 uninitialized_var(dport), uninitialized_var(sport); + int thoff = 0, uninitialized_var(tproto); + const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; +@@ -351,7 +354,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par) + + } else if (tproto == IPPROTO_ICMPV6) { + if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr, +- &sport, &dport)) ++ &sport, &dport, &ipv6_var)) + return false; + } else { + return false; +diff --git a/net/sched/ematch.c b/net/sched/ematch.c +index 3a633debb6df..a2abc449ce8f 100644 +--- a/net/sched/ematch.c ++++ b/net/sched/ematch.c +@@ -227,6 +227,7 @@ static int tcf_em_validate(struct tcf_proto *tp, + * to replay the request. 
+ */ + module_put(em->ops->owner); ++ em->ops = NULL; + err = -EAGAIN; + } + #endif +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c +index a72de074172d..8a6e3b0d25d4 100644 +--- a/net/sunrpc/cache.c ++++ b/net/sunrpc/cache.c +@@ -920,7 +920,7 @@ static unsigned int cache_poll(struct file *filp, poll_table *wait, + poll_wait(filp, &queue_wait, wait); + + /* alway allow write */ +- mask = POLL_OUT | POLLWRNORM; ++ mask = POLLOUT | POLLWRNORM; + + if (!rp) + return mask; +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index c882d07e56c9..d44bc54f142e 100644 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -1404,6 +1404,8 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state) + if (! snd_pcm_playback_empty(substream)) { + snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING); + snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING); ++ } else { ++ runtime->status->state = SNDRV_PCM_STATE_SETUP; + } + break; + case SNDRV_PCM_STATE_RUNNING: +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c +index 121336b0d3a8..984b75ef1190 100644 +--- a/sound/pci/hda/patch_sigmatel.c ++++ b/sound/pci/hda/patch_sigmatel.c +@@ -85,6 +85,7 @@ enum { + STAC_ALIENWARE_M17X, + STAC_92HD89XX_HP_FRONT_JACK, + STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK, ++ STAC_92HD73XX_ASUS_MOBO, + STAC_92HD73XX_MODELS + }; + +@@ -1924,7 +1925,18 @@ static const struct hda_fixup stac92hd73xx_fixups[] = { + [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = { + .type = HDA_FIXUP_PINS, + .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs, +- } ++ }, ++ [STAC_92HD73XX_ASUS_MOBO] = { ++ .type = HDA_FIXUP_PINS, ++ .v.pins = (const struct hda_pintbl[]) { ++ /* enable 5.1 and SPDIF out */ ++ { 0x0c, 0x01014411 }, ++ { 0x0d, 0x01014410 }, ++ { 0x0e, 0x01014412 }, ++ { 0x22, 0x014b1180 }, ++ { } ++ } ++ }, + }; + + static const struct hda_model_fixup stac92hd73xx_models[] = { +@@ -1936,6 +1948,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = { + { .id = STAC_DELL_M6_BOTH, .name = "dell-m6" }, + { .id = STAC_DELL_EQ, .name = "dell-eq" }, + { .id = STAC_ALIENWARE_M17X, .name = "alienware" }, ++ { .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" }, + {} + }; + +@@ -1988,6 +2001,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = { + "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK), + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17, + "unknown HP", STAC_92HD89XX_HP_FRONT_JACK), ++ SND_PCI_QUIRK(PCI_VENDOR_ID_ASUSTEK, 0x83f8, "ASUS AT4NM10", ++ STAC_92HD73XX_ASUS_MOBO), + {} /* terminator */ + }; + +diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c +index 56cc891e395e..d99c8d341e50 100644 +--- a/sound/pci/riptide/riptide.c ++++ b/sound/pci/riptide/riptide.c +@@ -2032,32 +2032,43 @@ snd_riptide_joystick_probe(struct pci_dev *pci, const struct pci_device_id *id) + { + static int dev; + struct gameport *gameport; ++ int ret; + + if (dev >= SNDRV_CARDS) + return -ENODEV; ++ + if (!enable[dev]) { +- dev++; +- return -ENOENT; ++ ret = -ENOENT; ++ goto inc_dev; + } + +- if (!joystick_port[dev++]) +- return 0; ++ if (!joystick_port[dev]) { ++ ret = 0; ++ goto inc_dev; ++ } + + gameport = gameport_allocate_port(); +- if (!gameport) +- return -ENOMEM; ++ if (!gameport) { ++ ret = -ENOMEM; ++ goto inc_dev; ++ } + if (!request_region(joystick_port[dev], 8, "Riptide gameport")) { + snd_printk(KERN_WARNING + "Riptide: cannot grab gameport 0x%x\n", + joystick_port[dev]); + gameport_free_port(gameport); +- return -EBUSY; ++ ret = 
-EBUSY; ++ goto inc_dev; + } + + gameport->io = joystick_port[dev]; + gameport_register_port(gameport); + pci_set_drvdata(pci, gameport); +- return 0; ++ ++ ret = 0; ++inc_dev: ++ dev++; ++ return ret; + } + + static void snd_riptide_joystick_remove(struct pci_dev *pci) +diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c +index 3cde55b753e2..9585e316a5c6 100644 +--- a/sound/pci/rme9652/hdspm.c ++++ b/sound/pci/rme9652/hdspm.c +@@ -6107,6 +6107,9 @@ static int snd_hdspm_playback_open(struct snd_pcm_substream *substream) + snd_pcm_hw_constraint_minmax(runtime, + SNDRV_PCM_HW_PARAM_PERIOD_SIZE, + 64, 8192); ++ snd_pcm_hw_constraint_minmax(runtime, ++ SNDRV_PCM_HW_PARAM_PERIODS, ++ 2, 2); + break; + } + +@@ -6181,6 +6184,9 @@ static int snd_hdspm_capture_open(struct snd_pcm_substream *substream) + snd_pcm_hw_constraint_minmax(runtime, + SNDRV_PCM_HW_PARAM_PERIOD_SIZE, + 64, 8192); ++ snd_pcm_hw_constraint_minmax(runtime, ++ SNDRV_PCM_HW_PARAM_PERIODS, ++ 2, 2); + break; + } + +diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c +index a11405de86e8..137ab9c05798 100644 +--- a/sound/soc/omap/omap-pcm.c ++++ b/sound/soc/omap/omap-pcm.c +@@ -156,7 +156,7 @@ static struct snd_pcm_ops omap_pcm_ops = { + .mmap = omap_pcm_mmap, + }; + +-static u64 omap_pcm_dmamask = DMA_BIT_MASK(64); ++static u64 omap_pcm_dmamask = DMA_BIT_MASK(32); + + static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, + int stream) +@@ -207,7 +207,7 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd) + if (!card->dev->dma_mask) + card->dev->dma_mask = &omap_pcm_dmamask; + if (!card->dev->coherent_dma_mask) +- card->dev->coherent_dma_mask = DMA_BIT_MASK(64); ++ card->dev->coherent_dma_mask = DMA_BIT_MASK(32); + + if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { + ret = omap_pcm_preallocate_dma_buffer(pcm, |