diff options
author | Mike Pagano <mpagano@gentoo.org> | 2016-03-17 18:52:12 -0400 |
---|---|---|
committer | Mike Pagano <mpagano@gentoo.org> | 2016-03-17 18:52:12 -0400 |
commit | 57f73735ff7386b833f9793004b2a35de5cb2703 (patch) | |
tree | 7f85d49d3e9e886a27c0d619a25ca99ad6bbc5bb | |
parent | Linux patch 4.1.19 (diff) | |
download | linux-patches-4.1-25.tar.gz linux-patches-4.1-25.tar.bz2 linux-patches-4.1-25.zip |
Linux patch 4.1.20 (tag: 4.1-25)
-rw-r--r-- | 0000_README | 4 | ||||
-rw-r--r-- | 1019_linux-4.1.20.patch | 3955 |
2 files changed, 3959 insertions, 0 deletions
diff --git a/0000_README b/0000_README index ad1d3727..3e197855 100644 --- a/0000_README +++ b/0000_README @@ -119,6 +119,10 @@ Patch: 1018_linux-4.1.19.patch From: http://www.kernel.org Desc: Linux 4.1.19 +Patch: 1019_linux-4.1.20.patch +From: http://www.kernel.org +Desc: Linux 4.1.20 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1019_linux-4.1.20.patch b/1019_linux-4.1.20.patch new file mode 100644 index 00000000..75d3dea7 --- /dev/null +++ b/1019_linux-4.1.20.patch @@ -0,0 +1,3955 @@ +diff --git a/Makefile b/Makefile +index 06107f683bbe..39be1bbd373a 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 1 +-SUBLEVEL = 19 ++SUBLEVEL = 20 + EXTRAVERSION = + NAME = Series 4800 + +diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c +index d503fbb787d3..88993cc95e8e 100644 +--- a/arch/arm/kvm/guest.c ++++ b/arch/arm/kvm/guest.c +@@ -155,7 +155,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) + u64 val; + + val = kvm_arm_timer_get_reg(vcpu, reg->id); +- return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)); ++ return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0; + } + + static unsigned long num_core_regs(void) +diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c +index 9535bd555d1d..d4e04d2237c4 100644 +--- a/arch/arm64/kvm/guest.c ++++ b/arch/arm64/kvm/guest.c +@@ -184,7 +184,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) + u64 val; + + val = kvm_arm_timer_get_reg(vcpu, reg->id); +- return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)); ++ return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? 
-EFAULT : 0; + } + + /** +diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c +index 5f5f44edc77d..54923d6b7e16 100644 +--- a/arch/mips/kernel/traps.c ++++ b/arch/mips/kernel/traps.c +@@ -693,15 +693,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode) + asmlinkage void do_ov(struct pt_regs *regs) + { + enum ctx_state prev_state; +- siginfo_t info; ++ siginfo_t info = { ++ .si_signo = SIGFPE, ++ .si_code = FPE_INTOVF, ++ .si_addr = (void __user *)regs->cp0_epc, ++ }; + + prev_state = exception_enter(); + die_if_kernel("Integer overflow", regs); + +- info.si_code = FPE_INTOVF; +- info.si_signo = SIGFPE; +- info.si_errno = 0; +- info.si_addr = (void __user *) regs->cp0_epc; + force_sig_info(SIGFPE, &info, current); + exception_exit(prev_state); + } +@@ -877,7 +877,7 @@ out: + void do_trap_or_bp(struct pt_regs *regs, unsigned int code, + const char *str) + { +- siginfo_t info; ++ siginfo_t info = { 0 }; + char b[40]; + + #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP +@@ -905,7 +905,6 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code, + else + info.si_code = FPE_INTOVF; + info.si_signo = SIGFPE; +- info.si_errno = 0; + info.si_addr = (void __user *) regs->cp0_epc; + force_sig_info(SIGFPE, &info, current); + break; +diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c +index 22ee0afc7d5d..ace4ed7d41c6 100644 +--- a/arch/mips/kvm/mips.c ++++ b/arch/mips/kvm/mips.c +@@ -700,7 +700,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, + } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { + void __user *uaddr = (void __user *)(long)reg->addr; + +- return copy_to_user(uaddr, vs, 16); ++ return copy_to_user(uaddr, vs, 16) ? 
-EFAULT : 0; + } else { + return -EINVAL; + } +@@ -730,7 +730,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, + } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { + void __user *uaddr = (void __user *)(long)reg->addr; + +- return copy_from_user(vs, uaddr, 16); ++ return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0; + } else { + return -EINVAL; + } +diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c +index 9585c81f755f..ce0b2b4075c7 100644 +--- a/arch/parisc/kernel/ptrace.c ++++ b/arch/parisc/kernel/ptrace.c +@@ -269,14 +269,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + + long do_syscall_trace_enter(struct pt_regs *regs) + { +- long ret = 0; +- + /* Do the secure computing check first. */ + secure_computing_strict(regs->gr[20]); + + if (test_thread_flag(TIF_SYSCALL_TRACE) && +- tracehook_report_syscall_entry(regs)) +- ret = -1L; ++ tracehook_report_syscall_entry(regs)) { ++ /* ++ * Tracing decided this syscall should not happen or the ++ * debugger stored an invalid system call number. Skip ++ * the system call and the system call restart handling. ++ */ ++ regs->gr[20] = -1UL; ++ goto out; ++ } + + #ifdef CONFIG_64BIT + if (!is_compat_task()) +@@ -290,7 +295,8 @@ long do_syscall_trace_enter(struct pt_regs *regs) + regs->gr[24] & 0xffffffff, + regs->gr[23] & 0xffffffff); + +- return ret ? 
: regs->gr[20]; ++out: ++ return regs->gr[20]; + } + + void do_syscall_trace_exit(struct pt_regs *regs) +diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S +index 0b8d26d3ba43..02cf40c96fe3 100644 +--- a/arch/parisc/kernel/syscall.S ++++ b/arch/parisc/kernel/syscall.S +@@ -343,7 +343,7 @@ tracesys_next: + #endif + + comiclr,>>= __NR_Linux_syscalls, %r20, %r0 +- b,n .Lsyscall_nosys ++ b,n .Ltracesys_nosys + + LDREGX %r20(%r19), %r19 + +@@ -359,6 +359,9 @@ tracesys_next: + be 0(%sr7,%r19) + ldo R%tracesys_exit(%r2),%r2 + ++.Ltracesys_nosys: ++ ldo -ENOSYS(%r0),%r28 /* set errno */ ++ + /* Do *not* call this function on the gateway page, because it + makes a direct call to syscall_trace. */ + +diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c +index c78ba51ae285..24b7e554db27 100644 +--- a/arch/s390/kernel/compat_signal.c ++++ b/arch/s390/kernel/compat_signal.c +@@ -293,7 +293,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs, + + /* Restore high gprs from signal stack */ + if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high, +- sizeof(&sregs_ext->gprs_high))) ++ sizeof(sregs_ext->gprs_high))) + return -EFAULT; + for (i = 0; i < NUM_GPRS; i++) + *(__u32 *)®s->gprs[i] = gprs_high[i]; +diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c +index d1daead5fcdd..adb3eaf8fe2a 100644 +--- a/arch/x86/kernel/acpi/sleep.c ++++ b/arch/x86/kernel/acpi/sleep.c +@@ -16,6 +16,7 @@ + #include <asm/cacheflush.h> + #include <asm/realmode.h> + ++#include <linux/ftrace.h> + #include "../../realmode/rm/wakeup.h" + #include "sleep.h" + +@@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void) + saved_magic = 0x123456789abcdef0L; + #endif /* CONFIG_64BIT */ + ++ /* ++ * Pause/unpause graph tracing around do_suspend_lowlevel as it has ++ * inconsistent call/return info after it jumps to the wakeup vector. 
++ */ ++ pause_graph_tracing(); + do_suspend_lowlevel(); ++ unpause_graph_tracing(); + return 0; + } + +diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h +index 6e6d115fe9b5..d537c9badeb6 100644 +--- a/arch/x86/kvm/paging_tmpl.h ++++ b/arch/x86/kvm/paging_tmpl.h +@@ -257,7 +257,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, + return ret; + + mark_page_dirty(vcpu->kvm, table_gfn); +- walker->ptes[level] = pte; ++ walker->ptes[level - 1] = pte; + } + return 0; + } +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index fed4c84eac44..41a3fb4ed346 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -2117,6 +2117,8 @@ static void accumulate_steal_time(struct kvm_vcpu *vcpu) + + static void record_steal_time(struct kvm_vcpu *vcpu) + { ++ accumulate_steal_time(vcpu); ++ + if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) + return; + +@@ -2262,12 +2264,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + if (!(data & KVM_MSR_ENABLED)) + break; + +- vcpu->arch.st.last_steal = current->sched_info.run_delay; +- +- preempt_disable(); +- accumulate_steal_time(vcpu); +- preempt_enable(); +- + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + + break; +@@ -2966,7 +2962,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + vcpu->cpu = cpu; + } + +- accumulate_steal_time(vcpu); + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + } + +@@ -6371,12 +6366,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) + * KVM_DEBUGREG_WONT_EXIT again. 
+ */ + if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { +- int i; +- + WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); + kvm_x86_ops->sync_dirty_debug_regs(vcpu); +- for (i = 0; i < KVM_NR_DB_REGS; i++) +- vcpu->arch.eff_db[i] = vcpu->arch.db[i]; ++ kvm_update_dr0123(vcpu); ++ kvm_update_dr6(vcpu); ++ kvm_update_dr7(vcpu); ++ vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; + } + + /* +diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c +index f738c61bc891..6a3c774eaff6 100644 +--- a/arch/x86/mm/mpx.c ++++ b/arch/x86/mm/mpx.c +@@ -142,7 +142,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs, + break; + } + +- if (regno > nr_registers) { ++ if (regno >= nr_registers) { + WARN_ONCE(1, "decoded an instruction with an invalid register"); + return -EINVAL; + } +diff --git a/drivers/android/binder.c b/drivers/android/binder.c +index 6607f3c6ace1..f1a26d937d98 100644 +--- a/drivers/android/binder.c ++++ b/drivers/android/binder.c +@@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc, + if (get_user(cookie, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + +- ptr += sizeof(void *); ++ ptr += sizeof(cookie); + list_for_each_entry(w, &proc->delivered_death, entry) { + struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); + +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c +index 666fd8a1500a..34825d63d483 100644 +--- a/drivers/ata/ahci.c ++++ b/drivers/ata/ahci.c +@@ -332,6 +332,16 @@ static const struct pci_device_id ahci_pci_tbl[] = { + { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */ ++ { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/ ++ { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/ ++ { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa204), 
board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */ + { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */ + { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ +@@ -362,6 +372,22 @@ static const struct pci_device_id ahci_pci_tbl[] = { + { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ + { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ + { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ ++ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/ ++ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/ ++ { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/ ++ { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg 
RAID*/ + + /* JMicron 360/1/3/5/6, match class to avoid IDE function */ + { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 0d7f0da3a269..ae7cfcb562dc 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap) + int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, + int cmd, void __user *arg) + { +- int val = -EINVAL, rc = -EINVAL; ++ unsigned long val; ++ int rc = -EINVAL; + unsigned long flags; + + switch (cmd) { +- case ATA_IOC_GET_IO32: ++ case HDIO_GET_32BIT: + spin_lock_irqsave(ap->lock, flags); + val = ata_ioc32(ap); + spin_unlock_irqrestore(ap->lock, flags); +- if (copy_to_user(arg, &val, 1)) +- return -EFAULT; +- return 0; ++ return put_user(val, (unsigned long __user *)arg); + +- case ATA_IOC_SET_IO32: ++ case HDIO_SET_32BIT: + val = (unsigned long) arg; + rc = 0; + spin_lock_irqsave(ap->lock, flags); +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c +index 035dacc93382..fd5c5f3370f6 100644 +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev) + } while (ast_read32(ast, 0x10000) != 0x01); + data = ast_read32(ast, 0x10004); + +- if (data & 0x400) ++ if (data & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; +diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c +index 91764320c56f..a56eab7f0ab1 100644 +--- a/drivers/gpu/drm/radeon/radeon_pm.c ++++ b/drivers/gpu/drm/radeon/radeon_pm.c +@@ -1079,12 +1079,6 @@ force: + + /* update display watermarks based on new power state */ + radeon_bandwidth_update(rdev); +- /* update displays */ +- radeon_dpm_display_configuration_changed(rdev); +- +- rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; +- rdev->pm.dpm.current_active_crtc_count = 
rdev->pm.dpm.new_active_crtc_count; +- rdev->pm.dpm.single_display = single_display; + + /* wait for the rings to drain */ + for (i = 0; i < RADEON_NUM_RINGS; i++) { +@@ -1101,6 +1095,13 @@ force: + + radeon_dpm_post_set_power_state(rdev); + ++ /* update displays */ ++ radeon_dpm_display_configuration_changed(rdev); ++ ++ rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; ++ rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; ++ rdev->pm.dpm.single_display = single_display; ++ + if (rdev->asic->dpm.force_performance_level) { + if (rdev->pm.dpm.thermal_active) { + enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; +diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c +index f155b8380481..2b3105c8aed3 100644 +--- a/drivers/hwmon/ads1015.c ++++ b/drivers/hwmon/ads1015.c +@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel, + struct ads1015_data *data = i2c_get_clientdata(client); + unsigned int pga = data->channel_data[channel].pga; + int fullscale = fullscale_table[pga]; +- const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0; ++ const int mask = data->id == ads1115 ? 
0x7fff : 0x7ff0; + + return DIV_ROUND_CLOSEST(reg * fullscale, mask); + } +diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c +index a3dae6d0082a..83ea8c8039fa 100644 +--- a/drivers/hwmon/gpio-fan.c ++++ b/drivers/hwmon/gpio-fan.c +@@ -406,16 +406,11 @@ static int gpio_fan_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state) + { + struct gpio_fan_data *fan_data = cdev->devdata; +- int r; + + if (!fan_data) + return -EINVAL; + +- r = get_fan_speed_index(fan_data); +- if (r < 0) +- return r; +- +- *state = r; ++ *state = fan_data->speed_index; + return 0; + } + +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c +index 450ef5001a65..1750db0ef61c 100644 +--- a/drivers/iommu/amd_iommu_init.c ++++ b/drivers/iommu/amd_iommu_init.c +@@ -227,6 +227,10 @@ static enum iommu_init_state init_state = IOMMU_START_STATE; + static int amd_iommu_enable_interrupts(void); + static int __init iommu_go_to_state(enum iommu_init_state state); + ++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu, ++ u8 bank, u8 cntr, u8 fxn, ++ u64 *value, bool is_write); ++ + static inline void update_last_devid(u16 devid) + { + if (devid > amd_iommu_last_bdf) +@@ -1066,6 +1070,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) + } + + /* ++ * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission) ++ * Workaround: ++ * BIOS should enable ATS write permission check by setting ++ * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b ++ */ ++static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu) ++{ ++ u32 value; ++ ++ if ((boot_cpu_data.x86 != 0x15) || ++ (boot_cpu_data.x86_model < 0x30) || ++ (boot_cpu_data.x86_model > 0x3f)) ++ return; ++ ++ /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */ ++ value = iommu_read_l2(iommu, 0x47); ++ ++ if (value & BIT(0)) ++ return; ++ ++ /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */ ++ iommu_write_l2(iommu, 0x47, value | BIT(0)); ++ ++ pr_info("AMD-Vi: Applying 
ATS write check workaround for IOMMU at %s\n", ++ dev_name(&iommu->dev->dev)); ++} ++ ++/* + * This function clues the initialization function for one IOMMU + * together and also allocates the command buffer and programs the + * hardware. It does NOT enable the IOMMU. This is done afterwards. +@@ -1192,8 +1224,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu) + amd_iommu_pc_present = true; + + /* Check if the performance counters can be written to */ +- if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) || +- (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) || ++ if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) || ++ (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) || + (val != val2)) { + pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n"); + amd_iommu_pc_present = false; +@@ -1339,6 +1371,7 @@ static int iommu_init_pci(struct amd_iommu *iommu) + } + + amd_iommu_erratum_746_workaround(iommu); ++ amd_iommu_ats_write_check_workaround(iommu); + + iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu, + amd_iommu_groups, "ivhd%d", +@@ -2362,22 +2395,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid) + } + EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); + +-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, ++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu, ++ u8 bank, u8 cntr, u8 fxn, + u64 *value, bool is_write) + { +- struct amd_iommu *iommu; + u32 offset; + u32 max_offset_lim; + +- /* Make sure the IOMMU PC resource is available */ +- if (!amd_iommu_pc_present) +- return -ENODEV; +- +- /* Locate the iommu associated with the device ID */ +- iommu = amd_iommu_rlookup_table[devid]; +- + /* Check for valid iommu and pc register indexing */ +- if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7))) ++ if (WARN_ON((fxn > 0x28) || (fxn & 7))) + return -ENODEV; + + offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn); +@@ -2401,3 +2427,16 @@ int 
amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, + return 0; + } + EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val); ++ ++int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, ++ u64 *value, bool is_write) ++{ ++ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; ++ ++ /* Make sure the IOMMU PC resource is available */ ++ if (!amd_iommu_pc_present || iommu == NULL) ++ return -ENODEV; ++ ++ return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn, ++ value, is_write); ++} +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index 8b72ceee0f61..62610aafaac7 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -1146,6 +1146,8 @@ static void dm_unprep_request(struct request *rq) + + if (clone) + free_rq_clone(clone); ++ else if (!tio->md->queue->mq_ops) ++ free_rq_tio(tio); + } + + /* +diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c +index 60ffcf098bef..5f92ec23bb07 100644 +--- a/drivers/media/i2c/adv7604.c ++++ b/drivers/media/i2c/adv7604.c +@@ -1911,10 +1911,9 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled) + } + + /* tx 5v detect */ +- tx_5v = io_read(sd, 0x70) & info->cable_det_mask; ++ tx_5v = irq_reg_0x70 & info->cable_det_mask; + if (tx_5v) { + v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v); +- io_write(sd, 0x71, tx_5v); + adv76xx_s_detect_tx_5v_ctrl(sd); + if (handled) + *handled = true; +diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c +index 2a1b6e037e1a..0134ba32a057 100644 +--- a/drivers/mtd/ubi/upd.c ++++ b/drivers/mtd/ubi/upd.c +@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, + vol->changing_leb = 1; + vol->ch_lnum = req->lnum; + +- vol->upd_buf = vmalloc(req->bytes); ++ vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size)); + if (!vol->upd_buf) + return -ENOMEM; + +diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c +index 866bac0ae7e9..339b0c5ce60c 100644 +--- 
a/drivers/net/can/usb/ems_usb.c ++++ b/drivers/net/can/usb/ems_usb.c +@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2"); + */ + #define EMS_USB_ARM7_CLOCK 8000000 + ++#define CPC_TX_QUEUE_TRIGGER_LOW 25 ++#define CPC_TX_QUEUE_TRIGGER_HIGH 35 ++ + /* + * CAN-Message representation in a CPC_MSG. Message object type is + * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or +@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb) + switch (urb->status) { + case 0: + dev->free_slots = dev->intr_in_buffer[1]; ++ if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){ ++ if (netif_queue_stopped(netdev)){ ++ netif_wake_queue(netdev); ++ } ++ } + break; + + case -ECONNRESET: /* unlink */ +@@ -529,8 +537,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb) + /* Release context */ + context->echo_index = MAX_TX_URBS; + +- if (netif_queue_stopped(netdev)) +- netif_wake_queue(netdev); + } + + /* +@@ -590,7 +596,7 @@ static int ems_usb_start(struct ems_usb *dev) + int err, i; + + dev->intr_in_buffer[0] = 0; +- dev->free_slots = 15; /* initial size */ ++ dev->free_slots = 50; /* initial size */ + + for (i = 0; i < MAX_RX_URBS; i++) { + struct urb *urb = NULL; +@@ -838,7 +844,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne + + /* Slow down tx path */ + if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS || +- dev->free_slots < 5) { ++ dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) { + netif_stop_queue(netdev); + } + } +diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c +index 9779c1e5688c..90e8b662e44d 100644 +--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c +@@ -2797,6 +2797,10 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + ++ /* we don't support "match all" in the firmware */ ++ if (!req->n_match_sets) ++ return -EOPNOTSUPP; ++ + if 
(!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS); + if (ret) +diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c +index 7cfd2db02deb..914655e89677 100644 +--- a/drivers/pci/xen-pcifront.c ++++ b/drivers/pci/xen-pcifront.c +@@ -52,7 +52,7 @@ struct pcifront_device { + }; + + struct pcifront_sd { +- int domain; ++ struct pci_sysdata sd; + struct pcifront_device *pdev; + }; + +@@ -66,7 +66,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd, + unsigned int domain, unsigned int bus, + struct pcifront_device *pdev) + { +- sd->domain = domain; ++ /* Because we do not expose that information via XenBus. */ ++ sd->sd.node = first_online_node; ++ sd->sd.domain = domain; + sd->pdev = pdev; + } + +@@ -464,8 +466,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev, + dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n", + domain, bus); + +- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL); +- sd = kmalloc(sizeof(*sd), GFP_KERNEL); ++ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL); ++ sd = kzalloc(sizeof(*sd), GFP_KERNEL); + if (!bus_entry || !sd) { + err = -ENOMEM; + goto err_out; +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index 2e58279fab60..6f50e9d958de 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -4095,6 +4095,17 @@ reject: + return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); + } + ++static bool iscsi_target_check_conn_state(struct iscsi_conn *conn) ++{ ++ bool ret; ++ ++ spin_lock_bh(&conn->state_lock); ++ ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN); ++ spin_unlock_bh(&conn->state_lock); ++ ++ return ret; ++} ++ + int iscsi_target_rx_thread(void *arg) + { + int ret, rc; +@@ -4112,7 +4123,7 @@ int iscsi_target_rx_thread(void *arg) + * incoming iscsi/tcp socket I/O, and/or failing the connection. 
+ */ + rc = wait_for_completion_interruptible(&conn->rx_login_comp); +- if (rc < 0) ++ if (rc < 0 || iscsi_target_check_conn_state(conn)) + return 0; + + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { +diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c +index f9cde9141836..9a96f1712b7a 100644 +--- a/drivers/target/iscsi/iscsi_target_nego.c ++++ b/drivers/target/iscsi/iscsi_target_nego.c +@@ -393,6 +393,7 @@ err: + if (login->login_complete) { + if (conn->rx_thread && conn->rx_thread_active) { + send_sig(SIGINT, conn->rx_thread, 1); ++ complete(&conn->rx_login_comp); + kthread_stop(conn->rx_thread); + } + if (conn->tx_thread && conn->tx_thread_active) { +diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c +index 08aa7cc58694..57fd4e14d4eb 100644 +--- a/drivers/target/target_core_pr.c ++++ b/drivers/target/target_core_pr.c +@@ -329,7 +329,7 @@ static int core_scsi3_pr_seq_non_holder( + * RESERVATION CONFLICT on some CDBs */ + + if (!se_sess->se_node_acl->device_list) +- return; ++ return 0; + + se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; + /* +diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c +index 733824e3825f..46b966d09af2 100644 +--- a/drivers/target/target_core_sbc.c ++++ b/drivers/target/target_core_sbc.c +@@ -321,7 +321,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o + return 0; + } + +-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success) ++static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success, ++ int *post_ret) + { + unsigned char *buf, *addr; + struct scatterlist *sg; +@@ -385,7 +386,8 @@ sbc_execute_rw(struct se_cmd *cmd) + cmd->data_direction); + } + +-static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) ++static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, ++ int *post_ret) + { + 
struct se_device *dev = cmd->se_dev; + +@@ -395,8 +397,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) + * sent to the backend driver. + */ + spin_lock_irq(&cmd->t_state_lock); +- if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) ++ if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) { + cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; ++ *post_ret = 1; ++ } + spin_unlock_irq(&cmd->t_state_lock); + + /* +@@ -408,7 +412,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) + return TCM_NO_SENSE; + } + +-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success) ++static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success, ++ int *post_ret) + { + struct se_device *dev = cmd->se_dev; + struct scatterlist *write_sg = NULL, *sg; +@@ -504,11 +509,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes + + if (block_size < PAGE_SIZE) { + sg_set_page(&write_sg[i], m.page, block_size, +- block_size); ++ m.piter.sg->offset + block_size); + } else { + sg_miter_next(&m); + sg_set_page(&write_sg[i], m.page, block_size, +- 0); ++ m.piter.sg->offset); + } + len -= block_size; + i++; +diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c +index adb8016955c4..ad48837ead42 100644 +--- a/drivers/target/target_core_tmr.c ++++ b/drivers/target/target_core_tmr.c +@@ -78,16 +78,18 @@ void core_tmr_release_req(struct se_tmr_req *tmr) + kfree(tmr); + } + +-static void core_tmr_handle_tas_abort( +- struct se_node_acl *tmr_nacl, +- struct se_cmd *cmd, +- int tas) ++static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) + { +- bool remove = true; ++ unsigned long flags; ++ bool remove = true, send_tas; + /* + * TASK ABORTED status (TAS) bit support + */ +- if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) { ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ send_tas = 
(cmd->transport_state & CMD_T_TAS); ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ ++ if (send_tas) { + remove = false; + transport_send_task_abort(cmd); + } +@@ -110,6 +112,47 @@ static int target_check_cdb_and_preempt(struct list_head *list, + return 1; + } + ++static bool __target_check_io_state(struct se_cmd *se_cmd, ++ struct se_session *tmr_sess, int tas) ++{ ++ struct se_session *sess = se_cmd->se_sess; ++ ++ assert_spin_locked(&sess->sess_cmd_lock); ++ WARN_ON_ONCE(!irqs_disabled()); ++ /* ++ * If command already reached CMD_T_COMPLETE state within ++ * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown, ++ * this se_cmd has been passed to fabric driver and will ++ * not be aborted. ++ * ++ * Otherwise, obtain a local se_cmd->cmd_kref now for TMR ++ * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as ++ * long as se_cmd->cmd_kref is still active unless zero. ++ */ ++ spin_lock(&se_cmd->t_state_lock); ++ if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) { ++ pr_debug("Attempted to abort io tag: %u already complete or" ++ " fabric stop, skipping\n", ++ se_cmd->se_tfo->get_task_tag(se_cmd)); ++ spin_unlock(&se_cmd->t_state_lock); ++ return false; ++ } ++ if (sess->sess_tearing_down || se_cmd->cmd_wait_set) { ++ pr_debug("Attempted to abort io tag: %u already shutdown," ++ " skipping\n", se_cmd->se_tfo->get_task_tag(se_cmd)); ++ spin_unlock(&se_cmd->t_state_lock); ++ return false; ++ } ++ se_cmd->transport_state |= CMD_T_ABORTED; ++ ++ if ((tmr_sess != se_cmd->se_sess) && tas) ++ se_cmd->transport_state |= CMD_T_TAS; ++ ++ spin_unlock(&se_cmd->t_state_lock); ++ ++ return kref_get_unless_zero(&se_cmd->cmd_kref); ++} ++ + void core_tmr_abort_task( + struct se_device *dev, + struct se_tmr_req *tmr, +@@ -136,25 +179,20 @@ void core_tmr_abort_task( + printk("ABORT_TASK: Found referenced %s task_tag: %u\n", + se_cmd->se_tfo->get_fabric_name(), ref_tag); + +- spin_lock(&se_cmd->t_state_lock); +- if (se_cmd->transport_state & 
CMD_T_COMPLETE) { +- printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag); +- spin_unlock(&se_cmd->t_state_lock); ++ if (!__target_check_io_state(se_cmd, se_sess, 0)) { + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); ++ target_put_sess_cmd(se_cmd); + goto out; + } +- se_cmd->transport_state |= CMD_T_ABORTED; +- spin_unlock(&se_cmd->t_state_lock); + + list_del_init(&se_cmd->se_cmd_list); +- kref_get(&se_cmd->cmd_kref); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + + cancel_work_sync(&se_cmd->work); + transport_wait_for_tasks(se_cmd); + +- target_put_sess_cmd(se_cmd); + transport_cmd_finish_abort(se_cmd, true); ++ target_put_sess_cmd(se_cmd); + + printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" + " ref_tag: %d\n", ref_tag); +@@ -211,7 +249,8 @@ static void core_tmr_drain_tmr_list( + + spin_lock(&sess->sess_cmd_lock); + spin_lock(&cmd->t_state_lock); +- if (!(cmd->transport_state & CMD_T_ACTIVE)) { ++ if (!(cmd->transport_state & CMD_T_ACTIVE) || ++ (cmd->transport_state & CMD_T_FABRIC_STOP)) { + spin_unlock(&cmd->t_state_lock); + spin_unlock(&sess->sess_cmd_lock); + continue; +@@ -221,15 +260,22 @@ static void core_tmr_drain_tmr_list( + spin_unlock(&sess->sess_cmd_lock); + continue; + } ++ if (sess->sess_tearing_down || cmd->cmd_wait_set) { ++ spin_unlock(&cmd->t_state_lock); ++ spin_unlock(&sess->sess_cmd_lock); ++ continue; ++ } + cmd->transport_state |= CMD_T_ABORTED; + spin_unlock(&cmd->t_state_lock); + + rc = kref_get_unless_zero(&cmd->cmd_kref); +- spin_unlock(&sess->sess_cmd_lock); + if (!rc) { + printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n"); ++ spin_unlock(&sess->sess_cmd_lock); + continue; + } ++ spin_unlock(&sess->sess_cmd_lock); ++ + list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); + } + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); +@@ -254,13 +300,15 @@ static void core_tmr_drain_tmr_list( + static void core_tmr_drain_state_list( + struct se_device *dev, + struct se_cmd *prout_cmd, +- 
struct se_node_acl *tmr_nacl, ++ struct se_session *tmr_sess, + int tas, + struct list_head *preempt_and_abort_list) + { + LIST_HEAD(drain_task_list); ++ struct se_session *sess; + struct se_cmd *cmd, *next; + unsigned long flags; ++ int rc; + + /* + * Complete outstanding commands with TASK_ABORTED SAM status. +@@ -299,6 +347,16 @@ static void core_tmr_drain_state_list( + if (prout_cmd == cmd) + continue; + ++ sess = cmd->se_sess; ++ if (WARN_ON_ONCE(!sess)) ++ continue; ++ ++ spin_lock(&sess->sess_cmd_lock); ++ rc = __target_check_io_state(cmd, tmr_sess, tas); ++ spin_unlock(&sess->sess_cmd_lock); ++ if (!rc) ++ continue; ++ + list_move_tail(&cmd->state_list, &drain_task_list); + cmd->state_active = false; + } +@@ -306,7 +364,7 @@ static void core_tmr_drain_state_list( + + while (!list_empty(&drain_task_list)) { + cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); +- list_del(&cmd->state_list); ++ list_del_init(&cmd->state_list); + + pr_debug("LUN_RESET: %s cmd: %p" + " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d" +@@ -330,16 +388,11 @@ static void core_tmr_drain_state_list( + * loop above, but we do it down here given that + * cancel_work_sync may block. + */ +- if (cmd->t_state == TRANSPORT_COMPLETE) +- cancel_work_sync(&cmd->work); +- +- spin_lock_irqsave(&cmd->t_state_lock, flags); +- target_stop_cmd(cmd, &flags); +- +- cmd->transport_state |= CMD_T_ABORTED; +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ cancel_work_sync(&cmd->work); ++ transport_wait_for_tasks(cmd); + +- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas); ++ core_tmr_handle_tas_abort(cmd, tas); ++ target_put_sess_cmd(cmd); + } + } + +@@ -351,6 +404,7 @@ int core_tmr_lun_reset( + { + struct se_node_acl *tmr_nacl = NULL; + struct se_portal_group *tmr_tpg = NULL; ++ struct se_session *tmr_sess = NULL; + int tas; + /* + * TASK_ABORTED status bit, this is configurable via ConfigFS +@@ -369,8 +423,9 @@ int core_tmr_lun_reset( + * or struct se_device passthrough.. 
+ */ + if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { +- tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; +- tmr_tpg = tmr->task_cmd->se_sess->se_tpg; ++ tmr_sess = tmr->task_cmd->se_sess; ++ tmr_nacl = tmr_sess->se_node_acl; ++ tmr_tpg = tmr_sess->se_tpg; + if (tmr_nacl && tmr_tpg) { + pr_debug("LUN_RESET: TMR caller fabric: %s" + " initiator port %s\n", +@@ -383,7 +438,7 @@ int core_tmr_lun_reset( + dev->transport->name, tas); + + core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); +- core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, ++ core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas, + preempt_and_abort_list); + + /* +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index 3881504b40d8..be12b9d84052 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -555,9 +555,6 @@ void transport_deregister_session(struct se_session *se_sess) + } + EXPORT_SYMBOL(transport_deregister_session); + +-/* +- * Called with cmd->t_state_lock held. +- */ + static void target_remove_from_state_list(struct se_cmd *cmd) + { + struct se_device *dev = cmd->se_dev; +@@ -582,10 +579,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, + { + unsigned long flags; + +- spin_lock_irqsave(&cmd->t_state_lock, flags); +- if (write_pending) +- cmd->t_state = TRANSPORT_WRITE_PENDING; +- + if (remove_from_lists) { + target_remove_from_state_list(cmd); + +@@ -595,6 +588,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, + cmd->se_lun = NULL; + } + ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (write_pending) ++ cmd->t_state = TRANSPORT_WRITE_PENDING; ++ + /* + * Determine if frontend context caller is requesting the stopping of + * this command for frontend exceptions. 
+@@ -649,6 +646,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) + + void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) + { ++ bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); ++ + if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) + transport_lun_remove_cmd(cmd); + /* +@@ -660,7 +659,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) + + if (transport_cmd_check_stop_to_fabric(cmd)) + return; +- if (remove) ++ if (remove && ack_kref) + transport_put_cmd(cmd); + } + +@@ -728,7 +727,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) + * Check for case where an explicit ABORT_TASK has been received + * and transport_wait_for_tasks() will be waiting for completion.. + */ +- if (cmd->transport_state & CMD_T_ABORTED && ++ if (cmd->transport_state & CMD_T_ABORTED || + cmd->transport_state & CMD_T_STOP) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + complete_all(&cmd->t_transport_stop_comp); +@@ -1638,7 +1637,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) + void transport_generic_request_failure(struct se_cmd *cmd, + sense_reason_t sense_reason) + { +- int ret = 0; ++ int ret = 0, post_ret = 0; + + pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" + " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), +@@ -1661,7 +1660,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, + */ + if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && + cmd->transport_complete_callback) +- cmd->transport_complete_callback(cmd, false); ++ cmd->transport_complete_callback(cmd, false, &post_ret); + + switch (sense_reason) { + case TCM_NON_EXISTENT_LUN: +@@ -1836,19 +1835,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd) + return true; + } + ++static int __transport_check_aborted_status(struct se_cmd *, int); ++ + void target_execute_cmd(struct se_cmd *cmd) + { + /* +- * If the received CDB has aleady been aborted stop processing it here. 
+- */ +- if (transport_check_aborted_status(cmd, 1)) +- return; +- +- /* + * Determine if frontend context caller is requesting the stopping of + * this command for frontend exceptions. ++ * ++ * If the received CDB has aleady been aborted stop processing it here. + */ + spin_lock_irq(&cmd->t_state_lock); ++ if (__transport_check_aborted_status(cmd, 1)) { ++ spin_unlock_irq(&cmd->t_state_lock); ++ return; ++ } + if (cmd->transport_state & CMD_T_STOP) { + pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", + __func__, __LINE__, +@@ -2056,11 +2057,13 @@ static void target_complete_ok_work(struct work_struct *work) + */ + if (cmd->transport_complete_callback) { + sense_reason_t rc; ++ bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); ++ bool zero_dl = !(cmd->data_length); ++ int post_ret = 0; + +- rc = cmd->transport_complete_callback(cmd, true); +- if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { +- if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && +- !cmd->data_length) ++ rc = cmd->transport_complete_callback(cmd, true, &post_ret); ++ if (!rc && !post_ret) { ++ if (caw && zero_dl) + goto queue_rsp; + + return; +@@ -2209,20 +2212,14 @@ static inline void transport_free_pages(struct se_cmd *cmd) + } + + /** +- * transport_release_cmd - free a command +- * @cmd: command to free ++ * transport_put_cmd - release a reference to a command ++ * @cmd: command to release + * +- * This routine unconditionally frees a command, and reference counting +- * or list removal must be done in the caller. ++ * This routine releases our reference to the command and frees it if possible. 
+ */ +-static int transport_release_cmd(struct se_cmd *cmd) ++static int transport_put_cmd(struct se_cmd *cmd) + { + BUG_ON(!cmd->se_tfo); +- +- if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) +- core_tmr_release_req(cmd->se_tmr_req); +- if (cmd->t_task_cdb != cmd->__t_task_cdb) +- kfree(cmd->t_task_cdb); + /* + * If this cmd has been setup with target_get_sess_cmd(), drop + * the kref and call ->release_cmd() in kref callback. +@@ -2230,18 +2227,6 @@ static int transport_release_cmd(struct se_cmd *cmd) + return target_put_sess_cmd(cmd); + } + +-/** +- * transport_put_cmd - release a reference to a command +- * @cmd: command to release +- * +- * This routine releases our reference to the command and frees it if possible. +- */ +-static int transport_put_cmd(struct se_cmd *cmd) +-{ +- transport_free_pages(cmd); +- return transport_release_cmd(cmd); +-} +- + void *transport_kmap_data_sg(struct se_cmd *cmd) + { + struct scatterlist *sg = cmd->t_data_sg; +@@ -2437,34 +2422,59 @@ static void transport_write_pending_qf(struct se_cmd *cmd) + } + } + +-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) ++static bool ++__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, ++ unsigned long *flags); ++ ++static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) + { + unsigned long flags; ++ ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++} ++ ++int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) ++{ + int ret = 0; ++ bool aborted = false, tas = false; + + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { + if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) +- transport_wait_for_tasks(cmd); ++ target_wait_free_cmd(cmd, &aborted, &tas); + +- ret = transport_release_cmd(cmd); ++ if (!aborted || tas) ++ ret = transport_put_cmd(cmd); + } else { + if (wait_for_tasks) +- 
transport_wait_for_tasks(cmd); ++ target_wait_free_cmd(cmd, &aborted, &tas); + /* + * Handle WRITE failure case where transport_generic_new_cmd() + * has already added se_cmd to state_list, but fabric has + * failed command before I/O submission. + */ +- if (cmd->state_active) { +- spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (cmd->state_active) + target_remove_from_state_list(cmd); +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); +- } + + if (cmd->se_lun) + transport_lun_remove_cmd(cmd); + +- ret = transport_put_cmd(cmd); ++ if (!aborted || tas) ++ ret = transport_put_cmd(cmd); ++ } ++ /* ++ * If the task has been internally aborted due to TMR ABORT_TASK ++ * or LUN_RESET, target_core_tmr.c is responsible for performing ++ * the remaining calls to target_put_sess_cmd(), and not the ++ * callers of this function. ++ */ ++ if (aborted) { ++ pr_debug("Detected CMD_T_ABORTED for ITT: %u\n", ++ cmd->se_tfo->get_task_tag(cmd)); ++ wait_for_completion(&cmd->cmd_wait_comp); ++ cmd->se_tfo->release_cmd(cmd); ++ ret = 1; + } + return ret; + } +@@ -2504,25 +2514,45 @@ out: + } + EXPORT_SYMBOL(target_get_sess_cmd); + ++static void target_free_cmd_mem(struct se_cmd *cmd) ++{ ++ transport_free_pages(cmd); ++ ++ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) ++ core_tmr_release_req(cmd->se_tmr_req); ++ if (cmd->t_task_cdb != cmd->__t_task_cdb) ++ kfree(cmd->t_task_cdb); ++} ++ + static void target_release_cmd_kref(struct kref *kref) + __releases(&se_cmd->se_sess->sess_cmd_lock) + { + struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); + struct se_session *se_sess = se_cmd->se_sess; ++ bool fabric_stop; + + if (list_empty(&se_cmd->se_cmd_list)) { + spin_unlock(&se_sess->sess_cmd_lock); ++ target_free_cmd_mem(se_cmd); + se_cmd->se_tfo->release_cmd(se_cmd); + return; + } +- if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { ++ ++ spin_lock(&se_cmd->t_state_lock); ++ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP); ++ 
spin_unlock(&se_cmd->t_state_lock); ++ ++ if (se_cmd->cmd_wait_set || fabric_stop) { ++ list_del_init(&se_cmd->se_cmd_list); + spin_unlock(&se_sess->sess_cmd_lock); ++ target_free_cmd_mem(se_cmd); + complete(&se_cmd->cmd_wait_comp); + return; + } +- list_del(&se_cmd->se_cmd_list); ++ list_del_init(&se_cmd->se_cmd_list); + spin_unlock(&se_sess->sess_cmd_lock); + ++ target_free_cmd_mem(se_cmd); + se_cmd->se_tfo->release_cmd(se_cmd); + } + +@@ -2534,6 +2564,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd) + struct se_session *se_sess = se_cmd->se_sess; + + if (!se_sess) { ++ target_free_cmd_mem(se_cmd); + se_cmd->se_tfo->release_cmd(se_cmd); + return 1; + } +@@ -2551,6 +2582,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) + { + struct se_cmd *se_cmd; + unsigned long flags; ++ int rc; + + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + if (se_sess->sess_tearing_down) { +@@ -2560,8 +2592,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) + se_sess->sess_tearing_down = 1; + list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); + +- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) +- se_cmd->cmd_wait_set = 1; ++ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) { ++ rc = kref_get_unless_zero(&se_cmd->cmd_kref); ++ if (rc) { ++ se_cmd->cmd_wait_set = 1; ++ spin_lock(&se_cmd->t_state_lock); ++ se_cmd->transport_state |= CMD_T_FABRIC_STOP; ++ spin_unlock(&se_cmd->t_state_lock); ++ } ++ } + + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + } +@@ -2574,15 +2613,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) + { + struct se_cmd *se_cmd, *tmp_cmd; + unsigned long flags; ++ bool tas; + + list_for_each_entry_safe(se_cmd, tmp_cmd, + &se_sess->sess_wait_list, se_cmd_list) { +- list_del(&se_cmd->se_cmd_list); ++ list_del_init(&se_cmd->se_cmd_list); + + pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" + " %d\n", se_cmd, se_cmd->t_state, + 
se_cmd->se_tfo->get_cmd_state(se_cmd)); + ++ spin_lock_irqsave(&se_cmd->t_state_lock, flags); ++ tas = (se_cmd->transport_state & CMD_T_TAS); ++ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); ++ ++ if (!target_put_sess_cmd(se_cmd)) { ++ if (tas) ++ target_put_sess_cmd(se_cmd); ++ } ++ + wait_for_completion(&se_cmd->cmd_wait_comp); + pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" + " fabric state: %d\n", se_cmd, se_cmd->t_state, +@@ -2625,34 +2674,38 @@ int transport_clear_lun_ref(struct se_lun *lun) + return 0; + } + +-/** +- * transport_wait_for_tasks - wait for completion to occur +- * @cmd: command to wait +- * +- * Called from frontend fabric context to wait for storage engine +- * to pause and/or release frontend generated struct se_cmd. +- */ +-bool transport_wait_for_tasks(struct se_cmd *cmd) ++static bool ++__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, ++ bool *aborted, bool *tas, unsigned long *flags) ++ __releases(&cmd->t_state_lock) ++ __acquires(&cmd->t_state_lock) + { +- unsigned long flags; + +- spin_lock_irqsave(&cmd->t_state_lock, flags); ++ assert_spin_locked(&cmd->t_state_lock); ++ WARN_ON_ONCE(!irqs_disabled()); ++ ++ if (fabric_stop) ++ cmd->transport_state |= CMD_T_FABRIC_STOP; ++ ++ if (cmd->transport_state & CMD_T_ABORTED) ++ *aborted = true; ++ ++ if (cmd->transport_state & CMD_T_TAS) ++ *tas = true; ++ + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && +- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) + return false; +- } + + if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && +- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) + return false; +- } + +- if (!(cmd->transport_state & CMD_T_ACTIVE)) { +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ if (!(cmd->transport_state & CMD_T_ACTIVE)) ++ return false; ++ ++ if 
(fabric_stop && *aborted) + return false; +- } + + cmd->transport_state |= CMD_T_STOP; + +@@ -2661,20 +2714,37 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) + cmd, cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); + +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ spin_unlock_irqrestore(&cmd->t_state_lock, *flags); + + wait_for_completion(&cmd->t_transport_stop_comp); + +- spin_lock_irqsave(&cmd->t_state_lock, flags); ++ spin_lock_irqsave(&cmd->t_state_lock, *flags); + cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); + + pr_debug("wait_for_tasks: Stopped wait_for_completion(" + "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", + cmd->se_tfo->get_task_tag(cmd)); + ++ return true; ++} ++ ++/** ++ * transport_wait_for_tasks - wait for completion to occur ++ * @cmd: command to wait ++ * ++ * Called from frontend fabric context to wait for storage engine ++ * to pause and/or release frontend generated struct se_cmd. ++ */ ++bool transport_wait_for_tasks(struct se_cmd *cmd) ++{ ++ unsigned long flags; ++ bool ret, aborted = false, tas = false; ++ ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + +- return true; ++ return ret; + } + EXPORT_SYMBOL(transport_wait_for_tasks); + +@@ -2960,8 +3030,13 @@ after_reason: + } + EXPORT_SYMBOL(transport_send_check_condition_and_sense); + +-int transport_check_aborted_status(struct se_cmd *cmd, int send_status) ++static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) ++ __releases(&cmd->t_state_lock) ++ __acquires(&cmd->t_state_lock) + { ++ assert_spin_locked(&cmd->t_state_lock); ++ WARN_ON_ONCE(!irqs_disabled()); ++ + if (!(cmd->transport_state & CMD_T_ABORTED)) + return 0; + +@@ -2969,19 +3044,37 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) + * If cmd has been aborted but either no status is to be sent 
or it has + * already been sent, just return + */ +- if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) ++ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) { ++ if (send_status) ++ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; + return 1; ++ } + +- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n", +- cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); ++ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:" ++ " 0x%02x ITT: 0x%08x\n", cmd->t_task_cdb[0], ++ cmd->se_tfo->get_task_tag(cmd)); + + cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; + cmd->scsi_status = SAM_STAT_TASK_ABORTED; + trace_target_cmd_complete(cmd); ++ ++ spin_unlock_irq(&cmd->t_state_lock); + cmd->se_tfo->queue_status(cmd); ++ spin_lock_irq(&cmd->t_state_lock); + + return 1; + } ++ ++int transport_check_aborted_status(struct se_cmd *cmd, int send_status) ++{ ++ int ret; ++ ++ spin_lock_irq(&cmd->t_state_lock); ++ ret = __transport_check_aborted_status(cmd, send_status); ++ spin_unlock_irq(&cmd->t_state_lock); ++ ++ return ret; ++} + EXPORT_SYMBOL(transport_check_aborted_status); + + void transport_send_task_abort(struct se_cmd *cmd) +@@ -3003,11 +3096,17 @@ void transport_send_task_abort(struct se_cmd *cmd) + */ + if (cmd->data_direction == DMA_TO_DEVICE) { + if (cmd->se_tfo->write_pending_status(cmd) != 0) { +- cmd->transport_state |= CMD_T_ABORTED; ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) { ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ goto send_abort; ++ } + cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return; + } + } ++send_abort: + cmd->scsi_status = SAM_STAT_TASK_ABORTED; + + transport_lun_remove_cmd(cmd); +diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c +index ad6c87a4653c..fbc6285905a6 100644 +--- a/drivers/usb/chipidea/otg.c ++++ b/drivers/usb/chipidea/otg.c +@@ -118,7 +118,7 
@@ static void ci_otg_work(struct work_struct *work) + int ci_hdrc_otg_init(struct ci_hdrc *ci) + { + INIT_WORK(&ci->work, ci_otg_work); +- ci->wq = create_singlethread_workqueue("ci_otg"); ++ ci->wq = create_freezable_workqueue("ci_otg"); + if (!ci->wq) { + dev_err(ci->dev, "can't create workqueue\n"); + return -ENODEV; +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h +index 173edd4ca20e..be245d073f15 100644 +--- a/drivers/usb/dwc3/core.h ++++ b/drivers/usb/dwc3/core.h +@@ -829,7 +829,6 @@ struct dwc3 { + unsigned pullups_connected:1; + unsigned resize_fifos:1; + unsigned setup_packet_pending:1; +- unsigned start_config_issued:1; + unsigned three_stage_setup:1; + unsigned usb3_lpm_capable:1; + +diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c +index 06ecd1e6871c..00f2c456f94b 100644 +--- a/drivers/usb/dwc3/ep0.c ++++ b/drivers/usb/dwc3/ep0.c +@@ -545,7 +545,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) + int ret; + u32 reg; + +- dwc->start_config_issued = false; + cfg = le16_to_cpu(ctrl->wValue); + + switch (state) { +@@ -727,10 +726,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) + dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); + ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); + break; +- case USB_REQ_SET_INTERFACE: +- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE"); +- dwc->start_config_issued = false; +- /* Fall through */ + default: + dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); + ret = dwc3_ep0_delegate_req(dwc, ctrl); +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index 6fbf461d523c..b886226be241 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -388,24 +388,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep) + dep->trb_pool_dma = 0; + } + ++static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep); ++ ++/** ++ * dwc3_gadget_start_config - Configure EP resources 
++ * @dwc: pointer to our controller context structure ++ * @dep: endpoint that is being enabled ++ * ++ * The assignment of transfer resources cannot perfectly follow the ++ * data book due to the fact that the controller driver does not have ++ * all knowledge of the configuration in advance. It is given this ++ * information piecemeal by the composite gadget framework after every ++ * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook ++ * programming model in this scenario can cause errors. For two ++ * reasons: ++ * ++ * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION ++ * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of ++ * multiple interfaces. ++ * ++ * 2) The databook does not mention doing more DEPXFERCFG for new ++ * endpoint on alt setting (8.1.6). ++ * ++ * The following simplified method is used instead: ++ * ++ * All hardware endpoints can be assigned a transfer resource and this ++ * setting will stay persistent until either a core reset or ++ * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and ++ * do DEPXFERCFG for every hardware endpoint as well. We are ++ * guaranteed that there are as many transfer resources as endpoints. ++ * ++ * This function is called for each endpoint when it is being enabled ++ * but is triggered only when called for EP0-out, which always happens ++ * first, and which should only happen in one of the above conditions. 
++ */ + static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) + { + struct dwc3_gadget_ep_cmd_params params; + u32 cmd; ++ int i; ++ int ret; ++ ++ if (dep->number) ++ return 0; + + memset(¶ms, 0x00, sizeof(params)); ++ cmd = DWC3_DEPCMD_DEPSTARTCFG; + +- if (dep->number != 1) { +- cmd = DWC3_DEPCMD_DEPSTARTCFG; +- /* XferRscIdx == 0 for ep0 and 2 for the remaining */ +- if (dep->number > 1) { +- if (dwc->start_config_issued) +- return 0; +- dwc->start_config_issued = true; +- cmd |= DWC3_DEPCMD_PARAM(2); +- } ++ ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, ¶ms); ++ if (ret) ++ return ret; + +- return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, ¶ms); ++ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { ++ struct dwc3_ep *dep = dwc->eps[i]; ++ ++ if (!dep) ++ continue; ++ ++ ret = dwc3_gadget_set_xfer_resource(dwc, dep); ++ if (ret) ++ return ret; + } + + return 0; +@@ -519,10 +561,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, + struct dwc3_trb *trb_st_hw; + struct dwc3_trb *trb_link; + +- ret = dwc3_gadget_set_xfer_resource(dwc, dep); +- if (ret) +- return ret; +- + dep->endpoint.desc = desc; + dep->comp_desc = comp_desc; + dep->type = usb_endpoint_type(desc); +@@ -1589,8 +1627,6 @@ static int dwc3_gadget_start(struct usb_gadget *g, + } + dwc3_writel(dwc->regs, DWC3_DCFG, reg); + +- dwc->start_config_issued = false; +- + /* Start with SuperSpeed Default */ + dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); + +@@ -2167,7 +2203,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) + dwc3_writel(dwc->regs, DWC3_DCTL, reg); + + dwc3_disconnect_gadget(dwc); +- dwc->start_config_issued = false; + + dwc->gadget.speed = USB_SPEED_UNKNOWN; + dwc->setup_packet_pending = false; +@@ -2218,7 +2253,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) + + dwc3_stop_active_transfers(dwc); + dwc3_clear_stall_all_ep(dwc); +- dwc->start_config_issued = false; + + /* Reset device address to zero */ + reg = dwc3_readl(dwc->regs, 
DWC3_DCFG); +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 1dd9919081f8..7a76fe4c2f9e 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -162,6 +162,9 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ ++ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ ++ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ ++ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 88540596973f..ce3d6af977b7 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -273,6 +273,7 @@ static void option_instat_callback(struct urb *urb); + #define TELIT_PRODUCT_UE910_V2 0x1012 + #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 + #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 ++#define TELIT_PRODUCT_LE922_USBCFG5 0x1045 + #define TELIT_PRODUCT_LE920 0x1200 + #define TELIT_PRODUCT_LE910 0x1201 + +@@ -318,6 +319,7 @@ static void option_instat_callback(struct urb *urb); + #define TOSHIBA_PRODUCT_G450 0x0d45 + + #define ALINK_VENDOR_ID 0x1e0e ++#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */ + #define ALINK_PRODUCT_PH300 0x9100 + #define ALINK_PRODUCT_3GU 0x9200 + +@@ -610,6 +612,10 @@ static const struct option_blacklist_info zte_1255_blacklist = { + .reserved = BIT(3) | BIT(4), + }; + ++static const struct option_blacklist_info simcom_sim7100e_blacklist = { ++ .reserved = BIT(5) | BIT(6), ++}; ++ + static const struct option_blacklist_info telit_le910_blacklist = { + 
.sendsetup = BIT(0), + .reserved = BIT(1) | BIT(2), +@@ -1130,6 +1136,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ ++ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */ ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ +@@ -1137,6 +1145,8 @@ static const struct usb_device_id option_ids[] = { + .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */ + { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff), + .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */ ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */ ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), +@@ -1188,6 +1198,8 @@ static const struct usb_device_id option_ids[] = { + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), + .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), +@@ -1657,6 +1669,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, + { 
USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) }, + { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E), ++ .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), + .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist + }, +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c +index 514fa91cf74e..f0a2ad15a992 100644 +--- a/drivers/usb/serial/qcserial.c ++++ b/drivers/usb/serial/qcserial.c +@@ -155,14 +155,17 @@ static const struct usb_device_id id_table[] = { + {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */ +- {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx/EM74xx */ +- {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx/EM74xx */ ++ {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */ ++ {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ ++ {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */ ++ {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ + {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ ++ {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ + + /* Huawei devices */ + {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei 
me906e) */ +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c +index e9851add6f4e..c0f4ab83aaa8 100644 +--- a/drivers/vfio/pci/vfio_pci.c ++++ b/drivers/vfio/pci/vfio_pci.c +@@ -446,7 +446,8 @@ static long vfio_pci_ioctl(void *device_data, + info.num_regions = VFIO_PCI_NUM_REGIONS; + info.num_irqs = VFIO_PCI_NUM_IRQS; + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? ++ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { + struct pci_dev *pdev = vdev->pdev; +@@ -520,7 +521,8 @@ static long vfio_pci_ioctl(void *device_data, + return -EINVAL; + } + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? ++ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { + struct vfio_irq_info info; +@@ -555,7 +557,8 @@ static long vfio_pci_ioctl(void *device_data, + else + info.flags |= VFIO_IRQ_INFO_NORESIZE; + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? ++ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_SET_IRQS) { + struct vfio_irq_set hdr; +diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c +index abcff7a1aa66..973b24ffe332 100644 +--- a/drivers/vfio/platform/vfio_platform_common.c ++++ b/drivers/vfio/platform/vfio_platform_common.c +@@ -163,7 +163,8 @@ static long vfio_platform_ioctl(void *device_data, + info.num_regions = vdev->num_regions; + info.num_irqs = vdev->num_irqs; + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? 
++ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { + struct vfio_region_info info; +@@ -184,7 +185,8 @@ static long vfio_platform_ioctl(void *device_data, + info.size = vdev->regions[info.index].size; + info.flags = vdev->regions[info.index].flags; + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? ++ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { + struct vfio_irq_info info; +@@ -203,7 +205,8 @@ static long vfio_platform_ioctl(void *device_data, + info.flags = vdev->irqs[info.index].flags; + info.count = vdev->irqs[info.index].count; + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? ++ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_SET_IRQS) { + struct vfio_irq_set hdr; +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c +index 57d8c37a002b..092216540756 100644 +--- a/drivers/vfio/vfio_iommu_type1.c ++++ b/drivers/vfio/vfio_iommu_type1.c +@@ -986,7 +986,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, + + info.iova_pgsizes = vfio_pgsize_bitmap(iommu); + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? ++ -EFAULT : 0; + + } else if (cmd == VFIO_IOMMU_MAP_DMA) { + struct vfio_iommu_type1_dma_map map; +@@ -1019,7 +1020,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, + if (ret) + return ret; + +- return copy_to_user((void __user *)arg, &unmap, minsz); ++ return copy_to_user((void __user *)arg, &unmap, minsz) ? 
++ -EFAULT : 0; + } + + return -ENOTTY; +diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h +index 58e38d586f52..4d529f3e40df 100644 +--- a/drivers/xen/xen-pciback/pciback.h ++++ b/drivers/xen/xen-pciback/pciback.h +@@ -37,6 +37,7 @@ struct xen_pcibk_device { + struct xen_pci_sharedinfo *sh_info; + unsigned long flags; + struct work_struct op_work; ++ struct xen_pci_op op; + }; + + struct xen_pcibk_dev_data { +diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c +index c4a0666de6f5..9cf4653b6bd7 100644 +--- a/drivers/xen/xen-pciback/pciback_ops.c ++++ b/drivers/xen/xen-pciback/pciback_ops.c +@@ -197,13 +197,27 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev, + struct xen_pcibk_dev_data *dev_data; + int i, result; + struct msix_entry *entries; ++ u16 cmd; + + if (unlikely(verbose_request)) + printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n", + pci_name(dev)); ++ + if (op->value > SH_INFO_MAX_VEC) + return -EINVAL; + ++ if (dev->msix_enabled) ++ return -EALREADY; ++ ++ /* ++ * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able ++ * to access the BARs where the MSI-X entries reside. ++ * But VF devices are unique in which the PF needs to be checked. 
++ */ ++ pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd); ++ if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY)) ++ return -ENXIO; ++ + entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL); + if (entries == NULL) + return -ENOMEM; +@@ -298,9 +312,14 @@ void xen_pcibk_do_op(struct work_struct *data) + container_of(data, struct xen_pcibk_device, op_work); + struct pci_dev *dev; + struct xen_pcibk_dev_data *dev_data = NULL; +- struct xen_pci_op *op = &pdev->sh_info->op; ++ struct xen_pci_op *op = &pdev->op; + int test_intx = 0; ++#ifdef CONFIG_PCI_MSI ++ unsigned int nr = 0; ++#endif + ++ *op = pdev->sh_info->op; ++ barrier(); + dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn); + + if (dev == NULL) +@@ -326,6 +345,7 @@ void xen_pcibk_do_op(struct work_struct *data) + op->err = xen_pcibk_disable_msi(pdev, dev, op); + break; + case XEN_PCI_OP_enable_msix: ++ nr = op->value; + op->err = xen_pcibk_enable_msix(pdev, dev, op); + break; + case XEN_PCI_OP_disable_msix: +@@ -342,6 +362,17 @@ void xen_pcibk_do_op(struct work_struct *data) + if ((dev_data->enable_intx != test_intx)) + xen_pcibk_control_isr(dev, 0 /* no reset */); + } ++ pdev->sh_info->op.err = op->err; ++ pdev->sh_info->op.value = op->value; ++#ifdef CONFIG_PCI_MSI ++ if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) { ++ unsigned int i; ++ ++ for (i = 0; i < nr; i++) ++ pdev->sh_info->op.msix_entries[i].vector = ++ op->msix_entries[i].vector; ++ } ++#endif + /* Tell the driver domain that we're done. 
*/ + wmb(); + clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags); +diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c +index b7f51504f85a..c561d530be2e 100644 +--- a/drivers/xen/xen-scsiback.c ++++ b/drivers/xen/xen-scsiback.c +@@ -941,12 +941,12 @@ out: + spin_unlock_irqrestore(&info->v2p_lock, flags); + + out_free: +- mutex_lock(&tpg->tv_tpg_mutex); +- tpg->tv_tpg_fe_count--; +- mutex_unlock(&tpg->tv_tpg_mutex); +- +- if (err) ++ if (err) { ++ mutex_lock(&tpg->tv_tpg_mutex); ++ tpg->tv_tpg_fe_count--; ++ mutex_unlock(&tpg->tv_tpg_mutex); + kfree(new); ++ } + + return err; + } +diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h +index 252f5c15806b..78a7b1d73354 100644 +--- a/fs/cifs/cifsfs.h ++++ b/fs/cifs/cifsfs.h +@@ -31,19 +31,15 @@ + * so that it will fit. We use hash_64 to convert the value to 31 bits, and + * then add 1, to ensure that we don't end up with a 0 as the value. + */ +-#if BITS_PER_LONG == 64 + static inline ino_t + cifs_uniqueid_to_ino_t(u64 fileid) + { ++ if ((sizeof(ino_t)) < (sizeof(u64))) ++ return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1; ++ + return (ino_t)fileid; ++ + } +-#else +-static inline ino_t +-cifs_uniqueid_to_ino_t(u64 fileid) +-{ +- return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1; +-} +-#endif + + extern struct file_system_type cifs_fs_type; + extern const struct address_space_operations cifs_addr_ops; +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c +index f26ffbfc64d8..f1a5067d5494 100644 +--- a/fs/cifs/cifssmb.c ++++ b/fs/cifs/cifssmb.c +@@ -1395,11 +1395,10 @@ openRetry: + * current bigbuf. 
+ */ + static int +-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++discard_remaining_data(struct TCP_Server_Info *server) + { + unsigned int rfclen = get_rfc1002_length(server->smallbuf); + int remaining = rfclen + 4 - server->total_read; +- struct cifs_readdata *rdata = mid->callback_data; + + while (remaining > 0) { + int length; +@@ -1413,10 +1412,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) + remaining -= length; + } + +- dequeue_mid(mid, rdata->result); + return 0; + } + ++static int ++cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++{ ++ int length; ++ struct cifs_readdata *rdata = mid->callback_data; ++ ++ length = discard_remaining_data(server); ++ dequeue_mid(mid, rdata->result); ++ return length; ++} ++ + int + cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + { +@@ -1445,6 +1454,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + return length; + server->total_read += length; + ++ if (server->ops->is_status_pending && ++ server->ops->is_status_pending(buf, server, 0)) { ++ discard_remaining_data(server); ++ return -1; ++ } ++ + /* Was the SMB read successful? 
*/ + rdata->result = server->ops->map_error(buf, false); + if (rdata->result != 0) { +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index 894f259d3989..657a9c5c4fff 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -1042,21 +1042,25 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp, + { + char *data_offset; + struct create_context *cc; +- unsigned int next = 0; ++ unsigned int next; ++ unsigned int remaining; + char *name; + + data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset); ++ remaining = le32_to_cpu(rsp->CreateContextsLength); + cc = (struct create_context *)data_offset; +- do { +- cc = (struct create_context *)((char *)cc + next); ++ while (remaining >= sizeof(struct create_context)) { + name = le16_to_cpu(cc->NameOffset) + (char *)cc; +- if (le16_to_cpu(cc->NameLength) != 4 || +- strncmp(name, "RqLs", 4)) { +- next = le32_to_cpu(cc->Next); +- continue; +- } +- return server->ops->parse_lease_buf(cc, epoch); +- } while (next != 0); ++ if (le16_to_cpu(cc->NameLength) == 4 && ++ strncmp(name, "RqLs", 4) == 0) ++ return server->ops->parse_lease_buf(cc, epoch); ++ ++ next = le32_to_cpu(cc->Next); ++ if (!next) ++ break; ++ remaining -= next; ++ cc = (struct create_context *)((char *)cc + next); ++ } + + return 0; + } +diff --git a/fs/dcache.c b/fs/dcache.c +index 0046ab7d4f3d..10bce74c427f 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -269,9 +269,6 @@ static inline int dname_external(const struct dentry *dentry) + return dentry->d_name.name != dentry->d_iname; + } + +-/* +- * Make sure other CPUs see the inode attached before the type is set. 
+- */ + static inline void __d_set_inode_and_type(struct dentry *dentry, + struct inode *inode, + unsigned type_flags) +@@ -279,28 +276,18 @@ static inline void __d_set_inode_and_type(struct dentry *dentry, + unsigned flags; + + dentry->d_inode = inode; +- smp_wmb(); + flags = READ_ONCE(dentry->d_flags); + flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); + flags |= type_flags; + WRITE_ONCE(dentry->d_flags, flags); + } + +-/* +- * Ideally, we want to make sure that other CPUs see the flags cleared before +- * the inode is detached, but this is really a violation of RCU principles +- * since the ordering suggests we should always set inode before flags. +- * +- * We should instead replace or discard the entire dentry - but that sucks +- * performancewise on mass deletion/rename. +- */ + static inline void __d_clear_type_and_inode(struct dentry *dentry) + { + unsigned flags = READ_ONCE(dentry->d_flags); + + flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); + WRITE_ONCE(dentry->d_flags, flags); +- smp_wmb(); + dentry->d_inode = NULL; + } + +@@ -322,17 +309,17 @@ static void dentry_free(struct dentry *dentry) + } + + /** +- * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups ++ * dentry_rcuwalk_invalidate - invalidate in-progress rcu-walk lookups + * @dentry: the target dentry + * After this call, in-progress rcu-walk path lookup will fail. This + * should be called after unhashing, and after changing d_inode (if + * the dentry has not already been unhashed). 
+ */ +-static inline void dentry_rcuwalk_barrier(struct dentry *dentry) ++static inline void dentry_rcuwalk_invalidate(struct dentry *dentry) + { +- assert_spin_locked(&dentry->d_lock); +- /* Go through a barrier */ +- write_seqcount_barrier(&dentry->d_seq); ++ lockdep_assert_held(&dentry->d_lock); ++ /* Go through am invalidation barrier */ ++ write_seqcount_invalidate(&dentry->d_seq); + } + + /* +@@ -370,9 +357,11 @@ static void dentry_unlink_inode(struct dentry * dentry) + __releases(dentry->d_inode->i_lock) + { + struct inode *inode = dentry->d_inode; ++ ++ raw_write_seqcount_begin(&dentry->d_seq); + __d_clear_type_and_inode(dentry); + hlist_del_init(&dentry->d_u.d_alias); +- dentry_rcuwalk_barrier(dentry); ++ raw_write_seqcount_end(&dentry->d_seq); + spin_unlock(&dentry->d_lock); + spin_unlock(&inode->i_lock); + if (!inode->i_nlink) +@@ -494,7 +483,7 @@ void __d_drop(struct dentry *dentry) + __hlist_bl_del(&dentry->d_hash); + dentry->d_hash.pprev = NULL; + hlist_bl_unlock(b); +- dentry_rcuwalk_barrier(dentry); ++ dentry_rcuwalk_invalidate(dentry); + } + } + EXPORT_SYMBOL(__d_drop); +@@ -1757,8 +1746,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) + spin_lock(&dentry->d_lock); + if (inode) + hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); ++ raw_write_seqcount_begin(&dentry->d_seq); + __d_set_inode_and_type(dentry, inode, add_flags); +- dentry_rcuwalk_barrier(dentry); ++ raw_write_seqcount_end(&dentry->d_seq); + spin_unlock(&dentry->d_lock); + fsnotify_d_instantiate(dentry, inode); + } +diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c +index 9e92c9c2d319..b5f3cc7274f6 100644 +--- a/fs/hpfs/namei.c ++++ b/fs/hpfs/namei.c +@@ -377,12 +377,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) + struct inode *inode = d_inode(dentry); + dnode_secno dno; + int r; +- int rep = 0; + int err; + + hpfs_lock(dir->i_sb); + hpfs_adjust_length(name, &len); +-again: ++ + err = -ENOENT; + de = map_dirent(dir, 
hpfs_i(dir)->i_dno, name, len, &dno, &qbh); + if (!de) +@@ -402,33 +401,9 @@ again: + hpfs_error(dir->i_sb, "there was error when removing dirent"); + err = -EFSERROR; + break; +- case 2: /* no space for deleting, try to truncate file */ +- ++ case 2: /* no space for deleting */ + err = -ENOSPC; +- if (rep++) +- break; +- +- dentry_unhash(dentry); +- if (!d_unhashed(dentry)) { +- hpfs_unlock(dir->i_sb); +- return -ENOSPC; +- } +- if (generic_permission(inode, MAY_WRITE) || +- !S_ISREG(inode->i_mode) || +- get_write_access(inode)) { +- d_rehash(dentry); +- } else { +- struct iattr newattrs; +- /*pr_info("truncating file before delete.\n");*/ +- newattrs.ia_size = 0; +- newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME; +- err = notify_change(dentry, &newattrs, NULL); +- put_write_access(inode); +- if (!err) +- goto again; +- } +- hpfs_unlock(dir->i_sb); +- return -ENOSPC; ++ break; + default: + drop_nlink(inode); + err = 0; +diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking +index 3ea36554107f..8918ac905a3b 100644 +--- a/fs/jffs2/README.Locking ++++ b/fs/jffs2/README.Locking +@@ -2,10 +2,6 @@ + JFFS2 LOCKING DOCUMENTATION + --------------------------- + +-At least theoretically, JFFS2 does not require the Big Kernel Lock +-(BKL), which was always helpfully obtained for it by Linux 2.4 VFS +-code. It has its own locking, as described below. +- + This document attempts to describe the existing locking rules for + JFFS2. It is not expected to remain perfectly up to date, but ought to + be fairly close. +@@ -69,6 +65,7 @@ Ordering constraints: + any f->sem held. + 2. Never attempt to lock two file mutexes in one thread. + No ordering rules have been made for doing so. ++ 3. Never lock a page cache page with f->sem held. 
+ + + erase_completion_lock spinlock +diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c +index a3750f902adc..c1f04947d7dc 100644 +--- a/fs/jffs2/build.c ++++ b/fs/jffs2/build.c +@@ -49,7 +49,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) + + + static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, +- struct jffs2_inode_cache *ic) ++ struct jffs2_inode_cache *ic, ++ int *dir_hardlinks) + { + struct jffs2_full_dirent *fd; + +@@ -68,19 +69,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, + dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", + fd->name, fd->ino, ic->ino); + jffs2_mark_node_obsolete(c, fd->raw); ++ /* Clear the ic/raw union so it doesn't cause problems later. */ ++ fd->ic = NULL; + continue; + } + ++ /* From this point, fd->raw is no longer used so we can set fd->ic */ ++ fd->ic = child_ic; ++ child_ic->pino_nlink++; ++ /* If we appear (at this stage) to have hard-linked directories, ++ * set a flag to trigger a scan later */ + if (fd->type == DT_DIR) { +- if (child_ic->pino_nlink) { +- JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", +- fd->name, fd->ino, ic->ino); +- /* TODO: What do we do about it? */ +- } else { +- child_ic->pino_nlink = ic->ino; +- } +- } else +- child_ic->pino_nlink++; ++ child_ic->flags |= INO_FLAGS_IS_DIR; ++ if (child_ic->pino_nlink > 1) ++ *dir_hardlinks = 1; ++ } + + dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); + /* Can't free scan_dents so far. 
We might need them in pass 2 */ +@@ -94,8 +97,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, + */ + static int jffs2_build_filesystem(struct jffs2_sb_info *c) + { +- int ret; +- int i; ++ int ret, i, dir_hardlinks = 0; + struct jffs2_inode_cache *ic; + struct jffs2_full_dirent *fd; + struct jffs2_full_dirent *dead_fds = NULL; +@@ -119,7 +121,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + /* Now scan the directory tree, increasing nlink according to every dirent found. */ + for_each_inode(i, c, ic) { + if (ic->scan_dents) { +- jffs2_build_inode_pass1(c, ic); ++ jffs2_build_inode_pass1(c, ic, &dir_hardlinks); + cond_resched(); + } + } +@@ -155,6 +157,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + } + + dbg_fsbuild("pass 2a complete\n"); ++ ++ if (dir_hardlinks) { ++ /* If we detected directory hardlinks earlier, *hopefully* ++ * they are gone now because some of the links were from ++ * dead directories which still had some old dirents lying ++ * around and not yet garbage-collected, but which have ++ * been discarded above. So clear the pino_nlink field ++ * in each directory, so that the final scan below can ++ * print appropriate warnings. */ ++ for_each_inode(i, c, ic) { ++ if (ic->flags & INO_FLAGS_IS_DIR) ++ ic->pino_nlink = 0; ++ } ++ } + dbg_fsbuild("freeing temporary data structures\n"); + + /* Finally, we can scan again and free the dirent structs */ +@@ -162,6 +178,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + while(ic->scan_dents) { + fd = ic->scan_dents; + ic->scan_dents = fd->next; ++ /* We do use the pino_nlink field to count nlink of ++ * directories during fs build, so set it to the ++ * parent ino# now. Now that there's hopefully only ++ * one. */ ++ if (fd->type == DT_DIR) { ++ if (!fd->ic) { ++ /* We'll have complained about it and marked the coresponding ++ raw node obsolete already. Just skip it. 
*/ ++ continue; ++ } ++ ++ /* We *have* to have set this in jffs2_build_inode_pass1() */ ++ BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR)); ++ ++ /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks ++ * is set. Otherwise, we know this should never trigger anyway, so ++ * we don't do the check. And ic->pino_nlink still contains the nlink ++ * value (which is 1). */ ++ if (dir_hardlinks && fd->ic->pino_nlink) { ++ JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n", ++ fd->name, fd->ino, ic->ino, fd->ic->pino_nlink); ++ /* Should we unlink it from its previous parent? */ ++ } ++ ++ /* For directories, ic->pino_nlink holds that parent inode # */ ++ fd->ic->pino_nlink = ic->ino; ++ } + jffs2_free_full_dirent(fd); + } + ic->scan_dents = NULL; +@@ -240,11 +283,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, + + /* Reduce nlink of the child. If it's now zero, stick it on the + dead_fds list to be cleaned up later. 
Else just free the fd */ +- +- if (fd->type == DT_DIR) +- child_ic->pino_nlink = 0; +- else +- child_ic->pino_nlink--; ++ child_ic->pino_nlink--; + + if (!child_ic->pino_nlink) { + dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n", +diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c +index f509f62e12f6..3361979d728c 100644 +--- a/fs/jffs2/file.c ++++ b/fs/jffs2/file.c +@@ -137,39 +137,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + struct page *pg; + struct inode *inode = mapping->host; + struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); +- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); +- struct jffs2_raw_inode ri; +- uint32_t alloc_len = 0; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + uint32_t pageofs = index << PAGE_CACHE_SHIFT; + int ret = 0; + +- jffs2_dbg(1, "%s()\n", __func__); +- +- if (pageofs > inode->i_size) { +- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, +- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); +- if (ret) +- return ret; +- } +- +- mutex_lock(&f->sem); + pg = grab_cache_page_write_begin(mapping, index, flags); +- if (!pg) { +- if (alloc_len) +- jffs2_complete_reservation(c); +- mutex_unlock(&f->sem); ++ if (!pg) + return -ENOMEM; +- } + *pagep = pg; + +- if (alloc_len) { ++ jffs2_dbg(1, "%s()\n", __func__); ++ ++ if (pageofs > inode->i_size) { + /* Make new hole frag from old EOF to new page */ ++ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); ++ struct jffs2_raw_inode ri; + struct jffs2_full_dnode *fn; ++ uint32_t alloc_len; + + jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", + (unsigned int)inode->i_size, pageofs); + ++ ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, ++ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); ++ if (ret) ++ goto out_page; ++ ++ mutex_lock(&f->sem); + memset(&ri, 0, sizeof(ri)); + + ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); +@@ -196,6 +190,7 @@ static int jffs2_write_begin(struct file *filp, struct 
address_space *mapping, + if (IS_ERR(fn)) { + ret = PTR_ERR(fn); + jffs2_complete_reservation(c); ++ mutex_unlock(&f->sem); + goto out_page; + } + ret = jffs2_add_full_dnode_to_inode(c, f, fn); +@@ -210,10 +205,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + jffs2_mark_node_obsolete(c, fn->raw); + jffs2_free_full_dnode(fn); + jffs2_complete_reservation(c); ++ mutex_unlock(&f->sem); + goto out_page; + } + jffs2_complete_reservation(c); + inode->i_size = pageofs; ++ mutex_unlock(&f->sem); + } + + /* +@@ -222,18 +219,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + * case of a short-copy. + */ + if (!PageUptodate(pg)) { ++ mutex_lock(&f->sem); + ret = jffs2_do_readpage_nolock(inode, pg); ++ mutex_unlock(&f->sem); + if (ret) + goto out_page; + } +- mutex_unlock(&f->sem); + jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); + return ret; + + out_page: + unlock_page(pg); + page_cache_release(pg); +- mutex_unlock(&f->sem); + return ret; + } + +diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c +index 5a2dec2b064c..95d5880a63ee 100644 +--- a/fs/jffs2/gc.c ++++ b/fs/jffs2/gc.c +@@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era + BUG_ON(start > orig_start); + } + +- /* First, use readpage() to read the appropriate page into the page cache */ +- /* Q: What happens if we actually try to GC the _same_ page for which commit_write() +- * triggered garbage collection in the first place? +- * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the +- * page OK. We'll actually write it out again in commit_write, which is a little +- * suboptimal, but at least we're correct. +- */ ++ /* The rules state that we must obtain the page lock *before* f->sem, so ++ * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's ++ * actually going to *change* so we're safe; we only allow reading. 
++ * ++ * It is important to note that jffs2_write_begin() will ensure that its ++ * page is marked Uptodate before allocating space. That means that if we ++ * end up here trying to GC the *same* page that jffs2_write_begin() is ++ * trying to write out, read_cache_page() will not deadlock. */ ++ mutex_unlock(&f->sem); + pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg); ++ mutex_lock(&f->sem); + + if (IS_ERR(pg_ptr)) { + pr_warn("read_cache_page() returned error: %ld\n", +diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h +index fa35ff79ab35..0637271f3770 100644 +--- a/fs/jffs2/nodelist.h ++++ b/fs/jffs2/nodelist.h +@@ -194,6 +194,7 @@ struct jffs2_inode_cache { + #define INO_STATE_CLEARING 6 /* In clear_inode() */ + + #define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */ ++#define INO_FLAGS_IS_DIR 0x02 /* is a directory */ + + #define RAWNODE_CLASS_INODE_CACHE 0 + #define RAWNODE_CLASS_XATTR_DATUM 1 +@@ -249,7 +250,10 @@ struct jffs2_readinode_info + + struct jffs2_full_dirent + { +- struct jffs2_raw_node_ref *raw; ++ union { ++ struct jffs2_raw_node_ref *raw; ++ struct jffs2_inode_cache *ic; /* Just during part of build */ ++ }; + struct jffs2_full_dirent *next; + uint32_t version; + uint32_t ino; /* == zero for unlink */ +diff --git a/fs/namei.c b/fs/namei.c +index ccd7f98d85b9..f3cc848da8bc 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -1619,10 +1619,10 @@ static inline int walk_component(struct nameidata *nd, struct path *path, + if (err < 0) + goto out_err; + +- inode = path->dentry->d_inode; + err = -ENOENT; + if (d_is_negative(path->dentry)) + goto out_path_put; ++ inode = path->dentry->d_inode; + } + + if (should_follow_link(path->dentry, follow)) { +@@ -3078,6 +3078,7 @@ retry_lookup: + path_to_nameidata(path, nd); + goto out; + } ++ inode = path->dentry->d_inode; + finish_lookup: + /* we _can_ be in RCU mode here */ + if (should_follow_link(path->dentry, !symlink_ok)) { +@@ -3152,6 +3153,10 @@ opened: + goto exit_fput; + } + out: ++ 
if (unlikely(error > 0)) { ++ WARN_ON(1); ++ error = -EINVAL; ++ } + if (got_write) + mnt_drop_write(nd->path.mnt); + path_put(&save_parent); +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 2c4f41c34366..84706204cc33 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -2331,9 +2331,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, + dentry = d_add_unique(dentry, igrab(state->inode)); + if (dentry == NULL) { + dentry = opendata->dentry; +- } else if (dentry != ctx->dentry) { ++ } else { + dput(ctx->dentry); +- ctx->dentry = dget(dentry); ++ ctx->dentry = dentry; + } + nfs_set_verifier(dentry, + nfs_save_change_attribute(d_inode(opendata->dir))); +diff --git a/include/linux/ata.h b/include/linux/ata.h +index 5dfbcd8887bb..2e5fb1c31251 100644 +--- a/include/linux/ata.h ++++ b/include/linux/ata.h +@@ -487,8 +487,8 @@ enum ata_tf_protocols { + }; + + enum ata_ioctls { +- ATA_IOC_GET_IO32 = 0x309, +- ATA_IOC_SET_IO32 = 0x324, ++ ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */ ++ ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */ + }; + + /* core structures */ +diff --git a/include/linux/dcache.h b/include/linux/dcache.h +index 167ec0934049..ca9df4521734 100644 +--- a/include/linux/dcache.h ++++ b/include/linux/dcache.h +@@ -408,9 +408,7 @@ static inline bool d_mountpoint(const struct dentry *dentry) + */ + static inline unsigned __d_entry_type(const struct dentry *dentry) + { +- unsigned type = READ_ONCE(dentry->d_flags); +- smp_rmb(); +- return type & DCACHE_ENTRY_TYPE; ++ return dentry->d_flags & DCACHE_ENTRY_TYPE; + } + + static inline bool d_is_miss(const struct dentry *dentry) +diff --git a/include/linux/libata.h b/include/linux/libata.h +index e0e33787c485..11c2dd114732 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -717,7 +717,7 @@ struct ata_device { + union { + u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ + u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ +- }; ++ } ____cacheline_aligned; 
+ + /* DEVSLP Timing Variables from Identify Device Data Log */ + u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; +diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h +index b95f914ce083..150f43a9149c 100644 +--- a/include/linux/nfs_fs.h ++++ b/include/linux/nfs_fs.h +@@ -540,9 +540,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, + + static inline loff_t nfs_size_to_loff_t(__u64 size) + { +- if (size > (__u64) OFFSET_MAX - 1) +- return OFFSET_MAX - 1; +- return (loff_t) size; ++ return min_t(u64, size, OFFSET_MAX); + } + + static inline ino_t +diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h +index 5f68d0a391ce..c07e3a536099 100644 +--- a/include/linux/seqlock.h ++++ b/include/linux/seqlock.h +@@ -266,13 +266,13 @@ static inline void write_seqcount_end(seqcount_t *s) + } + + /** +- * write_seqcount_barrier - invalidate in-progress read-side seq operations ++ * write_seqcount_invalidate - invalidate in-progress read-side seq operations + * @s: pointer to seqcount_t + * +- * After write_seqcount_barrier, no read-side seq operations will complete ++ * After write_seqcount_invalidate, no read-side seq operations will complete + * successfully and see data older than this. 
+ */ +-static inline void write_seqcount_barrier(seqcount_t *s) ++static inline void write_seqcount_invalidate(seqcount_t *s) + { + smp_wmb(); + s->sequence+=2; +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index 480e9f82dfea..2b40a1fab293 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -167,6 +167,7 @@ enum se_cmd_flags_table { + SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, + SCF_COMPARE_AND_WRITE = 0x00080000, + SCF_COMPARE_AND_WRITE_POST = 0x00100000, ++ SCF_ACK_KREF = 0x00400000, + }; + + /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ +@@ -522,7 +523,7 @@ struct se_cmd { + sense_reason_t (*execute_cmd)(struct se_cmd *); + sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *, + u32, enum dma_data_direction); +- sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); ++ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *); + + unsigned char *t_task_cdb; + unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; +@@ -537,6 +538,8 @@ struct se_cmd { + #define CMD_T_DEV_ACTIVE (1 << 7) + #define CMD_T_REQUEST_STOP (1 << 8) + #define CMD_T_BUSY (1 << 9) ++#define CMD_T_TAS (1 << 10) ++#define CMD_T_FABRIC_STOP (1 << 11) + spinlock_t t_state_lock; + struct completion t_transport_stop_comp; + +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index c4de47fc5cca..f69ec1295b0b 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -683,7 +683,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos) + * The ftrace subsystem is for showing formats only. + * They can not be enabled or disabled via the event files. 
+ */ +- if (call->class && call->class->reg) ++ if (call->class && call->class->reg && ++ !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) + return file; + } + +diff --git a/mm/memory.c b/mm/memory.c +index 2a9e09870c20..701d9ad45c46 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -3363,8 +3363,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, + if (unlikely(pmd_none(*pmd)) && + unlikely(__pte_alloc(mm, vma, pmd, address))) + return VM_FAULT_OOM; +- /* if an huge pmd materialized from under us just retry later */ +- if (unlikely(pmd_trans_huge(*pmd))) ++ /* ++ * If a huge pmd materialized under us just retry later. Use ++ * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd ++ * didn't become pmd_trans_huge under us and then back to pmd_none, as ++ * a result of MADV_DONTNEED running immediately after a huge pmd fault ++ * in a different thread of this mm, in turn leading to a misleading ++ * pmd_trans_huge() retval. All we have to ensure is that it is a ++ * regular pmd that we can walk with pte_offset_map() and we can do that ++ * through an atomic read in C, which is what pmd_trans_unstable() ++ * provides. 
++ */ ++ if (unlikely(pmd_trans_unstable(pmd))) + return 0; + /* + * A regular pmd is established and it can't morph into a huge pmd +diff --git a/mm/migrate.c b/mm/migrate.c +index 2c37b1a44a8c..8c4841a6dc4c 100644 +--- a/mm/migrate.c ++++ b/mm/migrate.c +@@ -1557,7 +1557,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page, + (GFP_HIGHUSER_MOVABLE | + __GFP_THISNODE | __GFP_NOMEMALLOC | + __GFP_NORETRY | __GFP_NOWARN) & +- ~GFP_IOFS, 0); ++ ~__GFP_WAIT, 0); + + return newpage; + } +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c +index e51af69c61bf..84201c21705e 100644 +--- a/net/ceph/messenger.c ++++ b/net/ceph/messenger.c +@@ -1203,6 +1203,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, + return new_piece; + } + ++static size_t sizeof_footer(struct ceph_connection *con) ++{ ++ return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ? ++ sizeof(struct ceph_msg_footer) : ++ sizeof(struct ceph_msg_footer_old); ++} ++ + static void prepare_message_data(struct ceph_msg *msg, u32 data_len) + { + BUG_ON(!msg); +@@ -2326,9 +2333,9 @@ static int read_partial_message(struct ceph_connection *con) + ceph_pr_addr(&con->peer_addr.in_addr), + seq, con->in_seq + 1); + con->in_base_pos = -front_len - middle_len - data_len - +- sizeof(m->footer); ++ sizeof_footer(con); + con->in_tag = CEPH_MSGR_TAG_READY; +- return 0; ++ return 1; + } else if ((s64)seq - (s64)con->in_seq > 1) { + pr_err("read_partial_message bad seq %lld expected %lld\n", + seq, con->in_seq + 1); +@@ -2358,10 +2365,10 @@ static int read_partial_message(struct ceph_connection *con) + /* skip this message */ + dout("alloc_msg said skip message\n"); + con->in_base_pos = -front_len - middle_len - data_len - +- sizeof(m->footer); ++ sizeof_footer(con); + con->in_tag = CEPH_MSGR_TAG_READY; + con->in_seq++; +- return 0; ++ return 1; + } + + BUG_ON(!con->in_msg); +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 36b9ac48b8fb..06bf4010d3ed 100644 +--- 
a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -376,6 +376,9 @@ int ip6_forward(struct sk_buff *skb) + if (skb->pkt_type != PACKET_HOST) + goto drop; + ++ if (unlikely(skb->sk)) ++ goto drop; ++ + if (skb_warn_if_lro(skb)) + goto drop; + +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c +index 2928afffbb81..8d79e70bd978 100644 +--- a/net/sunrpc/cache.c ++++ b/net/sunrpc/cache.c +@@ -1218,7 +1218,7 @@ int qword_get(char **bpp, char *dest, int bufsize) + if (bp[0] == '\\' && bp[1] == 'x') { + /* HEX STRING */ + bp += 2; +- while (len < bufsize) { ++ while (len < bufsize - 1) { + int h, l; + + h = hex_to_bin(bp[0]); +diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c +index b9c0910fb8c4..0608f216f359 100644 +--- a/sound/core/control_compat.c ++++ b/sound/core/control_compat.c +@@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 { + unsigned char reserved[128]; + }; + ++#ifdef CONFIG_X86_X32 ++/* x32 has a different alignment for 64bit values from ia32 */ ++struct snd_ctl_elem_value_x32 { ++ struct snd_ctl_elem_id id; ++ unsigned int indirect; /* bit-field causes misalignment */ ++ union { ++ s32 integer[128]; ++ unsigned char data[512]; ++ s64 integer64[64]; ++ } value; ++ unsigned char reserved[128]; ++}; ++#endif /* CONFIG_X86_X32 */ + + /* get the value type and count of the control */ + static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id, +@@ -219,9 +232,11 @@ static int get_elem_size(int type, int count) + + static int copy_ctl_value_from_user(struct snd_card *card, + struct snd_ctl_elem_value *data, +- struct snd_ctl_elem_value32 __user *data32, ++ void __user *userdata, ++ void __user *valuep, + int *typep, int *countp) + { ++ struct snd_ctl_elem_value32 __user *data32 = userdata; + int i, type, size; + int uninitialized_var(count); + unsigned int indirect; +@@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card, + if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || + type == 
SNDRV_CTL_ELEM_TYPE_INTEGER) { + for (i = 0; i < count; i++) { ++ s32 __user *intp = valuep; + int val; +- if (get_user(val, &data32->value.integer[i])) ++ if (get_user(val, &intp[i])) + return -EFAULT; + data->value.integer.value[i] = val; + } +@@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card, + dev_err(card->dev, "snd_ioctl32_ctl_elem_value: unknown type %d\n", type); + return -EINVAL; + } +- if (copy_from_user(data->value.bytes.data, +- data32->value.data, size)) ++ if (copy_from_user(data->value.bytes.data, valuep, size)) + return -EFAULT; + } + +@@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card, + } + + /* restore the value to 32bit */ +-static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32, ++static int copy_ctl_value_to_user(void __user *userdata, ++ void __user *valuep, + struct snd_ctl_elem_value *data, + int type, int count) + { +@@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32, + if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || + type == SNDRV_CTL_ELEM_TYPE_INTEGER) { + for (i = 0; i < count; i++) { ++ s32 __user *intp = valuep; + int val; + val = data->value.integer.value[i]; +- if (put_user(val, &data32->value.integer[i])) ++ if (put_user(val, &intp[i])) + return -EFAULT; + } + } else { + size = get_elem_size(type, count); +- if (copy_to_user(data32->value.data, +- data->value.bytes.data, size)) ++ if (copy_to_user(valuep, data->value.bytes.data, size)) + return -EFAULT; + } + return 0; + } + +-static int snd_ctl_elem_read_user_compat(struct snd_card *card, +- struct snd_ctl_elem_value32 __user *data32) ++static int ctl_elem_read_user(struct snd_card *card, ++ void __user *userdata, void __user *valuep) + { + struct snd_ctl_elem_value *data; + int err, type, count; +@@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card, + if (data == NULL) + return -ENOMEM; + +- if ((err = 
copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) ++ err = copy_ctl_value_from_user(card, data, userdata, valuep, ++ &type, &count); ++ if (err < 0) + goto error; + + snd_power_lock(card); +@@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card, + err = snd_ctl_elem_read(card, data); + snd_power_unlock(card); + if (err >= 0) +- err = copy_ctl_value_to_user(data32, data, type, count); ++ err = copy_ctl_value_to_user(userdata, valuep, data, ++ type, count); + error: + kfree(data); + return err; + } + +-static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, +- struct snd_ctl_elem_value32 __user *data32) ++static int ctl_elem_write_user(struct snd_ctl_file *file, ++ void __user *userdata, void __user *valuep) + { + struct snd_ctl_elem_value *data; + struct snd_card *card = file->card; +@@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, + if (data == NULL) + return -ENOMEM; + +- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) ++ err = copy_ctl_value_from_user(card, data, userdata, valuep, ++ &type, &count); ++ if (err < 0) + goto error; + + snd_power_lock(card); +@@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, + err = snd_ctl_elem_write(card, file, data); + snd_power_unlock(card); + if (err >= 0) +- err = copy_ctl_value_to_user(data32, data, type, count); ++ err = copy_ctl_value_to_user(userdata, valuep, data, ++ type, count); + error: + kfree(data); + return err; + } + ++static int snd_ctl_elem_read_user_compat(struct snd_card *card, ++ struct snd_ctl_elem_value32 __user *data32) ++{ ++ return ctl_elem_read_user(card, data32, &data32->value); ++} ++ ++static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, ++ struct snd_ctl_elem_value32 __user *data32) ++{ ++ return ctl_elem_write_user(file, data32, &data32->value); ++} ++ ++#ifdef CONFIG_X86_X32 ++static int 
snd_ctl_elem_read_user_x32(struct snd_card *card, ++ struct snd_ctl_elem_value_x32 __user *data32) ++{ ++ return ctl_elem_read_user(card, data32, &data32->value); ++} ++ ++static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file, ++ struct snd_ctl_elem_value_x32 __user *data32) ++{ ++ return ctl_elem_write_user(file, data32, &data32->value); ++} ++#endif /* CONFIG_X86_X32 */ ++ + /* add or replace a user control */ + static int snd_ctl_elem_add_compat(struct snd_ctl_file *file, + struct snd_ctl_elem_info32 __user *data32, +@@ -393,6 +441,10 @@ enum { + SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32), + SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32), + SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32), ++ SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns + return snd_ctl_elem_add_compat(ctl, argp, 0); + case SNDRV_CTL_IOCTL_ELEM_REPLACE32: + return snd_ctl_elem_add_compat(ctl, argp, 1); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_CTL_IOCTL_ELEM_READ_X32: ++ return snd_ctl_elem_read_user_x32(ctl->card, argp); ++ case SNDRV_CTL_IOCTL_ELEM_WRITE_X32: ++ return snd_ctl_elem_write_user_x32(ctl, argp); ++#endif /* CONFIG_X86_X32 */ + } + + down_read(&snd_ioctl_rwsem); +diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c +index 9630e9f72b7b..1f64ab0c2a95 100644 +--- a/sound/core/pcm_compat.c ++++ b/sound/core/pcm_compat.c +@@ -183,6 +183,14 @@ static int snd_pcm_ioctl_channel_info_compat(struct snd_pcm_substream *substream + return err; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has the same struct as 
x86-64 for snd_pcm_channel_info */ ++static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream, ++ struct snd_pcm_channel_info __user *src); ++#define snd_pcm_ioctl_channel_info_x32(s, p) \ ++ snd_pcm_channel_info_user(s, p) ++#endif /* CONFIG_X86_X32 */ ++ + struct snd_pcm_status32 { + s32 state; + struct compat_timespec trigger_tstamp; +@@ -243,6 +251,71 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream, + return err; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has 64bit timespec and 64bit alignment */ ++struct snd_pcm_status_x32 { ++ s32 state; ++ u32 rsvd; /* alignment */ ++ struct timespec trigger_tstamp; ++ struct timespec tstamp; ++ u32 appl_ptr; ++ u32 hw_ptr; ++ s32 delay; ++ u32 avail; ++ u32 avail_max; ++ u32 overrange; ++ s32 suspended_state; ++ u32 audio_tstamp_data; ++ struct timespec audio_tstamp; ++ struct timespec driver_tstamp; ++ u32 audio_tstamp_accuracy; ++ unsigned char reserved[52-2*sizeof(struct timespec)]; ++} __packed; ++ ++#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst)) ++ ++static int snd_pcm_status_user_x32(struct snd_pcm_substream *substream, ++ struct snd_pcm_status_x32 __user *src, ++ bool ext) ++{ ++ struct snd_pcm_status status; ++ int err; ++ ++ memset(&status, 0, sizeof(status)); ++ /* ++ * with extension, parameters are read/write, ++ * get audio_tstamp_data from user, ++ * ignore rest of status structure ++ */ ++ if (ext && get_user(status.audio_tstamp_data, ++ (u32 __user *)(&src->audio_tstamp_data))) ++ return -EFAULT; ++ err = snd_pcm_status(substream, &status); ++ if (err < 0) ++ return err; ++ ++ if (clear_user(src, sizeof(*src))) ++ return -EFAULT; ++ if (put_user(status.state, &src->state) || ++ put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) || ++ put_timespec(&status.tstamp, &src->tstamp) || ++ put_user(status.appl_ptr, &src->appl_ptr) || ++ put_user(status.hw_ptr, &src->hw_ptr) || ++ put_user(status.delay, &src->delay) || ++ put_user(status.avail, 
&src->avail) || ++ put_user(status.avail_max, &src->avail_max) || ++ put_user(status.overrange, &src->overrange) || ++ put_user(status.suspended_state, &src->suspended_state) || ++ put_user(status.audio_tstamp_data, &src->audio_tstamp_data) || ++ put_timespec(&status.audio_tstamp, &src->audio_tstamp) || ++ put_timespec(&status.driver_tstamp, &src->driver_tstamp) || ++ put_user(status.audio_tstamp_accuracy, &src->audio_tstamp_accuracy)) ++ return -EFAULT; ++ ++ return err; ++} ++#endif /* CONFIG_X86_X32 */ ++ + /* both for HW_PARAMS and HW_REFINE */ + static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream, + int refine, +@@ -469,6 +542,93 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has 64bit timespec and 64bit alignment */ ++struct snd_pcm_mmap_status_x32 { ++ s32 state; ++ s32 pad1; ++ u32 hw_ptr; ++ u32 pad2; /* alignment */ ++ struct timespec tstamp; ++ s32 suspended_state; ++ struct timespec audio_tstamp; ++} __packed; ++ ++struct snd_pcm_mmap_control_x32 { ++ u32 appl_ptr; ++ u32 avail_min; ++}; ++ ++struct snd_pcm_sync_ptr_x32 { ++ u32 flags; ++ u32 rsvd; /* alignment */ ++ union { ++ struct snd_pcm_mmap_status_x32 status; ++ unsigned char reserved[64]; ++ } s; ++ union { ++ struct snd_pcm_mmap_control_x32 control; ++ unsigned char reserved[64]; ++ } c; ++} __packed; ++ ++static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream, ++ struct snd_pcm_sync_ptr_x32 __user *src) ++{ ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ volatile struct snd_pcm_mmap_status *status; ++ volatile struct snd_pcm_mmap_control *control; ++ u32 sflags; ++ struct snd_pcm_mmap_control scontrol; ++ struct snd_pcm_mmap_status sstatus; ++ snd_pcm_uframes_t boundary; ++ int err; ++ ++ if (snd_BUG_ON(!runtime)) ++ return -EINVAL; ++ ++ if (get_user(sflags, &src->flags) || ++ get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) || ++ 
get_user(scontrol.avail_min, &src->c.control.avail_min)) ++ return -EFAULT; ++ if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) { ++ err = snd_pcm_hwsync(substream); ++ if (err < 0) ++ return err; ++ } ++ status = runtime->status; ++ control = runtime->control; ++ boundary = recalculate_boundary(runtime); ++ if (!boundary) ++ boundary = 0x7fffffff; ++ snd_pcm_stream_lock_irq(substream); ++ /* FIXME: we should consider the boundary for the sync from app */ ++ if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) ++ control->appl_ptr = scontrol.appl_ptr; ++ else ++ scontrol.appl_ptr = control->appl_ptr % boundary; ++ if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN)) ++ control->avail_min = scontrol.avail_min; ++ else ++ scontrol.avail_min = control->avail_min; ++ sstatus.state = status->state; ++ sstatus.hw_ptr = status->hw_ptr % boundary; ++ sstatus.tstamp = status->tstamp; ++ sstatus.suspended_state = status->suspended_state; ++ sstatus.audio_tstamp = status->audio_tstamp; ++ snd_pcm_stream_unlock_irq(substream); ++ if (put_user(sstatus.state, &src->s.status.state) || ++ put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) || ++ put_timespec(&sstatus.tstamp, &src->s.status.tstamp) || ++ put_user(sstatus.suspended_state, &src->s.status.suspended_state) || ++ put_timespec(&sstatus.audio_tstamp, &src->s.status.audio_tstamp) || ++ put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) || ++ put_user(scontrol.avail_min, &src->c.control.avail_min)) ++ return -EFAULT; ++ ++ return 0; ++} ++#endif /* CONFIG_X86_X32 */ + + /* + */ +@@ -487,7 +647,12 @@ enum { + SNDRV_PCM_IOCTL_WRITEN_FRAMES32 = _IOW('A', 0x52, struct snd_xfern32), + SNDRV_PCM_IOCTL_READN_FRAMES32 = _IOR('A', 0x53, struct snd_xfern32), + SNDRV_PCM_IOCTL_SYNC_PTR32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr32), +- ++#ifdef CONFIG_X86_X32 ++ SNDRV_PCM_IOCTL_CHANNEL_INFO_X32 = _IOR('A', 0x32, struct snd_pcm_channel_info), ++ SNDRV_PCM_IOCTL_STATUS_X32 = _IOR('A', 0x20, struct snd_pcm_status_x32), ++ SNDRV_PCM_IOCTL_STATUS_EXT_X32 = _IOWR('A', 
0x24, struct snd_pcm_status_x32), ++ SNDRV_PCM_IOCTL_SYNC_PTR_X32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -559,6 +724,16 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l + return snd_pcm_ioctl_rewind_compat(substream, argp); + case SNDRV_PCM_IOCTL_FORWARD32: + return snd_pcm_ioctl_forward_compat(substream, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_PCM_IOCTL_STATUS_X32: ++ return snd_pcm_status_user_x32(substream, argp, false); ++ case SNDRV_PCM_IOCTL_STATUS_EXT_X32: ++ return snd_pcm_status_user_x32(substream, argp, true); ++ case SNDRV_PCM_IOCTL_SYNC_PTR_X32: ++ return snd_pcm_ioctl_sync_ptr_x32(substream, argp); ++ case SNDRV_PCM_IOCTL_CHANNEL_INFO_X32: ++ return snd_pcm_ioctl_channel_info_x32(substream, argp); ++#endif /* CONFIG_X86_X32 */ + } + + return -ENOIOCTLCMD; +diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c +index 5268c1f58c25..09a89094dcf7 100644 +--- a/sound/core/rawmidi_compat.c ++++ b/sound/core/rawmidi_compat.c +@@ -94,9 +94,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has 64bit timespec and 64bit alignment */ ++struct snd_rawmidi_status_x32 { ++ s32 stream; ++ u32 rsvd; /* alignment */ ++ struct timespec tstamp; ++ u32 avail; ++ u32 xruns; ++ unsigned char reserved[16]; ++} __attribute__((packed)); ++ ++#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst)) ++ ++static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile, ++ struct snd_rawmidi_status_x32 __user *src) ++{ ++ int err; ++ struct snd_rawmidi_status status; ++ ++ if (rfile->output == NULL) ++ return -EINVAL; ++ if (get_user(status.stream, &src->stream)) ++ return -EFAULT; ++ ++ switch (status.stream) { ++ case SNDRV_RAWMIDI_STREAM_OUTPUT: ++ err = 
snd_rawmidi_output_status(rfile->output, &status); ++ break; ++ case SNDRV_RAWMIDI_STREAM_INPUT: ++ err = snd_rawmidi_input_status(rfile->input, &status); ++ break; ++ default: ++ return -EINVAL; ++ } ++ if (err < 0) ++ return err; ++ ++ if (put_timespec(&status.tstamp, &src->tstamp) || ++ put_user(status.avail, &src->avail) || ++ put_user(status.xruns, &src->xruns)) ++ return -EFAULT; ++ ++ return 0; ++} ++#endif /* CONFIG_X86_X32 */ ++ + enum { + SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32), + SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -115,6 +164,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign + return snd_rawmidi_ioctl_params_compat(rfile, argp); + case SNDRV_RAWMIDI_IOCTL_STATUS32: + return snd_rawmidi_ioctl_status_compat(rfile, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_RAWMIDI_IOCTL_STATUS_X32: ++ return snd_rawmidi_ioctl_status_x32(rfile, argp); ++#endif /* CONFIG_X86_X32 */ + } + return -ENOIOCTLCMD; + } +diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c +index 72873a46afeb..4b53b8f2330f 100644 +--- a/sound/core/seq/oss/seq_oss.c ++++ b/sound/core/seq/oss/seq_oss.c +@@ -148,8 +148,6 @@ odev_release(struct inode *inode, struct file *file) + if ((dp = file->private_data) == NULL) + return 0; + +- snd_seq_oss_drain_write(dp); +- + mutex_lock(®ister_mutex); + snd_seq_oss_release(dp); + mutex_unlock(®ister_mutex); +diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h +index b43924325249..d7b4d016b547 100644 +--- a/sound/core/seq/oss/seq_oss_device.h ++++ b/sound/core/seq/oss/seq_oss_device.h +@@ -127,7 +127,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo 
*dp, const char __user *buf, int co + unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait); + + void snd_seq_oss_reset(struct seq_oss_devinfo *dp); +-void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp); + + /* */ + void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time); +diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c +index dad5b1123e46..0b9c18b2e45f 100644 +--- a/sound/core/seq/oss/seq_oss_init.c ++++ b/sound/core/seq/oss/seq_oss_init.c +@@ -436,22 +436,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp) + + + /* +- * Wait until the queue is empty (if we don't have nonblock) +- */ +-void +-snd_seq_oss_drain_write(struct seq_oss_devinfo *dp) +-{ +- if (! dp->timer->running) +- return; +- if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) && +- dp->writeq) { +- while (snd_seq_oss_writeq_sync(dp->writeq)) +- ; +- } +-} +- +- +-/* + * reset sequencer devices + */ + void +diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c +index e05802ae6e1b..2e908225d754 100644 +--- a/sound/core/timer_compat.c ++++ b/sound/core/timer_compat.c +@@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file, + struct snd_timer_status32 __user *_status) + { + struct snd_timer_user *tu; +- struct snd_timer_status status; ++ struct snd_timer_status32 status; + + tu = file->private_data; + if (snd_BUG_ON(!tu->timeri)) + return -ENXIO; + memset(&status, 0, sizeof(status)); +- status.tstamp = tu->tstamp; ++ status.tstamp.tv_sec = tu->tstamp.tv_sec; ++ status.tstamp.tv_nsec = tu->tstamp.tv_nsec; + status.resolution = snd_timer_resolution(tu->timeri); + status.lost = tu->timeri->lost; + status.overrun = tu->overrun; +@@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has the same struct as x86-64 */ ++#define snd_timer_user_status_x32(file, s) \ ++ 
snd_timer_user_status(file, s) ++#endif /* CONFIG_X86_X32 */ ++ + /* + */ + + enum { + SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32), + SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns + return snd_timer_user_info_compat(file, argp); + case SNDRV_TIMER_IOCTL_STATUS32: + return snd_timer_user_status_compat(file, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_TIMER_IOCTL_STATUS_X32: ++ return snd_timer_user_status_x32(file, argp); ++#endif /* CONFIG_X86_X32 */ + } + return -ENOIOCTLCMD; + } +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index df34c78a6ced..91cc6897d595 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -3477,6 +3477,29 @@ static void gpio2_mic_hotkey_event(struct hda_codec *codec, + input_sync(spec->kb_dev); + } + ++static int alc_register_micmute_input_device(struct hda_codec *codec) ++{ ++ struct alc_spec *spec = codec->spec; ++ ++ spec->kb_dev = input_allocate_device(); ++ if (!spec->kb_dev) { ++ codec_err(codec, "Out of memory (input_allocate_device)\n"); ++ return -ENOMEM; ++ } ++ spec->kb_dev->name = "Microphone Mute Button"; ++ spec->kb_dev->evbit[0] = BIT_MASK(EV_KEY); ++ spec->kb_dev->keybit[BIT_WORD(KEY_MICMUTE)] = BIT_MASK(KEY_MICMUTE); ++ ++ if (input_register_device(spec->kb_dev)) { ++ codec_err(codec, "input_register_device failed\n"); ++ input_free_device(spec->kb_dev); ++ spec->kb_dev = NULL; ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ + static void alc280_fixup_hp_gpio2_mic_hotkey(struct hda_codec *codec, + const struct hda_fixup *fix, int action) + { +@@ -3494,20 +3517,8 @@ static void 
alc280_fixup_hp_gpio2_mic_hotkey(struct hda_codec *codec, + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { +- spec->kb_dev = input_allocate_device(); +- if (!spec->kb_dev) { +- codec_err(codec, "Out of memory (input_allocate_device)\n"); ++ if (alc_register_micmute_input_device(codec) != 0) + return; +- } +- spec->kb_dev->name = "Microphone Mute Button"; +- spec->kb_dev->evbit[0] = BIT_MASK(EV_KEY); +- spec->kb_dev->keybit[BIT_WORD(KEY_MICMUTE)] = BIT_MASK(KEY_MICMUTE); +- if (input_register_device(spec->kb_dev)) { +- codec_err(codec, "input_register_device failed\n"); +- input_free_device(spec->kb_dev); +- spec->kb_dev = NULL; +- return; +- } + + snd_hda_add_verbs(codec, gpio_init); + snd_hda_codec_write_cache(codec, codec->core.afg, 0, +@@ -3537,6 +3548,47 @@ static void alc280_fixup_hp_gpio2_mic_hotkey(struct hda_codec *codec, + } + } + ++static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ /* Line2 = mic mute hotkey ++ GPIO2 = mic mute LED */ ++ static const struct hda_verb gpio_init[] = { ++ { 0x01, AC_VERB_SET_GPIO_MASK, 0x04 }, ++ { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04 }, ++ {} ++ }; ++ ++ struct alc_spec *spec = codec->spec; ++ ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) { ++ if (alc_register_micmute_input_device(codec) != 0) ++ return; ++ ++ snd_hda_add_verbs(codec, gpio_init); ++ snd_hda_jack_detect_enable_callback(codec, 0x1b, ++ gpio2_mic_hotkey_event); ++ ++ spec->gen.cap_sync_hook = alc_fixup_gpio_mic_mute_hook; ++ spec->gpio_led = 0; ++ spec->mute_led_polarity = 0; ++ spec->gpio_mic_led_mask = 0x04; ++ return; ++ } ++ ++ if (!spec->kb_dev) ++ return; ++ ++ switch (action) { ++ case HDA_FIXUP_ACT_PROBE: ++ spec->init_amp = ALC_INIT_DEFAULT; ++ break; ++ case HDA_FIXUP_ACT_FREE: ++ input_unregister_device(spec->kb_dev); ++ spec->kb_dev = NULL; ++ } ++} ++ + static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, + const struct 
hda_fixup *fix, int action) + { +@@ -3720,6 +3772,10 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, + + static void alc_headset_mode_default(struct hda_codec *codec) + { ++ static struct coef_fw coef0225[] = { ++ UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10), ++ {} ++ }; + static struct coef_fw coef0255[] = { + WRITE_COEF(0x45, 0xc089), + WRITE_COEF(0x45, 0xc489), +@@ -3761,6 +3817,9 @@ static void alc_headset_mode_default(struct hda_codec *codec) + }; + + switch (codec->core.vendor_id) { ++ case 0x10ec0225: ++ alc_process_coef_fw(codec, coef0225); ++ break; + case 0x10ec0255: + case 0x10ec0256: + alc_process_coef_fw(codec, coef0255); +@@ -4570,6 +4629,14 @@ enum { + ALC288_FIXUP_DISABLE_AAMIX, + ALC292_FIXUP_DELL_E7X, + ALC292_FIXUP_DISABLE_AAMIX, ++ ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ++ ALC275_FIXUP_DELL_XPS, ++ ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, ++ ALC293_FIXUP_LENOVO_SPK_NOISE, ++ ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, ++ ALC255_FIXUP_DELL_SPK_NOISE, ++ ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, ++ ALC280_FIXUP_HP_HEADSET_MIC, + }; + + static const struct hda_fixup alc269_fixups[] = { +@@ -5131,6 +5198,71 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC292_FIXUP_DISABLE_AAMIX + }, ++ [ALC298_FIXUP_DELL1_MIC_NO_PRESENCE] = { ++ .type = HDA_FIXUP_PINS, ++ .v.pins = (const struct hda_pintbl[]) { ++ { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */ ++ { 0x1a, 0x01a1913d }, /* use as headphone mic, without its own jack detect */ ++ { } ++ }, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_HEADSET_MODE ++ }, ++ [ALC275_FIXUP_DELL_XPS] = { ++ .type = HDA_FIXUP_VERBS, ++ .v.verbs = (const struct hda_verb[]) { ++ /* Enables internal speaker */ ++ {0x20, AC_VERB_SET_COEF_INDEX, 0x1f}, ++ {0x20, AC_VERB_SET_PROC_COEF, 0x00c0}, ++ {0x20, AC_VERB_SET_COEF_INDEX, 0x30}, ++ {0x20, AC_VERB_SET_PROC_COEF, 0x00b1}, ++ {} ++ } ++ }, ++ [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE] = { ++ 
.type = HDA_FIXUP_VERBS, ++ .v.verbs = (const struct hda_verb[]) { ++ /* Disable pass-through path for FRONT 14h */ ++ {0x20, AC_VERB_SET_COEF_INDEX, 0x36}, ++ {0x20, AC_VERB_SET_PROC_COEF, 0x1737}, ++ {} ++ }, ++ .chained = true, ++ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE ++ }, ++ [ALC293_FIXUP_LENOVO_SPK_NOISE] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc_fixup_disable_aamix, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_THINKPAD_ACPI ++ }, ++ [ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc233_fixup_lenovo_line2_mic_hotkey, ++ }, ++ [ALC255_FIXUP_DELL_SPK_NOISE] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc_fixup_disable_aamix, ++ .chained = true, ++ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE ++ }, ++ [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = { ++ .type = HDA_FIXUP_VERBS, ++ .v.verbs = (const struct hda_verb[]) { ++ /* Disable pass-through path for FRONT 14h */ ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 }, ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 }, ++ {} ++ }, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE ++ }, ++ [ALC280_FIXUP_HP_HEADSET_MIC] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc_fixup_disable_aamix, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_HEADSET_MIC, ++ }, + }; + + static const struct snd_pci_quirk alc269_fixup_tbl[] = { +@@ -5142,10 +5274,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK), + SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), ++ SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), + SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), + SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), + SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", 
ALC283_FIXUP_CHROME_BOOK), + SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), ++ SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), + SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X), + SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X), + SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER), +@@ -5169,6 +5303,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX), + SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX), + SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX), ++ SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), ++ SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), +@@ -5228,6 +5364,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), ++ SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), + SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), +@@ -5275,6 +5412,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK), + SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), + SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", 
ALC292_FIXUP_TPT440_DOCK), ++ SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), ++ SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), + SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), + SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), + SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), +@@ -5284,6 +5423,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK), + SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), + SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), ++ SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), + SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), + SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), +@@ -5365,6 +5505,9 @@ static const struct hda_model_fixup alc269_fixup_models[] = { + {.id = ALC292_FIXUP_TPT440, .name = "tpt440"}, + {} + }; ++#define ALC225_STANDARD_PINS \ ++ {0x12, 0xb7a60130}, \ ++ {0x21, 0x04211020} + + #define ALC255_STANDARD_PINS \ + {0x18, 0x411111f0}, \ +@@ -5414,7 +5557,20 @@ static const struct hda_model_fixup alc269_fixup_models[] = { + {0x1d, 0x40700001}, \ + {0x1e, 0x411111f0} + ++#define ALC298_STANDARD_PINS \ ++ {0x18, 0x411111f0}, \ ++ {0x19, 0x411111f0}, \ ++ {0x1a, 0x411111f0}, \ ++ {0x1e, 0x411111f0}, \ ++ {0x1f, 0x411111f0} ++ + static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { ++ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, ++ ALC225_STANDARD_PINS, ++ {0x14, 0x901701a0}), ++ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, ++ ALC225_STANDARD_PINS, ++ {0x14, 0x901701b0}), + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", 
ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, + ALC255_STANDARD_PINS, + {0x12, 0x40300000}, +@@ -5708,6 +5864,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + {0x16, 0x411111f0}, + {0x18, 0x411111f0}, + {0x19, 0x411111f0}), ++ SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ++ ALC298_STANDARD_PINS, ++ {0x12, 0x90a60130}, ++ {0x13, 0x40000000}, ++ {0x14, 0x411111f0}, ++ {0x17, 0x90170140}, ++ {0x1d, 0x4068a36d}, ++ {0x21, 0x03211020}), + {} + }; + +diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c +index c19e021ccf66..11246280945d 100644 +--- a/sound/pci/rme9652/hdsp.c ++++ b/sound/pci/rme9652/hdsp.c +@@ -2878,7 +2878,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl + { + struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); + +- ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp); ++ ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp); + return 0; + } + +@@ -2890,7 +2890,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl + + if (!snd_hdsp_use_is_exclusive(hdsp)) + return -EBUSY; +- val = ucontrol->value.enumerated.item[0]; ++ val = ucontrol->value.integer.value[0]; + spin_lock_irq(&hdsp->lock); + if (val != hdsp_dds_offset(hdsp)) + change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 
1 : 0; +diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c +index cb666c73712d..7f6190606f5e 100644 +--- a/sound/pci/rme9652/hdspm.c ++++ b/sound/pci/rme9652/hdspm.c +@@ -1601,6 +1601,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate) + { + u64 n; + ++ if (snd_BUG_ON(rate <= 0)) ++ return; ++ + if (rate >= 112000) + rate /= 4; + else if (rate >= 56000) +@@ -2215,6 +2218,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm) + } else { + /* slave mode, return external sample rate */ + rate = hdspm_external_sample_rate(hdspm); ++ if (!rate) ++ rate = hdspm->system_sample_rate; + } + } + +@@ -2260,8 +2265,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol, + ucontrol) + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); ++ int rate = ucontrol->value.integer.value[0]; + +- hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]); ++ if (rate < 27000 || rate > 207000) ++ return -EINVAL; ++ hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]); + return 0; + } + +@@ -4449,7 +4457,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol, + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); + +- ucontrol->value.enumerated.item[0] = hdspm->tco->term; ++ ucontrol->value.integer.value[0] = hdspm->tco->term; + + return 0; + } +@@ -4460,8 +4468,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol, + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); + +- if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) { +- hdspm->tco->term = ucontrol->value.enumerated.item[0]; ++ if (hdspm->tco->term != ucontrol->value.integer.value[0]) { ++ hdspm->tco->term = ucontrol->value.integer.value[0]; + + hdspm_tco_write(hdspm); + +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index 37d8ababfc04..a4d03e5da3e0 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -1121,6 +1121,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio 
*chip) + case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */ + case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */ + case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */ ++ case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ + case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ + case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ + case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */ +diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST +index 11ccbb22ea2b..13d0458afc71 100644 +--- a/tools/perf/MANIFEST ++++ b/tools/perf/MANIFEST +@@ -28,24 +28,20 @@ include/asm-generic/bitops/const_hweight.h + include/asm-generic/bitops/fls64.h + include/asm-generic/bitops/__fls.h + include/asm-generic/bitops/fls.h +-include/linux/const.h + include/linux/perf_event.h + include/linux/rbtree.h + include/linux/list.h + include/linux/hash.h + include/linux/stringify.h +-lib/find_next_bit.c + lib/hweight.c + lib/rbtree.c + include/linux/swab.h + arch/*/include/asm/unistd*.h +-arch/*/include/asm/perf_regs.h + arch/*/include/uapi/asm/unistd*.h + arch/*/include/uapi/asm/perf_regs.h + arch/*/lib/memcpy*.S + arch/*/lib/memset*.S + include/linux/poison.h +-include/linux/magic.h + include/linux/hw_breakpoint.h + include/linux/rbtree_augmented.h + include/uapi/linux/perf_event.h +diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c +index 950064a0942d..934d56f6803c 100644 +--- a/virt/kvm/arm/vgic.c ++++ b/virt/kvm/arm/vgic.c +@@ -1602,8 +1602,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) + static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs) + { + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; +- +- int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8; ++ int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS); ++ int sz = nr_longs * sizeof(unsigned long); + vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL); + vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL); + vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL); +diff --git 
a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c +index 44660aee335f..f84f5856520a 100644 +--- a/virt/kvm/async_pf.c ++++ b/virt/kvm/async_pf.c +@@ -169,7 +169,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, + * do alloc nowait since if we are going to sleep anyway we + * may as well sleep faulting in page + */ +- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT); ++ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN); + if (!work) + return 0; + |