| field | value | date |
|---|---|---|
| author | Mike Pagano <mpagano@gentoo.org> | 2017-09-02 13:45:29 -0400 |
| committer | Mike Pagano <mpagano@gentoo.org> | 2017-09-02 13:45:29 -0400 |
| commit | df69a103f795031b5153434c19e868e81043ac2c (patch) | |
| tree | c54e2265c4073c27e63d5afcbcb9dfc2bf99b7e1 | |
| parent | Linux patch 4.9.46 (diff) | |
| download | linux-patches-df69a103f795031b5153434c19e868e81043ac2c.tar.gz, linux-patches-df69a103f795031b5153434c19e868e81043ac2c.tar.bz2, linux-patches-df69a103f795031b5153434c19e868e81043ac2c.zip | |
Linux patch 4.9.47 (tag: 4.9-48)
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | 0000_README | 4 |
| -rw-r--r-- | 1046_linux-4.9.47.patch | 504 |

2 files changed, 508 insertions, 0 deletions
```diff
diff --git a/0000_README b/0000_README
index 9af1e8a7..9258add9 100644
--- a/0000_README
+++ b/0000_README
@@ -227,6 +227,10 @@ Patch:  1045_linux-4.9.46.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.46
 
+Patch:  1046_linux-4.9.47.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.47
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
diff --git a/1046_linux-4.9.47.patch b/1046_linux-4.9.47.patch
new file mode 100644
index 00000000..f1c88c80
--- /dev/null
+++ b/1046_linux-4.9.47.patch
@@ -0,0 +1,504 @@
+diff --git a/Makefile b/Makefile
+index 846ef1b57a02..a0abbfc15a49 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 46
++SUBLEVEL = 47
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 710511cadd50..0c060c5e844a 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -829,22 +829,22 @@ void stage2_unmap_vm(struct kvm *kvm)
+  * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
+  * underlying level-2 and level-3 tables before freeing the actual level-1 table
+  * and setting the struct pointer to NULL.
+- *
+- * Note we don't need locking here as this is only called when the VM is
+- * destroyed, which can only be done once.
+  */
+ void kvm_free_stage2_pgd(struct kvm *kvm)
+ {
+-	if (kvm->arch.pgd == NULL)
+-		return;
++	void *pgd = NULL;
+ 
+ 	spin_lock(&kvm->mmu_lock);
+-	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
++	if (kvm->arch.pgd) {
++		unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
++		pgd = kvm->arch.pgd;
++		kvm->arch.pgd = NULL;
++	}
+ 	spin_unlock(&kvm->mmu_lock);
+ 
+ 	/* Free the HW pgd, one page at a time */
+-	free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
+-	kvm->arch.pgd = NULL;
++	if (pgd)
++		free_pages_exact(pgd, S2_PGD_SIZE);
+ }
+ 
+ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 394c61db5566..1d5890f19ca3 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -157,9 +157,11 @@ void fpsimd_thread_switch(struct task_struct *next)
+ 
+ void fpsimd_flush_thread(void)
+ {
++	preempt_disable();
+ 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+ 	fpsimd_flush_task_state(current);
+ 	set_thread_flag(TIF_FOREIGN_FPSTATE);
++	preempt_enable();
+ }
+ 
+ /*
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 0e90c7e0279c..fec5b1ce97f8 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -373,8 +373,11 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
+ 	 * signal first. We do not need to release the mmap_sem because it
+ 	 * would already be released in __lock_page_or_retry in mm/filemap.c.
+ 	 */
+-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
++	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
++		if (!user_mode(regs))
++			goto no_context;
+ 		return 0;
++	}
+ 
+ 	/*
+ 	 * Major/minor page fault accounting is only done on the initial
+diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
+index d34bd370074b..6c5020163db0 100644
+--- a/arch/x86/include/asm/io.h
++++ b/arch/x86/include/asm/io.h
+@@ -304,13 +304,13 @@ static inline unsigned type in##bwl##_p(int port) \
+ static inline void outs##bwl(int port, const void *addr, unsigned long count) \
+ { \
+ 	asm volatile("rep; outs" #bwl \
+-		     : "+S"(addr), "+c"(count) : "d"(port)); \
++		     : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
+ } \
+ \
+ static inline void ins##bwl(int port, void *addr, unsigned long count) \
+ { \
+ 	asm volatile("rep; ins" #bwl \
+-		     : "+D"(addr), "+c"(count) : "d"(port)); \
++		     : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
+ }
+ 
+ BUILDIO(b, b, char)
+diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c
+index 257a9eadd595..4ac6764f4897 100644
+--- a/drivers/net/wireless/intersil/p54/fwio.c
++++ b/drivers/net/wireless/intersil/p54/fwio.c
+@@ -488,7 +488,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
+ 
+ 			entry += sizeof(__le16);
+ 			chan->pa_points_per_curve = 8;
+-			memset(chan->curve_data, 0, sizeof(*chan->curve_data));
++			memset(chan->curve_data, 0, sizeof(chan->curve_data));
+ 			memcpy(chan->curve_data, entry,
+ 			       sizeof(struct p54_pa_curve_data_sample) *
+ 			       min((u8)8, curve_data->points_per_channel));
+diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
+index 1910100638a2..00602abec0ea 100644
+--- a/drivers/scsi/isci/remote_node_context.c
++++ b/drivers/scsi/isci/remote_node_context.c
+@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
+ {
+ 	static const char * const strings[] = RNC_STATES;
+ 
++	if (state >= ARRAY_SIZE(strings))
++		return "UNKNOWN";
++
+ 	return strings[state];
+ }
+ #undef C
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index f753df25ba34..fed37aabf828 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -142,6 +142,7 @@ typedef struct sg_fd {		/* holds the state of a file descriptor */
+ 	struct sg_device *parentdp;	/* owning device */
+ 	wait_queue_head_t read_wait;	/* queue read until command done */
+ 	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
++	struct mutex f_mutex;	/* protect against changes in this fd */
+ 	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT */
+ 	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
+ 	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
+@@ -155,6 +156,7 @@ typedef struct sg_fd {		/* holds the state of a file descriptor */
+ 	unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
+ 	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
+ 	char mmap_called;	/* 0 -> mmap() never called on this fd */
++	char res_in_use;	/* 1 -> 'reserve' array in use */
+ 	struct kref f_ref;
+ 	struct execute_work ew;
+ } Sg_fd;
+@@ -198,7 +200,6 @@ static void sg_remove_sfp(struct kref *);
+ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+ static Sg_request *sg_add_request(Sg_fd * sfp);
+ static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
+-static int sg_res_in_use(Sg_fd * sfp);
+ static Sg_device *sg_get_dev(int dev);
+ static void sg_device_destroy(struct kref *kref);
+ 
+@@ -614,6 +615,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+ 	}
+ 	buf += SZ_SG_HEADER;
+ 	__get_user(opcode, buf);
++	mutex_lock(&sfp->f_mutex);
+ 	if (sfp->next_cmd_len > 0) {
+ 		cmd_size = sfp->next_cmd_len;
+ 		sfp->next_cmd_len = 0;	/* reset so only this write() effected */
+@@ -622,6 +624,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+ 		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
+ 			cmd_size = 12;
+ 	}
++	mutex_unlock(&sfp->f_mutex);
+ 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
+ 		"sg_write:   scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
+ 	/* Determine buffer size.  */
+@@ -721,7 +724,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
+ 			sg_remove_request(sfp, srp);
+ 			return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
+ 		}
+-		if (sg_res_in_use(sfp)) {
++		if (sfp->res_in_use) {
+ 			sg_remove_request(sfp, srp);
+ 			return -EBUSY;	/* reserve buffer already being used */
+ 		}
+@@ -892,7 +895,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+ 			return result;
+ 		if (val) {
+ 			sfp->low_dma = 1;
+-			if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
++			if ((0 == sfp->low_dma) && !sfp->res_in_use) {
+ 				val = (int) sfp->reserve.bufflen;
+ 				sg_remove_scat(sfp, &sfp->reserve);
+ 				sg_build_reserve(sfp, val);
+@@ -967,12 +970,18 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+ 			return -EINVAL;
+ 		val = min_t(int, val,
+ 			    max_sectors_bytes(sdp->device->request_queue));
++		mutex_lock(&sfp->f_mutex);
+ 		if (val != sfp->reserve.bufflen) {
+-			if (sg_res_in_use(sfp) || sfp->mmap_called)
++			if (sfp->mmap_called ||
++			    sfp->res_in_use) {
++				mutex_unlock(&sfp->f_mutex);
+ 				return -EBUSY;
++			}
++
+ 			sg_remove_scat(sfp, &sfp->reserve);
+ 			sg_build_reserve(sfp, val);
+ 		}
++		mutex_unlock(&sfp->f_mutex);
+ 		return 0;
+ 	case SG_GET_RESERVED_SIZE:
+ 		val = min_t(int, sfp->reserve.bufflen,
+@@ -1727,13 +1736,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
+ 		md = &map_data;
+ 
+ 	if (md) {
+-		if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
++		mutex_lock(&sfp->f_mutex);
++		if (dxfer_len <= rsv_schp->bufflen &&
++		    !sfp->res_in_use) {
++			sfp->res_in_use = 1;
+ 			sg_link_reserve(sfp, srp, dxfer_len);
+-		else {
++		} else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) {
++			mutex_unlock(&sfp->f_mutex);
++			return -EBUSY;
++		} else {
+ 			res = sg_build_indirect(req_schp, sfp, dxfer_len);
+-			if (res)
++			if (res) {
++				mutex_unlock(&sfp->f_mutex);
+ 				return res;
++			}
+ 		}
++		mutex_unlock(&sfp->f_mutex);
+ 
+ 		md->pages = req_schp->pages;
+ 		md->page_order = req_schp->page_order;
+@@ -2024,6 +2042,8 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
+ 	req_schp->sglist_len = 0;
+ 	sfp->save_scat_len = 0;
+ 	srp->res_used = 0;
++	/* Called without mutex lock to avoid deadlock */
++	sfp->res_in_use = 0;
+ }
+ 
+ static Sg_request *
+@@ -2135,6 +2155,7 @@ sg_add_sfp(Sg_device * sdp)
+ 	rwlock_init(&sfp->rq_list_lock);
+ 
+ 	kref_init(&sfp->f_ref);
++	mutex_init(&sfp->f_mutex);
+ 	sfp->timeout = SG_DEFAULT_TIMEOUT;
+ 	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
+ 	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
+@@ -2210,20 +2231,6 @@ sg_remove_sfp(struct kref *kref)
+ 	schedule_work(&sfp->ew.work);
+ }
+ 
+-static int
+-sg_res_in_use(Sg_fd * sfp)
+-{
+-	const Sg_request *srp;
+-	unsigned long iflags;
+-
+-	read_lock_irqsave(&sfp->rq_list_lock, iflags);
+-	for (srp = sfp->headrp; srp; srp = srp->nextrp)
+-		if (srp->res_used)
+-			break;
+-	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+-	return srp ? 1 : 0;
+-}
+-
+ #ifdef CONFIG_SCSI_PROC_FS
+ static int
+ sg_idr_max_id(int id, void *p, void *data)
+diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
+index 6370a5efe343..defffa75ae1c 100644
+--- a/drivers/staging/wilc1000/linux_wlan.c
++++ b/drivers/staging/wilc1000/linux_wlan.c
+@@ -269,23 +269,12 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
+ 
+ int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode)
+ {
+-	int i = 0;
+-	int ret = -1;
+-	struct wilc_vif *vif;
+-	struct wilc *wilc;
+-
+-	vif = netdev_priv(wilc_netdev);
+-	wilc = vif->wilc;
++	struct wilc_vif *vif = netdev_priv(wilc_netdev);
+ 
+-	for (i = 0; i < wilc->vif_num; i++)
+-		if (wilc->vif[i]->ndev == wilc_netdev) {
+-			memcpy(wilc->vif[i]->bssid, bssid, 6);
+-			wilc->vif[i]->mode = mode;
+-			ret = 0;
+-			break;
+-		}
++	memcpy(vif->bssid, bssid, 6);
++	vif->mode = mode;
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
+@@ -1212,16 +1201,11 @@ void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
+ 
+ void wilc_netdev_cleanup(struct wilc *wilc)
+ {
+-	int i = 0;
+-	struct wilc_vif *vif[NUM_CONCURRENT_IFC];
++	int i;
+ 
+-	if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
++	if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev))
+ 		unregister_inetaddr_notifier(&g_dev_notifier);
+ 
+-		for (i = 0; i < NUM_CONCURRENT_IFC; i++)
+-			vif[i] = netdev_priv(wilc->vif[i]->ndev);
+-	}
+-
+ 	if (wilc && wilc->firmware) {
+ 		release_firmware(wilc->firmware);
+ 		wilc->firmware = NULL;
+@@ -1230,7 +1214,7 @@ void wilc_netdev_cleanup(struct wilc *wilc)
+ 	if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
+ 		for (i = 0; i < NUM_CONCURRENT_IFC; i++)
+ 			if (wilc->vif[i]->ndev)
+-				if (vif[i]->mac_opened)
++				if (wilc->vif[i]->mac_opened)
+ 					wilc_mac_close(wilc->vif[i]->ndev);
+ 
+ 		for (i = 0; i < NUM_CONCURRENT_IFC; i++) {
+diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
+index 2f9df37940a0..c51a49c9be70 100644
+--- a/kernel/gcov/base.c
++++ b/kernel/gcov/base.c
+@@ -98,6 +98,12 @@ void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)
+ }
+ EXPORT_SYMBOL(__gcov_merge_icall_topn);
+ 
++void __gcov_exit(void)
++{
++	/* Unused. */
++}
++EXPORT_SYMBOL(__gcov_exit);
++
+ /**
+  * gcov_enable_events - enable event reporting through gcov_event()
+  *
+diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
+index 6a5c239c7669..46a18e72bce6 100644
+--- a/kernel/gcov/gcc_4_7.c
++++ b/kernel/gcov/gcc_4_7.c
+@@ -18,7 +18,9 @@
+ #include <linux/vmalloc.h>
+ #include "gcov.h"
+ 
+-#if (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
++#if (__GNUC__ >= 7)
++#define GCOV_COUNTERS			9
++#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
+ #define GCOV_COUNTERS			10
+ #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
+ #define GCOV_COUNTERS			9
+diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
+index 0374a596cffa..9aa0fccd5d43 100644
+--- a/kernel/locking/spinlock_debug.c
++++ b/kernel/locking/spinlock_debug.c
+@@ -103,38 +103,14 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
+ 	lock->owner_cpu = -1;
+ }
+ 
+-static void __spin_lock_debug(raw_spinlock_t *lock)
+-{
+-	u64 i;
+-	u64 loops = loops_per_jiffy * HZ;
+-
+-	for (i = 0; i < loops; i++) {
+-		if (arch_spin_trylock(&lock->raw_lock))
+-			return;
+-		__delay(1);
+-	}
+-	/* lockup suspected: */
+-	spin_dump(lock, "lockup suspected");
+-#ifdef CONFIG_SMP
+-	trigger_all_cpu_backtrace();
+-#endif
+-
+-	/*
+-	 * The trylock above was causing a livelock.  Give the lower level arch
+-	 * specific lock code a chance to acquire the lock. We have already
+-	 * printed a warning/backtrace at this point. The non-debug arch
+-	 * specific code might actually succeed in acquiring the lock. If it is
+-	 * not successful, the end-result is the same - there is no forward
+-	 * progress.
+-	 */
+-	arch_spin_lock(&lock->raw_lock);
+-}
+-
++/*
++ * We are now relying on the NMI watchdog to detect lockup instead of doing
++ * the detection here with an unfair lock which can cause problem of its own.
++ */
+ void do_raw_spin_lock(raw_spinlock_t *lock)
+ {
+ 	debug_spin_lock_before(lock);
+-	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
+-		__spin_lock_debug(lock);
++	arch_spin_lock(&lock->raw_lock);
+ 	debug_spin_lock_after(lock);
+ }
+ 
+@@ -172,32 +148,6 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
+ 
+ #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
+ 
+-#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
+-static void __read_lock_debug(rwlock_t *lock)
+-{
+-	u64 i;
+-	u64 loops = loops_per_jiffy * HZ;
+-	int print_once = 1;
+-
+-	for (;;) {
+-		for (i = 0; i < loops; i++) {
+-			if (arch_read_trylock(&lock->raw_lock))
+-				return;
+-			__delay(1);
+-		}
+-		/* lockup suspected: */
+-		if (print_once) {
+-			print_once = 0;
+-			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
+-					"%s/%d, %p\n",
+-				raw_smp_processor_id(), current->comm,
+-				current->pid, lock);
+-			dump_stack();
+-		}
+-	}
+-}
+-#endif
+-
+ void do_raw_read_lock(rwlock_t *lock)
+ {
+ 	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+@@ -247,32 +197,6 @@ static inline void debug_write_unlock(rwlock_t *lock)
+ 	lock->owner_cpu = -1;
+ }
+ 
+-#if 0		/* This can cause lockups */
+-static void __write_lock_debug(rwlock_t *lock)
+-{
+-	u64 i;
+-	u64 loops = loops_per_jiffy * HZ;
+-	int print_once = 1;
+-
+-	for (;;) {
+-		for (i = 0; i < loops; i++) {
+-			if (arch_write_trylock(&lock->raw_lock))
+-				return;
+-			__delay(1);
+-		}
+-		/* lockup suspected: */
+-		if (print_once) {
+-			print_once = 0;
+-			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
+-					"%s/%d, %p\n",
+-				raw_smp_processor_id(), current->comm,
+-				current->pid, lock);
+-			dump_stack();
+-		}
+-	}
+-}
+-#endif
+-
+ void do_raw_write_lock(rwlock_t *lock)
+ {
+ 	debug_write_lock_before(lock);
+diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
+index f344f76b6559..6b2e046a9c61 100644
+--- a/lib/lz4/lz4hc_compress.c
++++ b/lib/lz4/lz4hc_compress.c
+@@ -131,7 +131,7 @@ static inline int lz4hc_insertandfindbestmatch(struct lz4hc_data *hc4,
+ #endif
+ 	int nbattempts = MAX_NB_ATTEMPTS;
+ 	size_t repl = 0, ml = 0;
+-	u16 delta;
++	u16 delta = 0;
+ 
+ 	/* HC4 match finder */
+ 	lz4hc_insert(hc4, ip);
```
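A few of the backported fixes above follow reusable patterns; the C sketches below are editorial illustrations against this patch, not part of it, and use pthreads plus illustrative names (struct vm_ctx, ctx_free_pgd) in place of kernel APIs. The arch/arm/kvm/mmu.c hunk fixes a teardown race by snapshotting and clearing the shared pgd pointer under the lock, then freeing it outside the lock, so a concurrent or repeated caller can never observe a stale pointer or double-free it:

```c
#include <pthread.h>
#include <stdlib.h>

struct vm_ctx {
	pthread_mutex_t lock;
	void *pgd;		/* shared page-table root, freed at most once */
};

static void ctx_free_pgd(struct vm_ctx *vm)
{
	void *pgd = NULL;

	pthread_mutex_lock(&vm->lock);
	if (vm->pgd) {
		pgd = vm->pgd;	/* take ownership while holding the lock */
		vm->pgd = NULL;	/* later callers see "already torn down" */
	}
	pthread_mutex_unlock(&vm->lock);

	free(pgd);		/* free(NULL) is a no-op, so this is safe */
}
```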
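The arch/x86/include/asm/io.h hunk adds a "memory" clobber to the rep; ins/outs string I/O. Without it, GCC may assume the buffer behind addr is untouched by the asm and keep stale register copies or reorder loads across the instruction. A sketch of the corrected constraint pattern (port I/O is privileged, so this compiles but only runs meaningfully after e.g. ioperm(); the function name is illustrative):

```c
/* Reads count 16-bit words from I/O port `port` into the buffer at
 * `addr`. The "memory" clobber tells the compiler that memory not named
 * in the operand list (the destination buffer) is modified by the asm.
 */
static inline void insw_clobbered(int port, void *addr, unsigned long count)
{
	asm volatile("rep; insw"
		     : "+D"(addr), "+c"(count)	/* advanced by the string op */
		     : "d"(port)
		     : "memory");		/* buffer contents changed */
}
```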
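The drivers/scsi/isci/remote_node_context.c hunk guards a name-table lookup against out-of-range enum values. The same defensive pattern in a self-contained form (the table and names here are made up for the example):

```c
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const state_names[] = { "INITIAL", "POSTING", "READY" };

static const char *state_name(unsigned int state)
{
	if (state >= ARRAY_SIZE(state_names))	/* reject anything past the table */
		return "UNKNOWN";
	return state_names[state];
}

int main(void)
{
	printf("%s %s\n", state_name(1), state_name(42));	/* POSTING UNKNOWN */
	return 0;
}
```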
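Finally, the drivers/scsi/sg.c hunks replace the list walk sg_res_in_use() with a res_in_use flag whose check and set happen under the new per-fd f_mutex, closing the window where two callers could both observe the reserve buffer as free and both claim it. The essence of that check-then-set fix, again with pthreads standing in for the kernel mutex API and illustrative names:

```c
#include <pthread.h>
#include <stdbool.h>

struct fd_state {
	pthread_mutex_t f_mutex;
	bool res_in_use;	/* reserve buffer currently claimed? */
};

/* Returns true if we claimed the reserve buffer, false if it was busy.
 * Testing and setting the flag inside one critical section is what
 * prevents two threads from both seeing "free" and both claiming it.
 */
static bool claim_reserve(struct fd_state *fd)
{
	bool claimed = false;

	pthread_mutex_lock(&fd->f_mutex);
	if (!fd->res_in_use) {
		fd->res_in_use = true;
		claimed = true;
	}
	pthread_mutex_unlock(&fd->f_mutex);
	return claimed;
}
```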