-rw-r--r--  0000_README                 |    4
-rw-r--r--  1114_linux-4.14.115.patch   | 2061
2 files changed, 2065 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 10dfc5f9..937317e9 100644
--- a/0000_README
+++ b/0000_README
@@ -499,6 +499,10 @@ Patch: 1113_linux-4.14.114.patch
From: http://www.kernel.org
Desc: Linux 4.14.114
+Patch: 1114_linux-4.14.115.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.115
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1114_linux-4.14.115.patch b/1114_linux-4.14.115.patch
new file mode 100644
index 00000000..5ba140e8
--- /dev/null
+++ b/1114_linux-4.14.115.patch
@@ -0,0 +1,2061 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 7d8b17ce8804..94fa46d2d805 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2680,7 +2680,11 @@
+ nosmt=force: Force disable SMT, cannot be undone
+ via the sysfs control file.
+
+- nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
++ nospectre_v1 [PPC] Disable mitigations for Spectre Variant 1 (bounds
++ check bypass). With this option data leaks are possible
++ in the system.
++
++ nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
+ (indirect branch prediction) vulnerability. System may
+ allow data leaks with this option, which is equivalent
+ to spectre_v2=off.
+diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
+index a054b5ad410a..828fcd6711b3 100644
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -402,6 +402,7 @@ tcp_min_rtt_wlen - INTEGER
+ minimum RTT when it is moved to a longer path (e.g., due to traffic
+ engineering). A longer window makes the filter more resistant to RTT
+ inflations such as transient congestion. The unit is seconds.
++ Possible values: 0 - 86400 (1 day)
+ Default: 300
+
+ tcp_moderate_rcvbuf - BOOLEAN
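(The 0 - 86400 range documented above is enforced later in this same patch: see the net/ipv4/sysctl_net_ipv4.c hunk near the end, which switches the handler to proc_dointvec_minmax with zero and one day in seconds as bounds.)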
+diff --git a/Makefile b/Makefile
+index 47a9f9883bdd..b27ffc1814e8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 114
++SUBLEVEL = 115
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index 5f687ba1eaa7..8ca539bdac35 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -1393,7 +1393,21 @@ ENTRY(efi_stub_entry)
+
+ @ Preserve return value of efi_entry() in r4
+ mov r4, r0
+- bl cache_clean_flush
++
++ @ our cache maintenance code relies on CP15 barrier instructions
++ @ but since we arrived here with the MMU and caches configured
++ @ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
++ @ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
++ @ the enable path will be executed on v7+ only.
++ mrc p15, 0, r1, c1, c0, 0 @ read SCTLR
++ tst r1, #(1 << 5) @ CP15BEN bit set?
++ bne 0f
++ orr r1, r1, #(1 << 5) @ CP15 barrier instructions
++ mcr p15, 0, r1, c1, c0, 0 @ write SCTLR
++ ARM( .inst 0xf57ff06f @ v7+ isb )
++ THUMB( isb )
++
++0: bl cache_clean_flush
+ bl cache_off
+
+ @ Set parameters for booting zImage according to boot protocol
+diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
+index 9ebe3e2403b1..c6b2e484d6c1 100644
+--- a/arch/mips/kernel/scall64-o32.S
++++ b/arch/mips/kernel/scall64-o32.S
+@@ -125,7 +125,7 @@ trace_a_syscall:
+ subu t1, v0, __NR_O32_Linux
+ move a1, v0
+ bnez t1, 1f /* __NR_syscall at offset 0 */
+- lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
++ ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
+ .set pop
+
+ 1: jal syscall_trace_enter
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index c5290aecdf06..eb1f8f249dc3 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -242,6 +242,15 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+ # Avoid indirect branches in kernel to deal with Spectre
+ ifdef CONFIG_RETPOLINE
+ KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
++ # Additionally, avoid generating expensive indirect jumps which
++ # are subject to retpolines for small number of switch cases.
++ # clang turns off jump table generation by default when under
++ # retpoline builds, however, gcc does not for x86. This has
++ # only been fixed starting from gcc stable version 8.4.0 and
++ # onwards, but not for older ones. See gcc bug #86952.
++ ifndef CONFIG_CC_IS_CLANG
++ KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
++ endif
+ endif
+
+ archscripts: scripts_basic
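As an aside on what -fno-jump-tables avoids, the hypothetical user-space function below (not part of the patch) is dense enough that gcc at -O2 would normally lower the switch to an indirect jump through a table of label addresses; under CONFIG_RETPOLINE every such indirect branch pays the retpoline thunk cost, whereas with -fno-jump-tables the same switch compiles to a direct compare-and-branch ladder.

    /* hypothetical illustration; compare the -O2 assembly with and
     * without -fno-jump-tables to see the indirect jump disappear */
    extern void op0(void); extern void op1(void); extern void op2(void);
    extern void op3(void); extern void op4(void); extern void op5(void);

    void demux(int op)
    {
            switch (op) {
            case 0: op0(); break;
            case 1: op1(); break;
            case 2: op2(); break;
            case 3: op3(); break;
            case 4: op4(); break;
            case 5: op5(); break;
            }
    }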
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index b9281f2725a6..e0b0399ff7ec 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -945,14 +945,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
+
+ index = page - alloc->pages;
+ page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
++
++ mm = alloc->vma_vm_mm;
++ if (!mmget_not_zero(mm))
++ goto err_mmget;
++ if (!down_write_trylock(&mm->mmap_sem))
++ goto err_down_write_mmap_sem_failed;
+ vma = binder_alloc_get_vma(alloc);
+- if (vma) {
+- if (!mmget_not_zero(alloc->vma_vm_mm))
+- goto err_mmget;
+- mm = alloc->vma_vm_mm;
+- if (!down_write_trylock(&mm->mmap_sem))
+- goto err_down_write_mmap_sem_failed;
+- }
+
+ list_lru_isolate(lru, item);
+ spin_unlock(lock);
+@@ -965,10 +964,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
+ PAGE_SIZE);
+
+ trace_binder_unmap_user_end(alloc, index);
+-
+- up_write(&mm->mmap_sem);
+- mmput(mm);
+ }
++ up_write(&mm->mmap_sem);
++ mmput(mm);
+
+ trace_binder_unmap_kernel_start(alloc, index);
+
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 24a3fb35614f..bd447de4a5b8 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -82,7 +82,6 @@
+
+ static DEFINE_IDR(loop_index_idr);
+ static DEFINE_MUTEX(loop_index_mutex);
+-static DEFINE_MUTEX(loop_ctl_mutex);
+
+ static int max_part;
+ static int part_shift;
+@@ -1019,7 +1018,7 @@ static int loop_clr_fd(struct loop_device *lo)
+ */
+ if (atomic_read(&lo->lo_refcnt) > 1) {
+ lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+- mutex_unlock(&loop_ctl_mutex);
++ mutex_unlock(&lo->lo_ctl_mutex);
+ return 0;
+ }
+
+@@ -1071,12 +1070,12 @@ static int loop_clr_fd(struct loop_device *lo)
+ if (!part_shift)
+ lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
+ loop_unprepare_queue(lo);
+- mutex_unlock(&loop_ctl_mutex);
++ mutex_unlock(&lo->lo_ctl_mutex);
+ /*
+- * Need not hold loop_ctl_mutex to fput backing file.
+- * Calling fput holding loop_ctl_mutex triggers a circular
++ * Need not hold lo_ctl_mutex to fput backing file.
++ * Calling fput holding lo_ctl_mutex triggers a circular
+ * lock dependency possibility warning as fput can take
+- * bd_mutex which is usually taken before loop_ctl_mutex.
++ * bd_mutex which is usually taken before lo_ctl_mutex.
+ */
+ fput(filp);
+ return 0;
+@@ -1195,7 +1194,7 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
+ int ret;
+
+ if (lo->lo_state != Lo_bound) {
+- mutex_unlock(&loop_ctl_mutex);
++ mutex_unlock(&lo->lo_ctl_mutex);
+ return -ENXIO;
+ }
+
+@@ -1214,10 +1213,10 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
+ lo->lo_encrypt_key_size);
+ }
+
+- /* Drop loop_ctl_mutex while we call into the filesystem. */
++ /* Drop lo_ctl_mutex while we call into the filesystem. */
+ path = lo->lo_backing_file->f_path;
+ path_get(&path);
+- mutex_unlock(&loop_ctl_mutex);
++ mutex_unlock(&lo->lo_ctl_mutex);
+ ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
+ if (!ret) {
+ info->lo_device = huge_encode_dev(stat.dev);
+@@ -1309,7 +1308,7 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
+ int err;
+
+ if (!arg) {
+- mutex_unlock(&loop_ctl_mutex);
++ mutex_unlock(&lo->lo_ctl_mutex);
+ return -EINVAL;
+ }
+ err = loop_get_status(lo, &info64);
+@@ -1327,7 +1326,7 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
+ int err;
+
+ if (!arg) {
+- mutex_unlock(&loop_ctl_mutex);
++ mutex_unlock(&lo->lo_ctl_mutex);
+ return -EINVAL;
+ }
+ err = loop_get_status(lo, &info64);
+@@ -1402,7 +1401,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ struct loop_device *lo = bdev->bd_disk->private_data;
+ int err;
+
+- mutex_lock_nested(&loop_ctl_mutex, 1);
++ mutex_lock_nested(&lo->lo_ctl_mutex, 1);
+ switch (cmd) {
+ case LOOP_SET_FD:
+ err = loop_set_fd(lo, mode, bdev, arg);
+@@ -1411,7 +1410,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ err = loop_change_fd(lo, bdev, arg);
+ break;
+ case LOOP_CLR_FD:
+- /* loop_clr_fd would have unlocked loop_ctl_mutex on success */
++ /* loop_clr_fd would have unlocked lo_ctl_mutex on success */
+ err = loop_clr_fd(lo);
+ if (!err)
+ goto out_unlocked;
+@@ -1424,7 +1423,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ break;
+ case LOOP_GET_STATUS:
+ err = loop_get_status_old(lo, (struct loop_info __user *) arg);
+- /* loop_get_status() unlocks loop_ctl_mutex */
++ /* loop_get_status() unlocks lo_ctl_mutex */
+ goto out_unlocked;
+ case LOOP_SET_STATUS64:
+ err = -EPERM;
+@@ -1434,7 +1433,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ break;
+ case LOOP_GET_STATUS64:
+ err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
+- /* loop_get_status() unlocks loop_ctl_mutex */
++ /* loop_get_status() unlocks lo_ctl_mutex */
+ goto out_unlocked;
+ case LOOP_SET_CAPACITY:
+ err = -EPERM;
+@@ -1454,7 +1453,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ default:
+ err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+ }
+- mutex_unlock(&loop_ctl_mutex);
++ mutex_unlock(&lo->lo_ctl_mutex);
+
+ out_unlocked:
+ return err;
+@@ -1571,7 +1570,7 @@ loop_get_status_compat(struct loop_device *lo,
+ int err;
+
+ if (!arg) {
+- mutex_unlock(&loop_ctl_mutex);
++ mutex_unlock(&lo->lo_ctl_mutex);
+ return -EINVAL;
+ }
+ err = loop_get_status(lo, &info64);
+@@ -1588,16 +1587,16 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+
+ switch(cmd) {
+ case LOOP_SET_STATUS:
+- mutex_lock(&loop_ctl_mutex);
++ mutex_lock(&lo->lo_ctl_mutex);
+ err = loop_set_status_compat(
+ lo, (const struct compat_loop_info __user *) arg);
+- mutex_unlock(&loop_ctl_mutex);
++ mutex_unlock(&lo->lo_ctl_mutex);
+ break;
+ case LOOP_GET_STATUS:
+- mutex_lock(&loop_ctl_mutex);
++ mutex_lock(&lo->lo_ctl_mutex);
+ err = loop_get_status_compat(
+ lo, (struct compat_loop_info __user *) arg);
+- /* loop_get_status() unlocks loop_ctl_mutex */
++ /* loop_get_status() unlocks lo_ctl_mutex */
+ break;
+ case LOOP_SET_CAPACITY:
+ case LOOP_CLR_FD:
+@@ -1641,7 +1640,7 @@ static void __lo_release(struct loop_device *lo)
+ if (atomic_dec_return(&lo->lo_refcnt))
+ return;
+
+- mutex_lock(&loop_ctl_mutex);
++ mutex_lock(&lo->lo_ctl_mutex);
+ if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
+ /*
+ * In autoclear mode, stop the loop thread
+@@ -1659,7 +1658,7 @@ static void __lo_release(struct loop_device *lo)
+ blk_mq_unfreeze_queue(lo->lo_queue);
+ }
+
+- mutex_unlock(&loop_ctl_mutex);
++ mutex_unlock(&lo->lo_ctl_mutex);
+ }
+
+ static void lo_release(struct gendisk *disk, fmode_t mode)
+@@ -1705,10 +1704,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
+ struct loop_device *lo = ptr;
+ struct loop_func_table *xfer = data;
+
+- mutex_lock(&loop_ctl_mutex);
++ mutex_lock(&lo->lo_ctl_mutex);
+ if (lo->lo_encryption == xfer)
+ loop_release_xfer(lo);
+- mutex_unlock(&loop_ctl_mutex);
++ mutex_unlock(&lo->lo_ctl_mutex);
+ return 0;
+ }
+
+@@ -1881,6 +1880,7 @@ static int loop_add(struct loop_device **l, int i)
+ if (!part_shift)
+ disk->flags |= GENHD_FL_NO_PART_SCAN;
+ disk->flags |= GENHD_FL_EXT_DEVT;
++ mutex_init(&lo->lo_ctl_mutex);
+ atomic_set(&lo->lo_refcnt, 0);
+ lo->lo_number = i;
+ spin_lock_init(&lo->lo_lock);
+@@ -1993,19 +1993,19 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
+ ret = loop_lookup(&lo, parm);
+ if (ret < 0)
+ break;
+- mutex_lock(&loop_ctl_mutex);
++ mutex_lock(&lo->lo_ctl_mutex);
+ if (lo->lo_state != Lo_unbound) {
+ ret = -EBUSY;
+- mutex_unlock(&loop_ctl_mutex);
++ mutex_unlock(&lo->lo_ctl_mutex);
+ break;
+ }
+ if (atomic_read(&lo->lo_refcnt) > 0) {
+ ret = -EBUSY;
+- mutex_unlock(&loop_ctl_mutex);
++ mutex_unlock(&lo->lo_ctl_mutex);
+ break;
+ }
+ lo->lo_disk->private_data = NULL;
+- mutex_unlock(&loop_ctl_mutex);
++ mutex_unlock(&lo->lo_ctl_mutex);
+ idr_remove(&loop_index_idr, lo->lo_number);
+ loop_remove(lo);
+ break;
+diff --git a/drivers/block/loop.h b/drivers/block/loop.h
+index b2251752452b..dfc54ceba410 100644
+--- a/drivers/block/loop.h
++++ b/drivers/block/loop.h
+@@ -54,6 +54,7 @@ struct loop_device {
+
+ spinlock_t lo_lock;
+ int lo_state;
++ struct mutex lo_ctl_mutex;
+ struct kthread_worker worker;
+ struct task_struct *worker_task;
+ bool use_dio;
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index a46776a84480..133178c9b2cf 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -488,18 +488,18 @@ struct zram_work {
+ struct zram *zram;
+ unsigned long entry;
+ struct bio *bio;
++ struct bio_vec bvec;
+ };
+
+ #if PAGE_SIZE != 4096
+ static void zram_sync_read(struct work_struct *work)
+ {
+- struct bio_vec bvec;
+ struct zram_work *zw = container_of(work, struct zram_work, work);
+ struct zram *zram = zw->zram;
+ unsigned long entry = zw->entry;
+ struct bio *bio = zw->bio;
+
+- read_from_bdev_async(zram, &bvec, entry, bio);
++ read_from_bdev_async(zram, &zw->bvec, entry, bio);
+ }
+
+ /*
+@@ -512,6 +512,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+ {
+ struct zram_work work;
+
++ work.bvec = *bvec;
+ work.zram = zram;
+ work.entry = entry;
+ work.bio = bio;
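The zram hunks above close a data-flow bug: read_from_bdev_sync() queues zram_sync_read() onto a workqueue, and the worker used to declare its own, never-initialized, on-stack bio_vec instead of receiving the caller's. A condensed sketch of the resulting shape (not the literal file contents):

    struct zram_work {
            struct work_struct work;
            struct zram *zram;
            unsigned long entry;
            struct bio *bio;
            struct bio_vec bvec;    /* the caller's bvec, carried by value */
    };

    /* queueing side copies before the worker can run: */
    work.bvec = *bvec;

    /* worker side uses the carried copy: */
    read_from_bdev_async(zw->zram, &zw->bvec, zw->entry, zw->bio);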
+diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
+index 9d6ce5051d8f..77b126525dac 100644
+--- a/drivers/dma/sh/rcar-dmac.c
++++ b/drivers/dma/sh/rcar-dmac.c
+@@ -1332,6 +1332,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
+ enum dma_status status;
+ unsigned long flags;
+ unsigned int residue;
++ bool cyclic;
+
+ status = dma_cookie_status(chan, cookie, txstate);
+ if (status == DMA_COMPLETE || !txstate)
+@@ -1339,10 +1340,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
+
+ spin_lock_irqsave(&rchan->lock, flags);
+ residue = rcar_dmac_chan_get_residue(rchan, cookie);
++ cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
+ spin_unlock_irqrestore(&rchan->lock, flags);
+
+ /* if there's no residue, the cookie is complete */
+- if (!residue)
++ if (!residue && !cyclic)
+ return DMA_COMPLETE;
+
+ dma_set_residue(txstate, residue);
+diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
+index 14eb8a064562..da2d309574ba 100644
+--- a/drivers/gpu/drm/i915/intel_fbdev.c
++++ b/drivers/gpu/drm/i915/intel_fbdev.c
+@@ -326,8 +326,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
+ bool *enabled, int width, int height)
+ {
+ struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
++ unsigned long conn_configured, conn_seq, mask;
+ unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
+- unsigned long conn_configured, conn_seq;
+ int i, j;
+ bool *save_enabled;
+ bool fallback = true, ret = true;
+@@ -345,9 +345,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
+ drm_modeset_backoff(&ctx);
+
+ memcpy(save_enabled, enabled, count);
+- conn_seq = GENMASK(count - 1, 0);
++ mask = GENMASK(count - 1, 0);
+ conn_configured = 0;
+ retry:
++ conn_seq = conn_configured;
+ for (i = 0; i < count; i++) {
+ struct drm_fb_helper_connector *fb_conn;
+ struct drm_connector *connector;
+@@ -360,8 +361,7 @@ retry:
+ if (conn_configured & BIT(i))
+ continue;
+
+- /* First pass, only consider tiled connectors */
+- if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
++ if (conn_seq == 0 && !connector->has_tile)
+ continue;
+
+ if (connector->status == connector_status_connected)
+@@ -465,10 +465,8 @@ retry:
+ conn_configured |= BIT(i);
+ }
+
+- if (conn_configured != conn_seq) { /* repeat until no more are found */
+- conn_seq = conn_configured;
++ if ((conn_configured & mask) != mask && conn_configured != conn_seq)
+ goto retry;
+- }
+
+ /*
+ * If the BIOS didn't enable everything it could, fall back to have the
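Condensing the i915 hunks, the retry loop now reads as below (a sketch, context trimmed): each pass snapshots the configured mask into conn_seq, the first pass (conn_seq == 0) considers only tiled connectors, and the loop repeats only while some connector in mask remains unconfigured and the previous pass made progress, which guarantees termination because the configured mask grows monotonically and is bounded by count bits.

    mask = GENMASK(count - 1, 0);
    conn_configured = 0;
    retry:
            conn_seq = conn_configured;     /* snapshot before this pass */
            for (i = 0; i < count; i++) {
                    if (conn_configured & BIT(i))
                            continue;
                    if (conn_seq == 0 && !connector->has_tile)
                            continue;       /* first pass: tiled only */
                    /* ... on success: conn_configured |= BIT(i); ... */
            }
            if ((conn_configured & mask) != mask && /* work remains   */
                conn_configured != conn_seq)        /* and progressed */
                    goto retry;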
+diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
+index ce1e3b9e14c9..7747f160c740 100644
+--- a/drivers/gpu/drm/vc4/vc4_crtc.c
++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
+@@ -867,7 +867,7 @@ static void
+ vc4_crtc_reset(struct drm_crtc *crtc)
+ {
+ if (crtc->state)
+- __drm_atomic_helper_crtc_destroy_state(crtc->state);
++ vc4_crtc_destroy_state(crtc, crtc->state);
+
+ crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
+ if (crtc->state)
+diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
+index bb27a3150563..2a3ae9006c58 100644
+--- a/drivers/hwtracing/intel_th/gth.c
++++ b/drivers/hwtracing/intel_th/gth.c
+@@ -624,7 +624,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
+ othdev->output.port = -1;
+ othdev->output.active = false;
+ gth->output[port].output = NULL;
+- for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
++ for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
+ if (gth->master[master] == port)
+ gth->master[master] = -1;
+ spin_unlock(&gth->gth_lock);
+diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
+index 524e6134642e..e7013d2d4f0e 100644
+--- a/drivers/infiniband/sw/rdmavt/mr.c
++++ b/drivers/infiniband/sw/rdmavt/mr.c
+@@ -611,11 +611,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
+ if (unlikely(mapped_segs == mr->mr.max_segs))
+ return -ENOMEM;
+
+- if (mr->mr.length == 0) {
+- mr->mr.user_base = addr;
+- mr->mr.iova = addr;
+- }
+-
+ m = mapped_segs / RVT_SEGSZ;
+ n = mapped_segs % RVT_SEGSZ;
+ mr->mr.map[m]->segs[n].vaddr = (void *)addr;
+@@ -633,17 +628,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
+ * @sg_nents: number of entries in sg
+ * @sg_offset: offset in bytes into sg
+ *
++ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
++ *
+ * Return: number of sg elements mapped to the memory region
+ */
+ int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+ {
+ struct rvt_mr *mr = to_imr(ibmr);
++ int ret;
+
+ mr->mr.length = 0;
+ mr->mr.page_shift = PAGE_SHIFT;
+- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
+- rvt_set_page);
++ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
++ mr->mr.user_base = ibmr->iova;
++ mr->mr.iova = ibmr->iova;
++ mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
++ mr->mr.length = (size_t)ibmr->length;
++ return ret;
+ }
+
+ /**
+@@ -674,6 +676,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
+ ibmr->rkey = key;
+ mr->mr.lkey = key;
+ mr->mr.access_flags = access;
++ mr->mr.iova = ibmr->iova;
+ atomic_set(&mr->mr.lkey_invalid, 0);
+
+ return 0;
+diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
+index bc5e37f30ac1..bb63b8823d62 100644
+--- a/drivers/input/rmi4/rmi_f11.c
++++ b/drivers/input/rmi4/rmi_f11.c
+@@ -1239,7 +1239,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
+ }
+
+ rc = f11_write_control_regs(fn, &f11->sens_query,
+- &f11->dev_controls, fn->fd.query_base_addr);
++ &f11->dev_controls, fn->fd.control_base_addr);
+ if (rc)
+ dev_warn(&fn->dev, "Failed to write control registers\n");
+
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 036379a23499..23f0f4eaaa2e 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -2917,17 +2917,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ goto bad;
+ }
+ ic->sectors_per_block = val >> SECTOR_SHIFT;
+- } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
++ } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
+ r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
+ "Invalid internal_hash argument");
+ if (r)
+ goto bad;
+- } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
++ } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
+ r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
+ "Invalid journal_crypt argument");
+ if (r)
+ goto bad;
+- } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
++ } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
+ r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
+ "Invalid journal_mac argument");
+ if (r)
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+index 103c0a742d03..fef0bff4a54b 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+@@ -58,6 +58,8 @@ static int __init fm10k_init_module(void)
+ /* create driver workqueue */
+ fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+ fm10k_driver_name);
++ if (!fm10k_workqueue)
++ return -ENOMEM;
+
+ fm10k_dbg_init();
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index d9db3ad3d765..26ad27b3f687 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1622,7 +1622,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
+ break;
+ case MLX5_MODULE_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
++ modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
+ break;
+ default:
+ netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+index ccb6287aeeb7..1d2bb7fa68b1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+@@ -392,10 +392,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+ size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
+
+ i2c_addr = MLX5_I2C_ADDR_LOW;
+- if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
+- i2c_addr = MLX5_I2C_ADDR_HIGH;
+- offset -= MLX5_EEPROM_PAGE_LENGTH;
+- }
+
+ MLX5_SET(mcia_reg, in, l, 0);
+ MLX5_SET(mcia_reg, in, module, module_num);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 29d37355d8c6..ab09f9e43c79 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -2521,11 +2521,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
+ if (err)
+ return err;
+
++ mlxsw_sp_port->link.autoneg = autoneg;
++
+ if (!netif_running(dev))
+ return 0;
+
+- mlxsw_sp_port->link.autoneg = autoneg;
+-
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index f2429ec07b57..ecf3f8c1bc0e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2582,8 +2582,6 @@ static int stmmac_open(struct net_device *dev)
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+- stmmac_check_ether_addr(priv);
+-
+ if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+ priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI) {
+@@ -4213,6 +4211,8 @@ int stmmac_dvr_probe(struct device *device,
+ if (ret)
+ goto error_hw_init;
+
++ stmmac_check_ether_addr(priv);
++
+ /* Configure real RX and TX queues */
+ netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
+ netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+index d819e8eaba12..cc1e887e47b5 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+@@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = {
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
++ /*
+ * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
++ * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
++ * has only one pci network device while other asset tags are
++ * for IOT2040 which has two.
++ */
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
+@@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
+- DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
+- "6ES7647-0AA00-1YA2"),
+ },
+ .driver_data = (void *)&iot2040_stmmac_dmi_data,
+ },
+diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
+index f4e93f5fc204..ea90db3c7705 100644
+--- a/drivers/net/slip/slhc.c
++++ b/drivers/net/slip/slhc.c
+@@ -153,7 +153,7 @@ out_fail:
+ void
+ slhc_free(struct slcompress *comp)
+ {
+- if ( comp == NULLSLCOMPR )
++ if ( IS_ERR_OR_NULL(comp) )
+ return;
+
+ if ( comp->tstate != NULLSLSTATE )
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index fea141e71705..e9a92ed5a308 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1157,6 +1157,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
+ return -EINVAL;
+ }
+
++ if (netdev_has_upper_dev(dev, port_dev)) {
++ netdev_err(dev, "Device %s is already an upper device of the team interface\n",
++ portname);
++ return -EBUSY;
++ }
++
+ if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
+ vlan_uses_dev(dev)) {
+ netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 2f3dbf1c3c2d..79d2c0bf7870 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -1891,14 +1891,11 @@ int usb_runtime_idle(struct device *dev)
+ return -EBUSY;
+ }
+
+-int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
++static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+ {
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ int ret = -EPERM;
+
+- if (enable && !udev->usb2_hw_lpm_allowed)
+- return 0;
+-
+ if (hcd->driver->set_usb2_hw_lpm) {
+ ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
+ if (!ret)
+@@ -1908,6 +1905,24 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+ return ret;
+ }
+
++int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
++{
++ if (!udev->usb2_hw_lpm_capable ||
++ !udev->usb2_hw_lpm_allowed ||
++ udev->usb2_hw_lpm_enabled)
++ return 0;
++
++ return usb_set_usb2_hardware_lpm(udev, 1);
++}
++
++int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
++{
++ if (!udev->usb2_hw_lpm_enabled)
++ return 0;
++
++ return usb_set_usb2_hardware_lpm(udev, 0);
++}
++
+ #endif /* CONFIG_PM */
+
+ struct bus_type usb_bus_type = {
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 4a4e666a8e09..a9541525ea4f 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3174,8 +3174,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
+ }
+
+ /* disable USB2 hardware LPM */
+- if (udev->usb2_hw_lpm_enabled == 1)
+- usb_set_usb2_hardware_lpm(udev, 0);
++ usb_disable_usb2_hardware_lpm(udev);
+
+ if (usb_disable_ltm(udev)) {
+ dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
+@@ -3213,8 +3212,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
+ usb_enable_ltm(udev);
+ err_ltm:
+ /* Try to enable USB2 hardware LPM again */
+- if (udev->usb2_hw_lpm_capable == 1)
+- usb_set_usb2_hardware_lpm(udev, 1);
++ usb_enable_usb2_hardware_lpm(udev);
+
+ if (udev->do_remote_wakeup)
+ (void) usb_disable_remote_wakeup(udev);
+@@ -3497,8 +3495,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ hub_port_logical_disconnect(hub, port1);
+ } else {
+ /* Try to enable USB2 hardware LPM */
+- if (udev->usb2_hw_lpm_capable == 1)
+- usb_set_usb2_hardware_lpm(udev, 1);
++ usb_enable_usb2_hardware_lpm(udev);
+
+ /* Try to enable USB3 LTM */
+ usb_enable_ltm(udev);
+@@ -4334,7 +4331,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
+ if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
+ connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
+ udev->usb2_hw_lpm_allowed = 1;
+- usb_set_usb2_hardware_lpm(udev, 1);
++ usb_enable_usb2_hardware_lpm(udev);
+ }
+ }
+
+@@ -5491,8 +5488,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ /* Disable USB2 hardware LPM.
+ * It will be re-enabled by the enumeration process.
+ */
+- if (udev->usb2_hw_lpm_enabled == 1)
+- usb_set_usb2_hardware_lpm(udev, 0);
++ usb_disable_usb2_hardware_lpm(udev);
+
+ /* Disable LPM and LTM while we reset the device and reinstall the alt
+ * settings. Device-initiated LPM settings, and system exit latency
+@@ -5602,7 +5598,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+
+ done:
+ /* Now that the alt settings are re-installed, enable LTM and LPM. */
+- usb_set_usb2_hardware_lpm(udev, 1);
++ usb_enable_usb2_hardware_lpm(udev);
+ usb_unlocked_enable_lpm(udev);
+ usb_enable_ltm(udev);
+ usb_release_bos_descriptor(udev);
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index 833ddd228e3a..1fe3c5d3be5f 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1182,8 +1182,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
+ dev->actconfig->interface[i] = NULL;
+ }
+
+- if (dev->usb2_hw_lpm_enabled == 1)
+- usb_set_usb2_hardware_lpm(dev, 0);
++ usb_disable_usb2_hardware_lpm(dev);
+ usb_unlocked_disable_lpm(dev);
+ usb_disable_ltm(dev);
+
+diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
+index d930bfda4010..15c19863f7b3 100644
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -508,7 +508,10 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev,
+
+ if (!ret) {
+ udev->usb2_hw_lpm_allowed = value;
+- ret = usb_set_usb2_hardware_lpm(udev, value);
++ if (value)
++ ret = usb_enable_usb2_hardware_lpm(udev);
++ else
++ ret = usb_disable_usb2_hardware_lpm(udev);
+ }
+
+ usb_unlock_device(udev);
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index dc6949248823..1b5f346d93eb 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -89,7 +89,8 @@ extern int usb_remote_wakeup(struct usb_device *dev);
+ extern int usb_runtime_suspend(struct device *dev);
+ extern int usb_runtime_resume(struct device *dev);
+ extern int usb_runtime_idle(struct device *dev);
+-extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
++extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev);
++extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev);
+
+ #else
+
+@@ -109,7 +110,12 @@ static inline int usb_autoresume_device(struct usb_device *udev)
+ return 0;
+ }
+
+-static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
++static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
++{
++ return 0;
++}
++
++static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
+ {
+ return 0;
+ }
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 50eeb74ddc0a..f77a9b3370b5 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
+ MODULE_PARM_DESC(disable_hugepages,
+ "Disable VFIO IOMMU support for IOMMU hugepages.");
+
++static unsigned int dma_entry_limit __read_mostly = U16_MAX;
++module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
++MODULE_PARM_DESC(dma_entry_limit,
++ "Maximum number of user DMA mappings per container (65535).");
++
+ struct vfio_iommu {
+ struct list_head domain_list;
+ struct vfio_domain *external_domain; /* domain for external user */
+ struct mutex lock;
+ struct rb_root dma_list;
+ struct blocking_notifier_head notifier;
++ unsigned int dma_avail;
+ bool v2;
+ bool nesting;
+ };
+@@ -732,6 +738,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
+ vfio_unlink_dma(iommu, dma);
+ put_task_struct(dma->task);
+ kfree(dma);
++ iommu->dma_avail++;
+ }
+
+ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
+@@ -1003,12 +1010,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
+ goto out_unlock;
+ }
+
++ if (!iommu->dma_avail) {
++ ret = -ENOSPC;
++ goto out_unlock;
++ }
++
+ dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
++ iommu->dma_avail--;
+ dma->iova = iova;
+ dma->vaddr = vaddr;
+ dma->prot = prot;
+@@ -1504,6 +1517,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
+
+ INIT_LIST_HEAD(&iommu->domain_list);
+ iommu->dma_list = RB_ROOT;
++ iommu->dma_avail = dma_entry_limit;
+ mutex_init(&iommu->lock);
+ BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
+
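dma_entry_limit is declared with mode 0644, so besides being settable at load time (dma_entry_limit=N) it should, per the usual module-parameter convention, also be readable and writable under /sys/module/vfio_iommu_type1/parameters/. Note that each container samples the value once, into iommu->dma_avail at open time, so a later change only affects containers opened afterwards.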
+diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
+index 8a5266699b67..56e8fc896f6b 100644
+--- a/fs/ceph/dir.c
++++ b/fs/ceph/dir.c
+@@ -1454,6 +1454,7 @@ void ceph_dentry_lru_del(struct dentry *dn)
+ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
+ {
+ struct ceph_inode_info *dci = ceph_inode(dir);
++ unsigned hash;
+
+ switch (dci->i_dir_layout.dl_dir_hash) {
+ case 0: /* for backward compat */
+@@ -1461,8 +1462,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
+ return dn->d_name.hash;
+
+ default:
+- return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
++ spin_lock(&dn->d_lock);
++ hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
+ dn->d_name.name, dn->d_name.len);
++ spin_unlock(&dn->d_lock);
++ return hash;
+ }
+ }
+
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index a48984dd6426..e1ded4bd6115 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1219,6 +1219,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
+ list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
+ ci->i_prealloc_cap_flush = NULL;
+ }
++
++ if (drop &&
++ ci->i_wrbuffer_ref_head == 0 &&
++ ci->i_wr_ref == 0 &&
++ ci->i_dirty_caps == 0 &&
++ ci->i_flushing_caps == 0) {
++ ceph_put_snap_context(ci->i_head_snapc);
++ ci->i_head_snapc = NULL;
++ }
+ }
+ spin_unlock(&ci->i_ceph_lock);
+ while (!list_empty(&to_remove)) {
+@@ -1863,10 +1872,39 @@ retry:
+ return path;
+ }
+
++/* Duplicate the dentry->d_name.name safely */
++static int clone_dentry_name(struct dentry *dentry, const char **ppath,
++ int *ppathlen)
++{
++ u32 len;
++ char *name;
++
++retry:
++ len = READ_ONCE(dentry->d_name.len);
++ name = kmalloc(len + 1, GFP_NOFS);
++ if (!name)
++ return -ENOMEM;
++
++ spin_lock(&dentry->d_lock);
++ if (dentry->d_name.len != len) {
++ spin_unlock(&dentry->d_lock);
++ kfree(name);
++ goto retry;
++ }
++ memcpy(name, dentry->d_name.name, len);
++ spin_unlock(&dentry->d_lock);
++
++ name[len] = '\0';
++ *ppath = name;
++ *ppathlen = len;
++ return 0;
++}
++
+ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
+ const char **ppath, int *ppathlen, u64 *pino,
+- int *pfreepath)
++ bool *pfreepath, bool parent_locked)
+ {
++ int ret;
+ char *path;
+
+ rcu_read_lock();
+@@ -1875,8 +1913,15 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
+ if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
+ *pino = ceph_ino(dir);
+ rcu_read_unlock();
+- *ppath = dentry->d_name.name;
+- *ppathlen = dentry->d_name.len;
++ if (parent_locked) {
++ *ppath = dentry->d_name.name;
++ *ppathlen = dentry->d_name.len;
++ } else {
++ ret = clone_dentry_name(dentry, ppath, ppathlen);
++ if (ret)
++ return ret;
++ *pfreepath = true;
++ }
+ return 0;
+ }
+ rcu_read_unlock();
+@@ -1884,13 +1929,13 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ *ppath = path;
+- *pfreepath = 1;
++ *pfreepath = true;
+ return 0;
+ }
+
+ static int build_inode_path(struct inode *inode,
+ const char **ppath, int *ppathlen, u64 *pino,
+- int *pfreepath)
++ bool *pfreepath)
+ {
+ struct dentry *dentry;
+ char *path;
+@@ -1906,7 +1951,7 @@ static int build_inode_path(struct inode *inode,
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ *ppath = path;
+- *pfreepath = 1;
++ *pfreepath = true;
+ return 0;
+ }
+
+@@ -1917,7 +1962,7 @@ static int build_inode_path(struct inode *inode,
+ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
+ struct inode *rdiri, const char *rpath,
+ u64 rino, const char **ppath, int *pathlen,
+- u64 *ino, int *freepath)
++ u64 *ino, bool *freepath, bool parent_locked)
+ {
+ int r = 0;
+
+@@ -1927,7 +1972,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
+ ceph_snap(rinode));
+ } else if (rdentry) {
+ r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
+- freepath);
++ freepath, parent_locked);
+ dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
+ *ppath);
+ } else if (rpath || rino) {
+@@ -1953,7 +1998,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
+ const char *path2 = NULL;
+ u64 ino1 = 0, ino2 = 0;
+ int pathlen1 = 0, pathlen2 = 0;
+- int freepath1 = 0, freepath2 = 0;
++ bool freepath1 = false, freepath2 = false;
+ int len;
+ u16 releases;
+ void *p, *end;
+@@ -1961,16 +2006,19 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
+
+ ret = set_request_path_attr(req->r_inode, req->r_dentry,
+ req->r_parent, req->r_path1, req->r_ino1.ino,
+- &path1, &pathlen1, &ino1, &freepath1);
++ &path1, &pathlen1, &ino1, &freepath1,
++ test_bit(CEPH_MDS_R_PARENT_LOCKED,
++ &req->r_req_flags));
+ if (ret < 0) {
+ msg = ERR_PTR(ret);
+ goto out;
+ }
+
++ /* If r_old_dentry is set, then assume that its parent is locked */
+ ret = set_request_path_attr(NULL, req->r_old_dentry,
+ req->r_old_dentry_dir,
+ req->r_path2, req->r_ino2.ino,
+- &path2, &pathlen2, &ino2, &freepath2);
++ &path2, &pathlen2, &ino2, &freepath2, true);
+ if (ret < 0) {
+ msg = ERR_PTR(ret);
+ goto out_free1;
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index 9b6207c84b68..a7e763dac038 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -568,7 +568,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
+ old_snapc = NULL;
+
+ update_snapc:
+- if (ci->i_head_snapc) {
++ if (ci->i_wrbuffer_ref_head == 0 &&
++ ci->i_wr_ref == 0 &&
++ ci->i_dirty_caps == 0 &&
++ ci->i_flushing_caps == 0) {
++ ci->i_head_snapc = NULL;
++ } else {
+ ci->i_head_snapc = ceph_get_snap_context(new_snapc);
+ dout(" new snapc is %p\n", new_snapc);
+ }
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 6fd4a6a75234..e7192ee7a89c 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -1730,6 +1730,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
+ if (rc == 0 || rc != -EBUSY)
+ goto do_rename_exit;
+
++ /* Don't fall back to using SMB on SMB 2+ mount */
++ if (server->vals->protocol_id != 0)
++ goto do_rename_exit;
++
+ /* open-file renames don't work across directories */
+ if (to_dentry->d_parent != from_dentry->d_parent)
+ goto do_rename_exit;
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 311761a6ef6d..6761e905cab0 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -828,6 +828,7 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bh)) {
+ ret = PTR_ERR(bh);
++ bh = NULL;
+ goto out;
+ }
+
+@@ -2905,6 +2906,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
+ if (error == -EIO)
+ EXT4_ERROR_INODE(inode, "block %llu read error",
+ EXT4_I(inode)->i_file_acl);
++ bh = NULL;
+ goto cleanup;
+ }
+ error = ext4_xattr_check_block(inode, bh);
+@@ -3061,6 +3063,7 @@ ext4_xattr_block_cache_find(struct inode *inode,
+ if (IS_ERR(bh)) {
+ if (PTR_ERR(bh) == -ENOMEM)
+ return NULL;
++ bh = NULL;
+ EXT4_ERROR_INODE(inode, "block %lu read error",
+ (unsigned long)ce->e_value);
+ } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
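The ext4 hunks share one idiom: never leave an ERR_PTR in a buffer-head variable that a later cleanup path will brelse(), since brelse(NULL) is a harmless no-op while brelse() on an ERR_PTR dereferences a poisoned pointer. Sketch of the shape (hypothetical labels):

    bh = ext4_sb_bread(sb, block, REQ_PRIO);
    if (IS_ERR(bh)) {
            ret = PTR_ERR(bh);
            bh = NULL;              /* keep the ERR_PTR away from brelse() */
            goto out;
    }
    ...
    out:
            brelse(bh);             /* NULL-safe */
            return ret;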
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 77d8d03344c8..f464f8d9060c 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -2044,7 +2044,8 @@ static int nfs23_validate_mount_data(void *options,
+ memcpy(sap, &data->addr, sizeof(data->addr));
+ args->nfs_server.addrlen = sizeof(data->addr);
+ args->nfs_server.port = ntohs(data->addr.sin_port);
+- if (!nfs_verify_server_address(sap))
++ if (sap->sa_family != AF_INET ||
++ !nfs_verify_server_address(sap))
+ goto out_no_address;
+
+ if (!(data->flags & NFS_MOUNT_TCP))
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 49b0a9e7ff18..80aeb19b176b 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -939,8 +939,9 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
+ cb->cb_seq_status = 1;
+ cb->cb_status = 0;
+ if (minorversion) {
+- if (!nfsd41_cb_get_slot(clp, task))
++ if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
+ return;
++ cb->cb_holds_slot = true;
+ }
+ rpc_call_start(task);
+ }
+@@ -967,6 +968,9 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
+ return true;
+ }
+
++ if (!cb->cb_holds_slot)
++ goto need_restart;
++
+ switch (cb->cb_seq_status) {
+ case 0:
+ /*
+@@ -1004,6 +1008,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
+ cb->cb_seq_status);
+ }
+
++ cb->cb_holds_slot = false;
+ clear_bit(0, &clp->cl_cb_slot_busy);
+ rpc_wake_up_next(&clp->cl_cb_waitq);
+ dprintk("%s: freed slot, new seqid=%d\n", __func__,
+@@ -1211,6 +1216,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
+ cb->cb_seq_status = 1;
+ cb->cb_status = 0;
+ cb->cb_need_restart = false;
++ cb->cb_holds_slot = false;
+ }
+
+ void nfsd4_run_cb(struct nfsd4_callback *cb)
+diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
+index 86aa92d200e1..133d8bf62a5c 100644
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -69,6 +69,7 @@ struct nfsd4_callback {
+ int cb_seq_status;
+ int cb_status;
+ bool cb_need_restart;
++ bool cb_holds_slot;
+ };
+
+ struct nfsd4_callback_ops {
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 8d5422bb9c1a..555698ddb943 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -1620,9 +1620,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
+ if (--header->nreg)
+ return;
+
+- if (parent)
++ if (parent) {
+ put_links(header);
+- start_unregistering(header);
++ start_unregistering(header);
++ }
++
+ if (!--header->count)
+ kfree_rcu(header, rcu);
+
+diff --git a/fs/splice.c b/fs/splice.c
+index 00d2f142dcf9..a598d444abe1 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -332,8 +332,8 @@ const struct pipe_buf_operations default_pipe_buf_ops = {
+ .get = generic_pipe_buf_get,
+ };
+
+-static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
+- struct pipe_buffer *buf)
++int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
++ struct pipe_buffer *buf)
+ {
+ return 1;
+ }
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index befdcd304b3d..2dcf6e81b2e2 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -182,6 +182,7 @@ void free_pipe_info(struct pipe_inode_info *);
+ void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+ int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
+ int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
++int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
+ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
+ void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
+
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index b2589c7e9439..22770168bff8 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -217,7 +217,6 @@ static void task_non_contending(struct task_struct *p)
+ if (dl_se->dl_runtime == 0)
+ return;
+
+- WARN_ON(hrtimer_active(&dl_se->inactive_timer));
+ WARN_ON(dl_se->dl_non_contending);
+
+ zerolag_time = dl_se->deadline -
+@@ -234,7 +233,7 @@ static void task_non_contending(struct task_struct *p)
+ * If the "0-lag time" already passed, decrease the active
+ * utilization now, instead of starting a timer
+ */
+- if (zerolag_time < 0) {
++ if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
+ if (dl_task(p))
+ sub_running_bw(dl_se->dl_bw, dl_rq);
+ if (!dl_task(p) || p->state == TASK_DEAD) {
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index a5d163903835..af7de1f9906c 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -2026,6 +2026,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
+ if (p->last_task_numa_placement) {
+ delta = runtime - p->last_sum_exec_runtime;
+ *period = now - p->last_task_numa_placement;
++
++ /* Avoid time going backwards, prevent potential divide error: */
++ if (unlikely((s64)*period < 0))
++ *period = 0;
+ } else {
+ delta = p->se.avg.load_sum / p->se.load.weight;
+ *period = LOAD_AVG_MAX;
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 5f7f4f07499f..8123a8b53c54 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -700,7 +700,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
+
+ preempt_disable_notrace();
+ time = rb_time_stamp(buffer);
+- preempt_enable_no_resched_notrace();
++ preempt_enable_notrace();
+
+ return time;
+ }
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index ffddb5ac255c..591be15404a1 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -494,8 +494,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
+ * not modified.
+ */
+ pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
+- if (!pid_list)
++ if (!pid_list) {
++ trace_parser_put(&parser);
+ return -ENOMEM;
++ }
+
+ pid_list->pid_max = READ_ONCE(pid_max);
+
+@@ -505,6 +507,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
+
+ pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
+ if (!pid_list->pids) {
++ trace_parser_put(&parser);
+ kfree(pid_list);
+ return -ENOMEM;
+ }
+@@ -6716,19 +6719,23 @@ struct buffer_ref {
+ struct ring_buffer *buffer;
+ void *page;
+ int cpu;
+- int ref;
++ refcount_t refcount;
+ };
+
++static void buffer_ref_release(struct buffer_ref *ref)
++{
++ if (!refcount_dec_and_test(&ref->refcount))
++ return;
++ ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
++ kfree(ref);
++}
++
+ static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+ {
+ struct buffer_ref *ref = (struct buffer_ref *)buf->private;
+
+- if (--ref->ref)
+- return;
+-
+- ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
+- kfree(ref);
++ buffer_ref_release(ref);
+ buf->private = 0;
+ }
+
+@@ -6737,7 +6744,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
+ {
+ struct buffer_ref *ref = (struct buffer_ref *)buf->private;
+
+- ref->ref++;
++ refcount_inc(&ref->refcount);
+ }
+
+ /* Pipe buffer operations for a buffer. */
+@@ -6745,7 +6752,7 @@ static const struct pipe_buf_operations buffer_pipe_buf_ops = {
+ .can_merge = 0,
+ .confirm = generic_pipe_buf_confirm,
+ .release = buffer_pipe_buf_release,
+- .steal = generic_pipe_buf_steal,
++ .steal = generic_pipe_buf_nosteal,
+ .get = buffer_pipe_buf_get,
+ };
+
+@@ -6758,11 +6765,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
+ struct buffer_ref *ref =
+ (struct buffer_ref *)spd->partial[i].private;
+
+- if (--ref->ref)
+- return;
+-
+- ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
+- kfree(ref);
++ buffer_ref_release(ref);
+ spd->partial[i].private = 0;
+ }
+
+@@ -6817,7 +6820,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ break;
+ }
+
+- ref->ref = 1;
++ refcount_set(&ref->refcount, 1);
+ ref->buffer = iter->trace_buffer->buffer;
+ ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
+ if (IS_ERR(ref->page)) {
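Besides fixing the release paths, converting buffer_ref from a bare int to refcount_t hardens the counter: refcount_inc() saturates and warns on overflow instead of wrapping, so an over-referenced page can no longer reach zero early and be freed while still mapped. The conversion itself is mechanical:

    refcount_set(&ref->refcount, 1);           /* was: ref->ref = 1;      */
    refcount_inc(&ref->refcount);              /* was: ref->ref++;        */
    if (refcount_dec_and_test(&ref->refcount)) /* was: if (!--ref->ref)   */
            kfree(ref);                        /* last reference dropped  */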
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 62d0e25c054c..131d5871f8c9 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1884,6 +1884,7 @@ config TEST_KMOD
+ depends on m
+ depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
+ depends on NETDEVICES && NET_CORE && INET # for TUN
++ depends on BLOCK
+ select TEST_LKM
+ select XFS_FS
+ select TUN
+diff --git a/mm/memory.c b/mm/memory.c
+index fb9f7737c1ff..f99b64ca1303 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1804,10 +1804,15 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+ * in may not match the PFN we have mapped if the
+ * mapped PFN is a writeable COW page. In the mkwrite
+ * case we are creating a writable PTE for a shared
+- * mapping and we expect the PFNs to match.
++ * mapping and we expect the PFNs to match. If they
++ * don't match, we are likely racing with block
++ * allocation and mapping invalidation so just skip the
++ * update.
+ */
+- if (WARN_ON_ONCE(pte_pfn(*pte) != pfn_t_to_pfn(pfn)))
++ if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
++ WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
+ goto out_unlock;
++ }
+ entry = *pte;
+ goto out_mkwrite;
+ } else
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 38b3309edba8..b967bd51bf1f 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -2030,7 +2030,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
+ if (match_kern)
+ match_kern->match_size = ret;
+
+- if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
++ /* rule should have no remaining data after target */
++ if (type == EBT_COMPAT_TARGET && size_left)
+ return -EINVAL;
+
+ match32 = (struct compat_ebt_entry_mwt *) buf;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index c64f062d6323..6a7e187dd0a9 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1192,25 +1192,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+ return dst;
+ }
+
+-static void ipv4_link_failure(struct sk_buff *skb)
++static void ipv4_send_dest_unreach(struct sk_buff *skb)
+ {
+ struct ip_options opt;
+- struct rtable *rt;
+ int res;
+
+ /* Recompile ip options since IPCB may not be valid anymore.
++ * Also check we have a reasonable ipv4 header.
+ */
+- memset(&opt, 0, sizeof(opt));
+- opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
++ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
++ ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
++ return;
+
+- rcu_read_lock();
+- res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+- rcu_read_unlock();
++ memset(&opt, 0, sizeof(opt));
++ if (ip_hdr(skb)->ihl > 5) {
++ if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
++ return;
++ opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
+
+- if (res)
+- return;
++ rcu_read_lock();
++ res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
++ rcu_read_unlock();
+
++ if (res)
++ return;
++ }
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
++}
++
++static void ipv4_link_failure(struct sk_buff *skb)
++{
++ struct rtable *rt;
++
++ ipv4_send_dest_unreach(skb);
+
+ rt = skb_rtable(skb);
+ if (rt)
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index d82e8344fc54..e8caab8e2f5c 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -45,6 +45,7 @@ static int tcp_syn_retries_min = 1;
+ static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
+ static int ip_ping_group_range_min[] = { 0, 0 };
+ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
++static int one_day_secs = 24 * 3600;
+
+ /* obsolete */
+ static int sysctl_tcp_low_latency __read_mostly;
+@@ -552,7 +553,9 @@ static struct ctl_table ipv4_table[] = {
+ .data = &sysctl_tcp_min_rtt_wlen,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &one_day_secs
+ },
+ {
+ .procname = "tcp_low_latency",
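With proc_dointvec_minmax and the extra1/extra2 bounds, writes outside 0..86400 now fail with -EINVAL instead of being stored; very large second counts could previously overflow when the window was converted to jiffies. This is the enforcement half of the ip-sysctl.txt documentation hunk at the top of the patch.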
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 56dd5ce6274f..6d7608b88f66 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -889,12 +889,13 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
+ {
+ struct ip_vs_dest *dest;
+ unsigned int atype, i;
+- int ret = 0;
+
+ EnterFunction(2);
+
+ #ifdef CONFIG_IP_VS_IPV6
+ if (udest->af == AF_INET6) {
++ int ret;
++
+ atype = ipv6_addr_type(&udest->addr.in6);
+ if ((!(atype & IPV6_ADDR_UNICAST) ||
+ atype & IPV6_ADDR_LINKLOCAL) &&
+diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
+index 86ef907067bb..353b59d3bd44 100644
+--- a/net/rds/ib_fmr.c
++++ b/net/rds/ib_fmr.c
+@@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
+ else
+ pool = rds_ibdev->mr_1m_pool;
+
++ if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
++ queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
++
++ /* Switch pools if one of the pool is reaching upper limit */
++ if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
++ if (pool->pool_type == RDS_IB_MR_8K_POOL)
++ pool = rds_ibdev->mr_1m_pool;
++ else
++ pool = rds_ibdev->mr_8k_pool;
++ }
++
+ ibmr = rds_ib_try_reuse_ibmr(pool);
+ if (ibmr)
+ return ibmr;
+diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
+index 9a3c54e659e9..fe5d2e8a95d9 100644
+--- a/net/rds/ib_rdma.c
++++ b/net/rds/ib_rdma.c
+@@ -442,9 +442,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
+ struct rds_ib_mr *ibmr = NULL;
+ int iter = 0;
+
+- if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
+- queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+-
+ while (1) {
+ ibmr = rds_ib_reuse_mr(pool);
+ if (ibmr)
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 4a9729257023..6a5c4992cf61 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -318,9 +318,11 @@ void rose_destroy_socket(struct sock *);
+ /*
+ * Handler for deferred kills.
+ */
+-static void rose_destroy_timer(unsigned long data)
++static void rose_destroy_timer(struct timer_list *t)
+ {
+- rose_destroy_socket((struct sock *)data);
++ struct sock *sk = from_timer(sk, t, sk_timer);
++
++ rose_destroy_socket(sk);
+ }
+
+ /*
+@@ -353,8 +355,7 @@ void rose_destroy_socket(struct sock *sk)
+
+ if (sk_has_allocations(sk)) {
+ /* Defer: outstanding buffers */
+- setup_timer(&sk->sk_timer, rose_destroy_timer,
+- (unsigned long)sk);
++ timer_setup(&sk->sk_timer, rose_destroy_timer, 0);
+ sk->sk_timer.expires = jiffies + 10 * HZ;
+ add_timer(&sk->sk_timer);
+ } else
+@@ -538,8 +539,8 @@ static int rose_create(struct net *net, struct socket *sock, int protocol,
+ sock->ops = &rose_proto_ops;
+ sk->sk_protocol = protocol;
+
+- init_timer(&rose->timer);
+- init_timer(&rose->idletimer);
++ timer_setup(&rose->timer, NULL, 0);
++ timer_setup(&rose->idletimer, NULL, 0);
+
+ rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout);
+ rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
+@@ -582,8 +583,8 @@ static struct sock *rose_make_new(struct sock *osk)
+ sk->sk_state = TCP_ESTABLISHED;
+ sock_copy_flags(sk, osk);
+
+- init_timer(&rose->timer);
+- init_timer(&rose->idletimer);
++ timer_setup(&rose->timer, NULL, 0);
++ timer_setup(&rose->idletimer, NULL, 0);
+
+ orose = rose_sk(osk);
+ rose->t1 = orose->t1;
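This file and the two rose files that follow are all instances of the same 4.14-era timer-API migration: callbacks take a struct timer_list * and recover their owning object with from_timer() rather than casting the old ->data cookie, and init_timer()/setup_timer() collapse into timer_setup(). Before/after, using names from this patch:

    /* before */
    static void rose_destroy_timer(unsigned long data)
    {
            rose_destroy_socket((struct sock *)data);
    }
    setup_timer(&sk->sk_timer, rose_destroy_timer, (unsigned long)sk);

    /* after */
    static void rose_destroy_timer(struct timer_list *t)
    {
            struct sock *sk = from_timer(sk, t, sk_timer);

            rose_destroy_socket(sk);
    }
    timer_setup(&sk->sk_timer, rose_destroy_timer, 0);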
+diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
+index c76638cc2cd5..cda4c6678ef1 100644
+--- a/net/rose/rose_link.c
++++ b/net/rose/rose_link.c
+@@ -27,8 +27,8 @@
+ #include <linux/interrupt.h>
+ #include <net/rose.h>
+
+-static void rose_ftimer_expiry(unsigned long);
+-static void rose_t0timer_expiry(unsigned long);
++static void rose_ftimer_expiry(struct timer_list *);
++static void rose_t0timer_expiry(struct timer_list *);
+
+ static void rose_transmit_restart_confirmation(struct rose_neigh *neigh);
+ static void rose_transmit_restart_request(struct rose_neigh *neigh);
+@@ -37,8 +37,7 @@ void rose_start_ftimer(struct rose_neigh *neigh)
+ {
+ del_timer(&neigh->ftimer);
+
+- neigh->ftimer.data = (unsigned long)neigh;
+- neigh->ftimer.function = &rose_ftimer_expiry;
++ neigh->ftimer.function = (TIMER_FUNC_TYPE)rose_ftimer_expiry;
+ neigh->ftimer.expires =
+ jiffies + msecs_to_jiffies(sysctl_rose_link_fail_timeout);
+
+@@ -49,8 +48,7 @@ static void rose_start_t0timer(struct rose_neigh *neigh)
+ {
+ del_timer(&neigh->t0timer);
+
+- neigh->t0timer.data = (unsigned long)neigh;
+- neigh->t0timer.function = &rose_t0timer_expiry;
++ neigh->t0timer.function = (TIMER_FUNC_TYPE)rose_t0timer_expiry;
+ neigh->t0timer.expires =
+ jiffies + msecs_to_jiffies(sysctl_rose_restart_request_timeout);
+
+@@ -77,13 +75,13 @@ static int rose_t0timer_running(struct rose_neigh *neigh)
+ return timer_pending(&neigh->t0timer);
+ }
+
+-static void rose_ftimer_expiry(unsigned long param)
++static void rose_ftimer_expiry(struct timer_list *t)
+ {
+ }
+
+-static void rose_t0timer_expiry(unsigned long param)
++static void rose_t0timer_expiry(struct timer_list *t)
+ {
+- struct rose_neigh *neigh = (struct rose_neigh *)param;
++ struct rose_neigh *neigh = from_timer(neigh, t, t0timer);
+
+ rose_transmit_restart_request(neigh);
+
+diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
+index 344456206b70..094a6621f8e8 100644
+--- a/net/rose/rose_loopback.c
++++ b/net/rose/rose_loopback.c
+@@ -16,15 +16,17 @@
+ #include <linux/init.h>
+
+ static struct sk_buff_head loopback_queue;
++#define ROSE_LOOPBACK_LIMIT 1000
+ static struct timer_list loopback_timer;
+
+ static void rose_set_loopback_timer(void);
++static void rose_loopback_timer(struct timer_list *unused);
+
+ void rose_loopback_init(void)
+ {
+ skb_queue_head_init(&loopback_queue);
+
+- init_timer(&loopback_timer);
++ timer_setup(&loopback_timer, rose_loopback_timer, 0);
+ }
+
+ static int rose_loopback_running(void)
+@@ -34,36 +36,30 @@ static int rose_loopback_running(void)
+
+ int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
+ {
+- struct sk_buff *skbn;
++ struct sk_buff *skbn = NULL;
+
+- skbn = skb_clone(skb, GFP_ATOMIC);
++ if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT)
++ skbn = skb_clone(skb, GFP_ATOMIC);
+
+- kfree_skb(skb);
+-
+- if (skbn != NULL) {
++ if (skbn) {
++ consume_skb(skb);
+ skb_queue_tail(&loopback_queue, skbn);
+
+ if (!rose_loopback_running())
+ rose_set_loopback_timer();
++ } else {
++ kfree_skb(skb);
+ }
+
+ return 1;
+ }
+
+-static void rose_loopback_timer(unsigned long);
+-
+ static void rose_set_loopback_timer(void)
+ {
+- del_timer(&loopback_timer);
+-
+- loopback_timer.data = 0;
+- loopback_timer.function = &rose_loopback_timer;
+- loopback_timer.expires = jiffies + 10;
+-
+- add_timer(&loopback_timer);
++ mod_timer(&loopback_timer, jiffies + 10);
+ }
+
+-static void rose_loopback_timer(unsigned long param)
++static void rose_loopback_timer(struct timer_list *unused)
+ {
+ struct sk_buff *skb;
+ struct net_device *dev;
+@@ -71,8 +67,12 @@ static void rose_loopback_timer(unsigned long param)
+ struct sock *sk;
+ unsigned short frametype;
+ unsigned int lci_i, lci_o;
++ int count;
+
+- while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
++ for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) {
++ skb = skb_dequeue(&loopback_queue);
++ if (!skb)
++ return;
+ if (skb->len < ROSE_MIN_LEN) {
+ kfree_skb(skb);
+ continue;
+@@ -109,6 +109,8 @@ static void rose_loopback_timer(unsigned long param)
+ kfree_skb(skb);
+ }
+ }
++ if (!skb_queue_empty(&loopback_queue))
++ mod_timer(&loopback_timer, jiffies + 1);
+ }
+
+ void __exit rose_loopback_clear(void)
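The loopback hunks above bound the queue at both ends: rose_loopback_queue() now drops frames once ROSE_LOOPBACK_LIMIT are pending, and the timer handler drains at most that many per run, re-arming itself when frames remain. A small standalone model of the shape, with a counter standing in for the skb queue:

#include <stdbool.h>
#include <stdio.h>

#define LOOPBACK_LIMIT 1000

static int queue_len;		/* stand-in for skb_queue_len() */

static bool loopback_enqueue(void)
{
	if (queue_len >= LOOPBACK_LIMIT)
		return false;	/* drop rather than grow without bound */
	queue_len++;
	return true;
}

static void loopback_timer(void)
{
	int count;

	/* Do a bounded amount of work per tick... */
	for (count = 0; count < LOOPBACK_LIMIT && queue_len > 0; count++)
		queue_len--;	/* deliver one frame */

	/* ...and reschedule ourselves if anything is left over. */
	if (queue_len > 0)
		printf("mod_timer: run again next jiffy\n");
}

int main(void)
{
	for (int i = 0; i < 1500; i++)
		loopback_enqueue();	/* 500 of these are dropped */
	loopback_timer();
	return 0;
}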
+diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
+index 2741abec7ee7..d94d6110bb1c 100644
+--- a/net/rose/rose_route.c
++++ b/net/rose/rose_route.c
+@@ -104,8 +104,8 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
+
+ skb_queue_head_init(&rose_neigh->queue);
+
+- init_timer(&rose_neigh->ftimer);
+- init_timer(&rose_neigh->t0timer);
++ timer_setup(&rose_neigh->ftimer, NULL, 0);
++ timer_setup(&rose_neigh->t0timer, NULL, 0);
+
+ if (rose_route->ndigis != 0) {
+ rose_neigh->digipeat =
+@@ -390,8 +390,8 @@ void rose_add_loopback_neigh(void)
+
+ skb_queue_head_init(&sn->queue);
+
+- init_timer(&sn->ftimer);
+- init_timer(&sn->t0timer);
++ timer_setup(&sn->ftimer, NULL, 0);
++ timer_setup(&sn->t0timer, NULL, 0);
+
+ spin_lock_bh(&rose_neigh_list_lock);
+ sn->next = rose_neigh_list;
+diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c
+index bc5469d6d9cb..3b89d66f15bb 100644
+--- a/net/rose/rose_timer.c
++++ b/net/rose/rose_timer.c
+@@ -29,8 +29,8 @@
+ #include <net/rose.h>
+
+ static void rose_heartbeat_expiry(unsigned long);
+-static void rose_timer_expiry(unsigned long);
+-static void rose_idletimer_expiry(unsigned long);
++static void rose_timer_expiry(struct timer_list *);
++static void rose_idletimer_expiry(struct timer_list *);
+
+ void rose_start_heartbeat(struct sock *sk)
+ {
+@@ -49,8 +49,7 @@ void rose_start_t1timer(struct sock *sk)
+
+ del_timer(&rose->timer);
+
+- rose->timer.data = (unsigned long)sk;
+- rose->timer.function = &rose_timer_expiry;
++ rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+ rose->timer.expires = jiffies + rose->t1;
+
+ add_timer(&rose->timer);
+@@ -62,8 +61,7 @@ void rose_start_t2timer(struct sock *sk)
+
+ del_timer(&rose->timer);
+
+- rose->timer.data = (unsigned long)sk;
+- rose->timer.function = &rose_timer_expiry;
++ rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+ rose->timer.expires = jiffies + rose->t2;
+
+ add_timer(&rose->timer);
+@@ -75,8 +73,7 @@ void rose_start_t3timer(struct sock *sk)
+
+ del_timer(&rose->timer);
+
+- rose->timer.data = (unsigned long)sk;
+- rose->timer.function = &rose_timer_expiry;
++ rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+ rose->timer.expires = jiffies + rose->t3;
+
+ add_timer(&rose->timer);
+@@ -88,8 +85,7 @@ void rose_start_hbtimer(struct sock *sk)
+
+ del_timer(&rose->timer);
+
+- rose->timer.data = (unsigned long)sk;
+- rose->timer.function = &rose_timer_expiry;
++ rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+ rose->timer.expires = jiffies + rose->hb;
+
+ add_timer(&rose->timer);
+@@ -102,8 +98,7 @@ void rose_start_idletimer(struct sock *sk)
+ del_timer(&rose->idletimer);
+
+ if (rose->idle > 0) {
+- rose->idletimer.data = (unsigned long)sk;
+- rose->idletimer.function = &rose_idletimer_expiry;
++ rose->idletimer.function = (TIMER_FUNC_TYPE)rose_idletimer_expiry;
+ rose->idletimer.expires = jiffies + rose->idle;
+
+ add_timer(&rose->idletimer);
+@@ -163,10 +158,10 @@ static void rose_heartbeat_expiry(unsigned long param)
+ bh_unlock_sock(sk);
+ }
+
+-static void rose_timer_expiry(unsigned long param)
++static void rose_timer_expiry(struct timer_list *t)
+ {
+- struct sock *sk = (struct sock *)param;
+- struct rose_sock *rose = rose_sk(sk);
++ struct rose_sock *rose = from_timer(rose, t, timer);
++ struct sock *sk = &rose->sock;
+
+ bh_lock_sock(sk);
+ switch (rose->state) {
+@@ -192,9 +187,10 @@ static void rose_timer_expiry(unsigned long param)
+ bh_unlock_sock(sk);
+ }
+
+-static void rose_idletimer_expiry(unsigned long param)
++static void rose_idletimer_expiry(struct timer_list *t)
+ {
+- struct sock *sk = (struct sock *)param;
++ struct rose_sock *rose = from_timer(rose, t, idletimer);
++ struct sock *sk = &rose->sock;
+
+ bh_lock_sock(sk);
+ rose_clear_queues(sk);
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index f2cf4edf219b..475b453dc7ae 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -54,6 +54,7 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
+ h->last_refresh = now;
+ }
+
++static inline int cache_is_valid(struct cache_head *h);
+ static void cache_fresh_locked(struct cache_head *head, time_t expiry,
+ struct cache_detail *detail);
+ static void cache_fresh_unlocked(struct cache_head *head,
+@@ -100,6 +101,8 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
+ if (cache_is_expired(detail, tmp)) {
+ hlist_del_init(&tmp->cache_list);
+ detail->entries --;
++ if (cache_is_valid(tmp) == -EAGAIN)
++ set_bit(CACHE_NEGATIVE, &tmp->flags);
+ cache_fresh_locked(tmp, 0, detail);
+ freeme = tmp;
+ break;
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index 73895daf8943..aa75bc8b158f 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -262,8 +262,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
+ if (msg->rep_type)
+ tipc_tlv_init(msg->rep, msg->rep_type);
+
+- if (cmd->header)
+- (*cmd->header)(msg);
++ if (cmd->header) {
++ err = (*cmd->header)(msg);
++ if (err) {
++ kfree_skb(msg->rep);
++ msg->rep = NULL;
++ return err;
++ }
++ }
+
+ arg = nlmsg_new(0, GFP_KERNEL);
+ if (!arg) {
+@@ -388,7 +394,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
+ if (!bearer)
+ return -EMSGSIZE;
+
+- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
++ len = TLV_GET_DATA_LEN(msg->req);
++ len -= offsetof(struct tipc_bearer_config, name);
++ if (len <= 0)
++ return -EINVAL;
++
++ len = min_t(int, len, TIPC_MAX_BEARER_NAME);
+ if (!string_is_valid(b->name, len))
+ return -EINVAL;
+
+@@ -757,7 +768,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
+
+ lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
++ len = TLV_GET_DATA_LEN(msg->req);
++ len -= offsetof(struct tipc_link_config, name);
++ if (len <= 0)
++ return -EINVAL;
++
++ len = min_t(int, len, TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
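Both TIPC hunks above fix the same overread: the name string was validated against the whole TLV payload length without first subtracting the fixed fields that precede the name, so an undersized TLV sent string_is_valid() past the buffer. A simplified model of the corrected check (the struct layout here is a stand-in, not TIPC's exact one):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MAX_BEARER_NAME 32

struct bearer_config_model {
	unsigned int priority;
	unsigned int disc_domain;
	char name[MAX_BEARER_NAME];
};

static bool string_is_valid(const char *s, int len)
{
	return memchr(s, '\0', len) != NULL;	/* NUL inside bounds? */
}

static int check_bearer_name(const struct bearer_config_model *b,
			     int tlv_data_len)
{
	/* Only the bytes after the fixed fields can hold the name. */
	int len = tlv_data_len -
		  (int)offsetof(struct bearer_config_model, name);

	if (len <= 0)
		return -1;	/* EINVAL: TLV too short for any name */
	if (len > MAX_BEARER_NAME)
		len = MAX_BEARER_NAME;

	return string_is_valid(b->name, len) ? 0 : -1;
}

int main(void)
{
	struct bearer_config_model b = { .name = "eth:dev0" };

	printf("short TLV: %d\n", check_bearer_name(&b, 4));
	printf("full TLV:  %d\n", check_bearer_name(&b, (int)sizeof(b)));
	return 0;
}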
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index edba7ab97563..40a8731c663b 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -662,6 +662,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
+ */
+ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
+ {
++ const struct virtio_transport *t;
++ struct virtio_vsock_pkt *reply;
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_RST,
+ .type = le16_to_cpu(pkt->hdr.type),
+@@ -672,15 +674,21 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
+ if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
+ return 0;
+
+- pkt = virtio_transport_alloc_pkt(&info, 0,
+- le64_to_cpu(pkt->hdr.dst_cid),
+- le32_to_cpu(pkt->hdr.dst_port),
+- le64_to_cpu(pkt->hdr.src_cid),
+- le32_to_cpu(pkt->hdr.src_port));
+- if (!pkt)
++ reply = virtio_transport_alloc_pkt(&info, 0,
++ le64_to_cpu(pkt->hdr.dst_cid),
++ le32_to_cpu(pkt->hdr.dst_port),
++ le64_to_cpu(pkt->hdr.src_cid),
++ le32_to_cpu(pkt->hdr.src_port));
++ if (!reply)
+ return -ENOMEM;
+
+- return virtio_transport_get_ops()->send_pkt(pkt);
++ t = virtio_transport_get_ops();
++ if (!t) {
++ virtio_transport_free_pkt(reply);
++ return -ENOTCONN;
++ }
++
++ return t->send_pkt(reply);
+ }
+
+ static void virtio_transport_wait_close(struct sock *sk, long timeout)
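The vsock hunk above makes two related fixes: the RST reply gets its own pointer so the request packet's header stays readable while the reply is built, and the transport ops are checked for NULL so a reply allocated during transport teardown is freed rather than sent through a dangling pointer. A reduced model of that error path, with stand-in types:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt_model {
	int type;
};

struct transport_model {
	int (*send_pkt)(struct pkt_model *p);
};

static struct transport_model *registered;	/* NULL once torn down */

static int reset_no_sock(const struct pkt_model *req)
{
	struct transport_model *t;
	struct pkt_model *reply = malloc(sizeof(*reply));

	if (!reply)
		return -ENOMEM;
	reply->type = req->type;	/* req is still valid here */

	t = registered;
	if (!t) {
		free(reply);		/* don't leak on teardown */
		return -ENOTCONN;
	}
	return t->send_pkt(reply);
}

int main(void)
{
	struct pkt_model req = { .type = 1 };

	registered = NULL;		/* simulate transport teardown */
	printf("reset_no_sock: %d\n", reset_no_sock(&req));
	return 0;
}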
+diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
+index a0ad87e869f9..a33fa1a91873 100644
+--- a/scripts/Kbuild.include
++++ b/scripts/Kbuild.include
+@@ -165,9 +165,7 @@ cc-ldoption = $(call try-run,\
+
+ # ld-option
+ # Usage: LDFLAGS += $(call ld-option, -X)
+-ld-option = $(call try-run,\
+- $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -x c /dev/null -c -o "$$TMPO"; \
+- $(LD) $(LDFLAGS) $(1) "$$TMPO" -o "$$TMP",$(1),$(2))
++ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2))
+
+ # ar-option
+ # Usage: KBUILD_ARFLAGS := $(call ar-option,D)