diff options
author | 2019-12-05 02:04:26 +0100 | |
---|---|---|
committer | 2019-12-05 02:04:26 +0100 | |
commit | cc91c657acab8ab6570e424e6b949774ea6eb5c0 (patch) | |
tree | 83cb4655ea4e35b061c96b21b99a60943f34b979 | |
parent | Linux patch 5.4.1 (diff) | |
download | linux-patches-cc91c657acab8ab6570e424e6b949774ea6eb5c0.tar.gz linux-patches-cc91c657acab8ab6570e424e6b949774ea6eb5c0.tar.bz2 linux-patches-cc91c657acab8ab6570e424e6b949774ea6eb5c0.zip |
Linux patch 5.4.2
Signed-off-by: Thomas Deutschmann <whissi@whissi.de>
-rw-r--r-- | 1001_linux-5.4.2.patch | 1945 |
1 files changed, 1945 insertions, 0 deletions
diff --git a/1001_linux-5.4.2.patch b/1001_linux-5.4.2.patch new file mode 100644 index 00000000..fba0134f --- /dev/null +++ b/1001_linux-5.4.2.patch @@ -0,0 +1,1945 @@ +diff --git a/Makefile b/Makefile +index 641a62423fd6..e67f2e95b71d 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 1 ++SUBLEVEL = 2 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h +index 4c95c365058a..44c48e34d799 100644 +--- a/arch/x86/include/asm/fpu/internal.h ++++ b/arch/x86/include/asm/fpu/internal.h +@@ -509,7 +509,7 @@ static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu) + + static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu) + { +- return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu; ++ return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu; + } + + /* +diff --git a/drivers/base/platform.c b/drivers/base/platform.c +index b230beb6ccb4..3c0cd20925b7 100644 +--- a/drivers/base/platform.c ++++ b/drivers/base/platform.c +@@ -1278,6 +1278,11 @@ struct bus_type platform_bus_type = { + }; + EXPORT_SYMBOL_GPL(platform_bus_type); + ++static inline int __platform_match(struct device *dev, const void *drv) ++{ ++ return platform_match(dev, (struct device_driver *)drv); ++} ++ + /** + * platform_find_device_by_driver - Find a platform device with a given + * driver. 
+@@ -1288,7 +1293,7 @@ struct device *platform_find_device_by_driver(struct device *start, + const struct device_driver *drv) + { + return bus_find_device(&platform_bus_type, start, drv, +- (void *)platform_match); ++ __platform_match); + } + EXPORT_SYMBOL_GPL(platform_find_device_by_driver); + +diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig +index 1fb622f2a87d..8eabf7b20101 100644 +--- a/drivers/crypto/Kconfig ++++ b/drivers/crypto/Kconfig +@@ -287,6 +287,7 @@ config CRYPTO_DEV_TALITOS + select CRYPTO_AUTHENC + select CRYPTO_BLKCIPHER + select CRYPTO_HASH ++ select CRYPTO_LIB_DES + select HW_RANDOM + depends on FSL_SOC + help +diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c +index 4ab1bde8dd9b..294debd435b6 100644 +--- a/drivers/crypto/inside-secure/safexcel.c ++++ b/drivers/crypto/inside-secure/safexcel.c +@@ -221,9 +221,9 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) + /* Step #3: Determine log2 of hash table size */ + cs_ht_sz = __fls(asize - cs_rc_max) - 2; + /* Step #4: determine current size of hash table in dwords */ +- cs_ht_wc = 16<<cs_ht_sz; /* dwords, not admin words */ ++ cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */ + /* Step #5: add back excess words and see if we can fit more records */ +- cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 4)); ++ cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2)); + + /* Clear the cache RAMs */ + eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc); +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 63fdbf09b044..2fa3587d974f 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -211,6 +211,18 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type) + return 0; /* we know nothing about this usage type */ + } + ++/* ++ * Concatenate usage which defines 16 bits or less with the ++ * currently defined usage page to form a 32 bit usage ++ */ ++ 
++static void complete_usage(struct hid_parser *parser, unsigned int index) ++{ ++ parser->local.usage[index] &= 0xFFFF; ++ parser->local.usage[index] |= ++ (parser->global.usage_page & 0xFFFF) << 16; ++} ++ + /* + * Add a usage to the temporary parser table. + */ +@@ -222,6 +234,14 @@ static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size) + return -1; + } + parser->local.usage[parser->local.usage_index] = usage; ++ ++ /* ++ * If Usage item only includes usage id, concatenate it with ++ * currently defined usage page ++ */ ++ if (size <= 2) ++ complete_usage(parser, parser->local.usage_index); ++ + parser->local.usage_size[parser->local.usage_index] = size; + parser->local.collection_index[parser->local.usage_index] = + parser->collection_stack_ptr ? +@@ -543,13 +563,32 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) + * usage value." + */ + +-static void hid_concatenate_usage_page(struct hid_parser *parser) ++static void hid_concatenate_last_usage_page(struct hid_parser *parser) + { + int i; ++ unsigned int usage_page; ++ unsigned int current_page; + +- for (i = 0; i < parser->local.usage_index; i++) +- if (parser->local.usage_size[i] <= 2) +- parser->local.usage[i] += parser->global.usage_page << 16; ++ if (!parser->local.usage_index) ++ return; ++ ++ usage_page = parser->global.usage_page; ++ ++ /* ++ * Concatenate usage page again only if last declared Usage Page ++ * has not been already used in previous usages concatenation ++ */ ++ for (i = parser->local.usage_index - 1; i >= 0; i--) { ++ if (parser->local.usage_size[i] > 2) ++ /* Ignore extended usages */ ++ continue; ++ ++ current_page = parser->local.usage[i] >> 16; ++ if (current_page == usage_page) ++ break; ++ ++ complete_usage(parser, i); ++ } + } + + /* +@@ -561,7 +600,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item) + __u32 data; + int ret; + +- hid_concatenate_usage_page(parser); ++ 
hid_concatenate_last_usage_page(parser); + + data = item_udata(item); + +@@ -772,7 +811,7 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item) + __u32 data; + int i; + +- hid_concatenate_usage_page(parser); ++ hid_concatenate_last_usage_page(parser); + + data = item_udata(item); + +diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c +index 985bd4fd3328..53bb394ccba6 100644 +--- a/drivers/misc/mei/bus.c ++++ b/drivers/misc/mei/bus.c +@@ -873,15 +873,16 @@ static const struct device_type mei_cl_device_type = { + + /** + * mei_cl_bus_set_name - set device name for me client device ++ * <controller>-<client device> ++ * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb + * + * @cldev: me client device + */ + static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev) + { +- dev_set_name(&cldev->dev, "mei:%s:%pUl:%02X", +- cldev->name, +- mei_me_cl_uuid(cldev->me_cl), +- mei_me_cl_ver(cldev->me_cl)); ++ dev_set_name(&cldev->dev, "%s-%pUl", ++ dev_name(cldev->bus->dev), ++ mei_me_cl_uuid(cldev->me_cl)); + } + + /** +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h +index c09f8bb49495..b359f06f05e7 100644 +--- a/drivers/misc/mei/hw-me-regs.h ++++ b/drivers/misc/mei/hw-me-regs.h +@@ -81,6 +81,7 @@ + + #define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */ + #define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */ ++#define MEI_DEV_ID_CMP_V 0xA3BA /* Comet Point Lake V */ + + #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ + +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c +index 3dca63eddaa0..ce43415a536c 100644 +--- a/drivers/misc/mei/pci-me.c ++++ b/drivers/misc/mei/pci-me.c +@@ -98,6 +98,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = { + + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)}, ++ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, 
MEI_ME_PCH12_CFG)}, + +diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c +index 7687ddcae159..aa140662c7c2 100644 +--- a/drivers/net/dsa/sja1105/sja1105_main.c ++++ b/drivers/net/dsa/sja1105/sja1105_main.c +@@ -594,15 +594,15 @@ static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, + int i; + + for (i = 0; i < SJA1105_NUM_PORTS; i++) { +- if (ports->role == XMII_MAC) ++ if (ports[i].role == XMII_MAC) + continue; + +- if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_RXID || +- ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID) ++ if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID || ++ ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) + priv->rgmii_rx_delay[i] = true; + +- if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || +- ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID) ++ if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || ++ ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) + priv->rgmii_tx_delay[i] = true; + + if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) && +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c +index 1e1b774e1953..0f10a272827c 100644 +--- a/drivers/net/ethernet/cadence/macb_main.c ++++ b/drivers/net/ethernet/cadence/macb_main.c +@@ -4392,6 +4392,7 @@ static int macb_remove(struct platform_device *pdev) + mdiobus_free(bp->mii_bus); + + unregister_netdev(dev); ++ tasklet_kill(&bp->hresp_err_tasklet); + pm_runtime_disable(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); + if (!pm_runtime_suspended(&pdev->dev)) { +diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c +index aca95f64bde8..9b7a8db9860f 100644 +--- a/drivers/net/ethernet/google/gve/gve_main.c ++++ b/drivers/net/ethernet/google/gve/gve_main.c +@@ -544,7 +544,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, + } + + qpl->id = id; +- qpl->num_entries = pages; ++ qpl->num_entries = 0; + 
qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL); + /* caller handles clean up */ + if (!qpl->pages) +@@ -562,6 +562,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, + /* caller handles clean up */ + if (err) + return -ENOMEM; ++ qpl->num_entries++; + } + priv->num_registered_pages += pages; + +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c +index c33c438850cc..1d67eeeab79d 100644 +--- a/drivers/net/ethernet/realtek/r8169_main.c ++++ b/drivers/net/ethernet/realtek/r8169_main.c +@@ -1516,6 +1516,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) + rtl_lock_config_regs(tp); + + device_set_wakeup_enable(tp_to_dev(tp), wolopts); ++ tp->dev->wol_enabled = wolopts ? 1 : 0; + } + + static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +@@ -4118,7 +4119,7 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) + case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: + r8168dp_hw_jumbo_enable(tp); + break; +- case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34: ++ case RTL_GIGA_MAC_VER_31 ... 
RTL_GIGA_MAC_VER_33: + r8168e_hw_jumbo_enable(tp); + break; + default: +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c +index 34fc59bd1e20..05631d97eeb4 100644 +--- a/drivers/net/macvlan.c ++++ b/drivers/net/macvlan.c +@@ -359,10 +359,11 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, + } + spin_unlock(&port->bc_queue.lock); + ++ schedule_work(&port->bc_work); ++ + if (err) + goto free_nskb; + +- schedule_work(&port->bc_work); + return; + + free_nskb: +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c +index dbacb0031877..229e480179ff 100644 +--- a/drivers/net/phy/mdio_bus.c ++++ b/drivers/net/phy/mdio_bus.c +@@ -62,8 +62,8 @@ static int mdiobus_register_reset(struct mdio_device *mdiodev) + struct reset_control *reset = NULL; + + if (mdiodev->dev.of_node) +- reset = devm_reset_control_get_exclusive(&mdiodev->dev, +- "phy"); ++ reset = of_reset_control_get_exclusive(mdiodev->dev.of_node, ++ "phy"); + if (IS_ERR(reset)) { + if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOTSUPP) + reset = NULL; +@@ -107,6 +107,8 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev) + if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev) + return -EINVAL; + ++ reset_control_put(mdiodev->reset_ctrl); ++ + mdiodev->bus->mdio_map[mdiodev->addr] = NULL; + + return 0; +diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c +index 4d479e3c817d..2a91c192659f 100644 +--- a/drivers/net/slip/slip.c ++++ b/drivers/net/slip/slip.c +@@ -855,6 +855,7 @@ err_free_chan: + sl->tty = NULL; + tty->disc_data = NULL; + clear_bit(SLF_INUSE, &sl->flags); ++ sl_free_netdev(sl->dev); + free_netdev(sl->dev); + + err_exit: +diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c +index 6bcbbb375401..9579a706fc08 100644 +--- a/drivers/platform/x86/hp-wmi.c ++++ b/drivers/platform/x86/hp-wmi.c +@@ -65,7 +65,7 @@ struct bios_args { + u32 command; + u32 commandtype; + u32 datasize; +- u32 data; ++ u8 data[128]; + }; + + enum 
hp_wmi_commandtype { +@@ -216,7 +216,7 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command, + .command = command, + .commandtype = query, + .datasize = insize, +- .data = 0, ++ .data = { 0 }, + }; + struct acpi_buffer input = { sizeof(struct bios_args), &args }; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; +@@ -228,7 +228,7 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command, + + if (WARN_ON(insize > sizeof(args.data))) + return -EINVAL; +- memcpy(&args.data, buffer, insize); ++ memcpy(&args.data[0], buffer, insize); + + wmi_evaluate_method(HPWMI_BIOS_GUID, 0, mid, &input, &output); + +@@ -380,7 +380,7 @@ static int hp_wmi_rfkill2_refresh(void) + int err, i; + + err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state, +- 0, sizeof(state)); ++ sizeof(state), sizeof(state)); + if (err) + return err; + +@@ -778,7 +778,7 @@ static int __init hp_wmi_rfkill2_setup(struct platform_device *device) + int err, i; + + err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state, +- 0, sizeof(state)); ++ sizeof(state), sizeof(state)); + if (err) + return err < 0 ? 
err : -EINVAL; + +diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +index f932cb15e4e5..c702ee9691b1 100644 +--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c ++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +@@ -1616,14 +1616,15 @@ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, + memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev)); + skb_push(skb, priv->rtllib->tx_headroom); + ret = _rtl92e_tx(dev, skb); +- if (ret != 0) +- kfree_skb(skb); + + if (queue_index != MGNT_QUEUE) { + priv->rtllib->stats.tx_bytes += (skb->len - + priv->rtllib->tx_headroom); + priv->rtllib->stats.tx_packets++; + } ++ ++ if (ret != 0) ++ kfree_skb(skb); + } + + static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c +index d3784c44f6d0..3784a27641a6 100644 +--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c ++++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c +@@ -18,18 +18,13 @@ + static const struct sdio_device_id sdio_ids[] = + { + { SDIO_DEVICE(0x024c, 0x0523), }, ++ { SDIO_DEVICE(0x024c, 0x0525), }, + { SDIO_DEVICE(0x024c, 0x0623), }, + { SDIO_DEVICE(0x024c, 0x0626), }, + { SDIO_DEVICE(0x024c, 0xb723), }, + { /* end: all zeroes */ }, + }; +-static const struct acpi_device_id acpi_ids[] = { +- {"OBDA8723", 0x0000}, +- {} +-}; +- + MODULE_DEVICE_TABLE(sdio, sdio_ids); +-MODULE_DEVICE_TABLE(acpi, acpi_ids); + + static int rtw_drv_init(struct sdio_func *func, const struct sdio_device_id *id); + static void rtw_dev_remove(struct sdio_func *func); +diff --git a/drivers/staging/wilc1000/wilc_hif.c b/drivers/staging/wilc1000/wilc_hif.c +index f2b7d5a1be17..d3d9ea284816 100644 +--- a/drivers/staging/wilc1000/wilc_hif.c ++++ b/drivers/staging/wilc1000/wilc_hif.c +@@ -477,16 +477,21 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, + memcpy(¶m->supp_rates[1], 
rates_ie + 2, rates_len); + } + +- supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, ies->data, +- ies->len); +- if (supp_rates_ie) { +- if (supp_rates_ie[1] > (WILC_MAX_RATES_SUPPORTED - rates_len)) +- param->supp_rates[0] = WILC_MAX_RATES_SUPPORTED; +- else +- param->supp_rates[0] += supp_rates_ie[1]; +- +- memcpy(¶m->supp_rates[rates_len + 1], supp_rates_ie + 2, +- (param->supp_rates[0] - rates_len)); ++ if (rates_len < WILC_MAX_RATES_SUPPORTED) { ++ supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, ++ ies->data, ies->len); ++ if (supp_rates_ie) { ++ u8 ext_rates = supp_rates_ie[1]; ++ ++ if (ext_rates > (WILC_MAX_RATES_SUPPORTED - rates_len)) ++ param->supp_rates[0] = WILC_MAX_RATES_SUPPORTED; ++ else ++ param->supp_rates[0] += ext_rates; ++ ++ memcpy(¶m->supp_rates[rates_len + 1], ++ supp_rates_ie + 2, ++ (param->supp_rates[0] - rates_len)); ++ } + } + + ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len); +diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c +index 5ea8db667e83..c5974c9af841 100644 +--- a/drivers/thunderbolt/switch.c ++++ b/drivers/thunderbolt/switch.c +@@ -168,7 +168,7 @@ static int nvm_validate_and_write(struct tb_switch *sw) + + static int nvm_authenticate_host(struct tb_switch *sw) + { +- int ret; ++ int ret = 0; + + /* + * Root switch NVM upgrade requires that we disconnect the +@@ -176,6 +176,8 @@ static int nvm_authenticate_host(struct tb_switch *sw) + * already). + */ + if (!sw->safe_mode) { ++ u32 status; ++ + ret = tb_domain_disconnect_all_paths(sw->tb); + if (ret) + return ret; +@@ -184,7 +186,16 @@ static int nvm_authenticate_host(struct tb_switch *sw) + * everything goes well so getting timeout is expected. + */ + ret = dma_port_flash_update_auth(sw->dma_port); +- return ret == -ETIMEDOUT ? 0 : ret; ++ if (!ret || ret == -ETIMEDOUT) ++ return 0; ++ ++ /* ++ * Any error from update auth operation requires power ++ * cycling of the host router. 
++ */ ++ tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n"); ++ if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0) ++ nvm_set_auth_status(sw, status); + } + + /* +@@ -192,7 +203,7 @@ static int nvm_authenticate_host(struct tb_switch *sw) + * switch. + */ + dma_port_power_cycle(sw->dma_port); +- return 0; ++ return ret; + } + + static int nvm_authenticate_device(struct tb_switch *sw) +@@ -200,8 +211,16 @@ static int nvm_authenticate_device(struct tb_switch *sw) + int ret, retries = 10; + + ret = dma_port_flash_update_auth(sw->dma_port); +- if (ret && ret != -ETIMEDOUT) ++ switch (ret) { ++ case 0: ++ case -ETIMEDOUT: ++ case -EACCES: ++ case -EINVAL: ++ /* Power cycle is required */ ++ break; ++ default: + return ret; ++ } + + /* + * Poll here for the authentication status. It takes some time +@@ -1246,8 +1265,6 @@ static ssize_t nvm_authenticate_store(struct device *dev, + */ + nvm_authenticate_start(sw); + ret = nvm_authenticate_host(sw); +- if (ret) +- nvm_authenticate_complete(sw); + } else { + ret = nvm_authenticate_device(sw); + } +@@ -1690,13 +1707,16 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) + int ret; + + switch (sw->generation) { +- case 3: +- break; +- + case 2: + /* Only root switch can be upgraded */ + if (tb_route(sw)) + return 0; ++ ++ /* fallthrough */ ++ case 3: ++ ret = tb_switch_set_uuid(sw); ++ if (ret) ++ return ret; + break; + + default: +@@ -1720,6 +1740,19 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) + if (sw->no_nvm_upgrade) + return 0; + ++ /* ++ * If there is status already set then authentication failed ++ * when the dma_port_flash_update_auth() returned. Power cycling ++ * is not needed (it was done already) so only thing we do here ++ * is to unblock runtime PM of the root port. ++ */ ++ nvm_get_auth_status(sw, &status); ++ if (status) { ++ if (!tb_route(sw)) ++ nvm_authenticate_complete(sw); ++ return 0; ++ } ++ + /* + * Check status of the previous flash authentication. 
If there + * is one we need to power cycle the switch in any case to make +@@ -1735,9 +1768,6 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) + + if (status) { + tb_sw_info(sw, "switch flash authentication failed\n"); +- ret = tb_switch_set_uuid(sw); +- if (ret) +- return ret; + nvm_set_auth_status(sw, status); + } + +diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c +index 8e41d70fd298..78a4925aa118 100644 +--- a/drivers/usb/dwc2/core.c ++++ b/drivers/usb/dwc2/core.c +@@ -524,7 +524,7 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait) + greset |= GRSTCTL_CSFTRST; + dwc2_writel(hsotg, greset, GRSTCTL); + +- if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 50)) { ++ if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 10000)) { + dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL GRSTCTL_CSFTRST\n", + __func__); + return -EBUSY; +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index 25e81faf4c24..9ad44a96dfe3 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -1033,6 +1033,9 @@ static const struct usb_device_id id_table_combined[] = { + /* Sienna devices */ + { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) }, + { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) }, ++ /* U-Blox devices */ ++ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) }, ++ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) }, + { } /* Terminating entry */ + }; + +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index 22d66217cb41..e8373528264c 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -1558,3 +1558,10 @@ + */ + #define UNJO_VID 0x22B7 + #define UNJO_ISODEBUG_V1_PID 0x150D ++ ++/* ++ * U-Blox products (http://www.u-blox.com). 
++ */ ++#define UBLOX_VID 0x1546 ++#define UBLOX_C099F9P_ZED_PID 0x0502 ++#define UBLOX_C099F9P_ODIN_PID 0x0503 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 516faa280ced..d691d1783ed6 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -5912,8 +5912,23 @@ static int __ext4_expand_extra_isize(struct inode *inode, + { + struct ext4_inode *raw_inode; + struct ext4_xattr_ibody_header *header; ++ unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb); ++ struct ext4_inode_info *ei = EXT4_I(inode); + int error; + ++ /* this was checked at iget time, but double check for good measure */ ++ if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) || ++ (ei->i_extra_isize & 3)) { ++ EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)", ++ ei->i_extra_isize, ++ EXT4_INODE_SIZE(inode->i_sb)); ++ return -EFSCORRUPTED; ++ } ++ if ((new_extra_isize < ei->i_extra_isize) || ++ (new_extra_isize < 4) || ++ (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE)) ++ return -EINVAL; /* Should never happen */ ++ + raw_inode = ext4_raw_inode(iloc); + + header = IHDR(inode, raw_inode); +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index dd654e53ba3d..73578359d451 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -3555,12 +3555,15 @@ static void ext4_clamp_want_extra_isize(struct super_block *sb) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_super_block *es = sbi->s_es; ++ unsigned def_extra_isize = sizeof(struct ext4_inode) - ++ EXT4_GOOD_OLD_INODE_SIZE; + +- /* determine the minimum size of new large inodes, if present */ +- if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE && +- sbi->s_want_extra_isize == 0) { +- sbi->s_want_extra_isize = sizeof(struct ext4_inode) - +- EXT4_GOOD_OLD_INODE_SIZE; ++ if (sbi->s_inode_size == EXT4_GOOD_OLD_INODE_SIZE) { ++ sbi->s_want_extra_isize = 0; ++ return; ++ } ++ if (sbi->s_want_extra_isize < 4) { ++ sbi->s_want_extra_isize = def_extra_isize; + if (ext4_has_feature_extra_isize(sb)) { + if 
(sbi->s_want_extra_isize < + le16_to_cpu(es->s_want_extra_isize)) +@@ -3573,10 +3576,10 @@ static void ext4_clamp_want_extra_isize(struct super_block *sb) + } + } + /* Check if enough inode space is available */ +- if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize > +- sbi->s_inode_size) { +- sbi->s_want_extra_isize = sizeof(struct ext4_inode) - +- EXT4_GOOD_OLD_INODE_SIZE; ++ if ((sbi->s_want_extra_isize > sbi->s_inode_size) || ++ (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize > ++ sbi->s_inode_size)) { ++ sbi->s_want_extra_isize = def_extra_isize; + ext4_msg(sb, KERN_INFO, + "required extra inode space not available"); + } +diff --git a/fs/io_uring.c b/fs/io_uring.c +index 2c819c3c855d..cbe8dabb6479 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -238,6 +238,8 @@ struct io_ring_ctx { + + struct user_struct *user; + ++ struct cred *creds; ++ + struct completion ctx_done; + + struct { +@@ -1752,8 +1754,11 @@ static void io_poll_complete_work(struct work_struct *work) + struct io_poll_iocb *poll = &req->poll; + struct poll_table_struct pt = { ._key = poll->events }; + struct io_ring_ctx *ctx = req->ctx; ++ const struct cred *old_cred; + __poll_t mask = 0; + ++ old_cred = override_creds(ctx->creds); ++ + if (!READ_ONCE(poll->canceled)) + mask = vfs_poll(poll->file, &pt) & poll->events; + +@@ -1768,7 +1773,7 @@ static void io_poll_complete_work(struct work_struct *work) + if (!mask && !READ_ONCE(poll->canceled)) { + add_wait_queue(poll->head, &poll->wait); + spin_unlock_irq(&ctx->completion_lock); +- return; ++ goto out; + } + list_del_init(&req->list); + io_poll_complete(ctx, req, mask); +@@ -1776,6 +1781,8 @@ static void io_poll_complete_work(struct work_struct *work) + + io_cqring_ev_posted(ctx); + io_put_req(req); ++out: ++ revert_creds(old_cred); + } + + static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, +@@ -2147,10 +2154,12 @@ static void io_sq_wq_submit_work(struct work_struct *work) + struct io_ring_ctx *ctx = 
req->ctx; + struct mm_struct *cur_mm = NULL; + struct async_list *async_list; ++ const struct cred *old_cred; + LIST_HEAD(req_list); + mm_segment_t old_fs; + int ret; + ++ old_cred = override_creds(ctx->creds); + async_list = io_async_list_from_sqe(ctx, req->submit.sqe); + restart: + do { +@@ -2258,6 +2267,7 @@ out: + unuse_mm(cur_mm); + mmput(cur_mm); + } ++ revert_creds(old_cred); + } + + /* +@@ -2663,6 +2673,7 @@ static int io_sq_thread(void *data) + { + struct io_ring_ctx *ctx = data; + struct mm_struct *cur_mm = NULL; ++ const struct cred *old_cred; + mm_segment_t old_fs; + DEFINE_WAIT(wait); + unsigned inflight; +@@ -2672,6 +2683,7 @@ static int io_sq_thread(void *data) + + old_fs = get_fs(); + set_fs(USER_DS); ++ old_cred = override_creds(ctx->creds); + + timeout = inflight = 0; + while (!kthread_should_park()) { +@@ -2782,6 +2794,7 @@ static int io_sq_thread(void *data) + unuse_mm(cur_mm); + mmput(cur_mm); + } ++ revert_creds(old_cred); + + kthread_parkme(); + +@@ -3567,6 +3580,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx) + io_unaccount_mem(ctx->user, + ring_pages(ctx->sq_entries, ctx->cq_entries)); + free_uid(ctx->user); ++ if (ctx->creds) ++ put_cred(ctx->creds); + kfree(ctx); + } + +@@ -3838,6 +3853,12 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p) + ctx->account_mem = account_mem; + ctx->user = user; + ++ ctx->creds = prepare_creds(); ++ if (!ctx->creds) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ + ret = io_allocate_scq_urings(ctx, p); + if (ret) + goto err; +diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c +index 021a4a2190ee..b86c78d178c6 100644 +--- a/fs/jffs2/nodelist.c ++++ b/fs/jffs2/nodelist.c +@@ -226,7 +226,7 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *r + lastend = this->ofs + this->size; + } else { + dbg_fragtree2("lookup gave no frag\n"); +- return -EINVAL; ++ lastend = 0; + } + + /* See if we ran off the end of the fragtree */ +diff --git 
a/include/linux/skmsg.h b/include/linux/skmsg.h +index ce7055259877..da4caff7efa4 100644 +--- a/include/linux/skmsg.h ++++ b/include/linux/skmsg.h +@@ -14,6 +14,7 @@ + #include <net/strparser.h> + + #define MAX_MSG_FRAGS MAX_SKB_FRAGS ++#define NR_MSG_FRAG_IDS (MAX_MSG_FRAGS + 1) + + enum __sk_action { + __SK_DROP = 0, +@@ -29,11 +30,13 @@ struct sk_msg_sg { + u32 size; + u32 copybreak; + bool copy[MAX_MSG_FRAGS]; +- /* The extra element is used for chaining the front and sections when +- * the list becomes partitioned (e.g. end < start). The crypto APIs +- * require the chaining. ++ /* The extra two elements: ++ * 1) used for chaining the front and sections when the list becomes ++ * partitioned (e.g. end < start). The crypto APIs require the ++ * chaining; ++ * 2) to chain tailer SG entries after the message. + */ +- struct scatterlist data[MAX_MSG_FRAGS + 1]; ++ struct scatterlist data[MAX_MSG_FRAGS + 2]; + }; + + /* UAPI in filter.c depends on struct sk_msg_sg being first element. */ +@@ -141,13 +144,13 @@ static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes) + + static inline u32 sk_msg_iter_dist(u32 start, u32 end) + { +- return end >= start ? end - start : end + (MAX_MSG_FRAGS - start); ++ return end >= start ? 
end - start : end + (NR_MSG_FRAG_IDS - start); + } + + #define sk_msg_iter_var_prev(var) \ + do { \ + if (var == 0) \ +- var = MAX_MSG_FRAGS - 1; \ ++ var = NR_MSG_FRAG_IDS - 1; \ + else \ + var--; \ + } while (0) +@@ -155,7 +158,7 @@ static inline u32 sk_msg_iter_dist(u32 start, u32 end) + #define sk_msg_iter_var_next(var) \ + do { \ + var++; \ +- if (var == MAX_MSG_FRAGS) \ ++ if (var == NR_MSG_FRAG_IDS) \ + var = 0; \ + } while (0) + +@@ -172,9 +175,9 @@ static inline void sk_msg_clear_meta(struct sk_msg *msg) + + static inline void sk_msg_init(struct sk_msg *msg) + { +- BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != MAX_MSG_FRAGS); ++ BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS); + memset(msg, 0, sizeof(*msg)); +- sg_init_marker(msg->sg.data, MAX_MSG_FRAGS); ++ sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS); + } + + static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src, +@@ -195,14 +198,11 @@ static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src) + + static inline bool sk_msg_full(const struct sk_msg *msg) + { +- return (msg->sg.end == msg->sg.start) && msg->sg.size; ++ return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS; + } + + static inline u32 sk_msg_elem_used(const struct sk_msg *msg) + { +- if (sk_msg_full(msg)) +- return MAX_MSG_FRAGS; +- + return sk_msg_iter_dist(msg->sg.start, msg->sg.end); + } + +diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h +index 503fbc3cd819..2b6f3f13d5bc 100644 +--- a/include/net/sctp/structs.h ++++ b/include/net/sctp/structs.h +@@ -1239,6 +1239,9 @@ struct sctp_ep_common { + /* What socket does this endpoint belong to? */ + struct sock *sk; + ++ /* Cache netns and it won't change once set */ ++ struct net *net; ++ + /* This is where we receive inbound chunks. 
*/ + struct sctp_inq inqueue; + +diff --git a/include/net/tls.h b/include/net/tls.h +index f4ad831eaa02..093abb5a3dff 100644 +--- a/include/net/tls.h ++++ b/include/net/tls.h +@@ -122,7 +122,6 @@ struct tls_rec { + struct list_head list; + int tx_ready; + int tx_flags; +- int inplace_crypto; + + struct sk_msg msg_plaintext; + struct sk_msg msg_encrypted; +@@ -396,7 +395,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx, + int flags); + int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, + int flags); +-bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx); ++void tls_free_partial_record(struct sock *sk, struct tls_context *ctx); + + static inline struct tls_msg *tls_msg(struct sk_buff *skb) + { +diff --git a/net/core/filter.c b/net/core/filter.c +index 3fed5755494b..6d0111bfdb4a 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -2299,7 +2299,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, + WARN_ON_ONCE(last_sge == first_sge); + shift = last_sge > first_sge ? + last_sge - first_sge - 1 : +- MAX_SKB_FRAGS - first_sge + last_sge - 1; ++ NR_MSG_FRAG_IDS - first_sge + last_sge - 1; + if (!shift) + goto out; + +@@ -2308,8 +2308,8 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, + do { + u32 move_from; + +- if (i + shift >= MAX_MSG_FRAGS) +- move_from = i + shift - MAX_MSG_FRAGS; ++ if (i + shift >= NR_MSG_FRAG_IDS) ++ move_from = i + shift - NR_MSG_FRAG_IDS; + else + move_from = i + shift; + if (move_from == msg->sg.end) +@@ -2323,7 +2323,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, + } while (1); + + msg->sg.end = msg->sg.end - shift > msg->sg.end ? 
+- msg->sg.end - shift + MAX_MSG_FRAGS : ++ msg->sg.end - shift + NR_MSG_FRAG_IDS : + msg->sg.end - shift; + out: + msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset; +diff --git a/net/core/skmsg.c b/net/core/skmsg.c +index ad31e4e53d0a..0675d022584e 100644 +--- a/net/core/skmsg.c ++++ b/net/core/skmsg.c +@@ -421,7 +421,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb) + copied = skb->len; + msg->sg.start = 0; + msg->sg.size = copied; +- msg->sg.end = num_sge == MAX_MSG_FRAGS ? 0 : num_sge; ++ msg->sg.end = num_sge; + msg->skb = skb; + + sk_psock_queue_msg(psock, msg); +diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c +index 8a56e09cfb0e..e38705165ac9 100644 +--- a/net/ipv4/tcp_bpf.c ++++ b/net/ipv4/tcp_bpf.c +@@ -301,7 +301,7 @@ EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir); + static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock, + struct sk_msg *msg, int *copied, int flags) + { +- bool cork = false, enospc = msg->sg.start == msg->sg.end; ++ bool cork = false, enospc = sk_msg_full(msg); + struct sock *sk_redir; + u32 tosend, delta = 0; + int ret; +diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c +index d8c364d637b1..23f67b8fdeaa 100644 +--- a/net/openvswitch/datapath.c ++++ b/net/openvswitch/datapath.c +@@ -704,9 +704,13 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts, + { + size_t len = NLMSG_ALIGN(sizeof(struct ovs_header)); + +- /* OVS_FLOW_ATTR_UFID */ ++ /* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback ++ * see ovs_nla_put_identifier() ++ */ + if (sfid && ovs_identifier_is_ufid(sfid)) + len += nla_total_size(sfid->ufid_len); ++ else ++ len += nla_total_size(ovs_key_attr_size()); + + /* OVS_FLOW_ATTR_KEY */ + if (!sfid || should_fill_key(sfid, ufid_flags)) +@@ -882,7 +886,10 @@ static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow, + retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb, + info->snd_portid, info->snd_seq, 0, + 
cmd, ufid_flags); +- BUG_ON(retval < 0); ++ if (WARN_ON_ONCE(retval < 0)) { ++ kfree_skb(skb); ++ skb = ERR_PTR(retval); ++ } + return skb; + } + +@@ -1346,7 +1353,10 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) + OVS_FLOW_CMD_DEL, + ufid_flags); + rcu_read_unlock(); +- BUG_ON(err < 0); ++ if (WARN_ON_ONCE(err < 0)) { ++ kfree_skb(reply); ++ goto out_free; ++ } + + ovs_notify(&dp_flow_genl_family, reply, info); + } else { +@@ -1354,6 +1364,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) + } + } + ++out_free: + ovs_flow_free(flow, true); + return 0; + unlock: +diff --git a/net/psample/psample.c b/net/psample/psample.c +index a6ceb0533b5b..6f2fbc6b9eb2 100644 +--- a/net/psample/psample.c ++++ b/net/psample/psample.c +@@ -229,7 +229,7 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, + data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN + - NLA_ALIGNTO; + +- nl_skb = genlmsg_new(meta_len + data_len, GFP_ATOMIC); ++ nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC); + if (unlikely(!nl_skb)) + return; + +diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c +index 0d578333e967..278c0b2dc523 100644 +--- a/net/sched/sch_mq.c ++++ b/net/sched/sch_mq.c +@@ -245,7 +245,8 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl, + struct netdev_queue *dev_queue = mq_queue_get(sch, cl); + + sch = dev_queue->qdisc_sleeping; +- if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 || ++ if (gnet_stats_copy_basic(&sch->running, d, sch->cpu_bstats, ++ &sch->bstats) < 0 || + qdisc_qstats_copy(d, sch) < 0) + return -1; + return 0; +diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c +index 46980b8d66c5..0d0113a24962 100644 +--- a/net/sched/sch_mqprio.c ++++ b/net/sched/sch_mqprio.c +@@ -557,8 +557,8 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, + struct netdev_queue *dev_queue = mqprio_queue_get(sch, 
cl); + + sch = dev_queue->qdisc_sleeping; +- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), +- d, NULL, &sch->bstats) < 0 || ++ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, ++ sch->cpu_bstats, &sch->bstats) < 0 || + qdisc_qstats_copy(d, sch) < 0) + return -1; + } +diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c +index b2b7fdb06fc6..1330ad224931 100644 +--- a/net/sched/sch_multiq.c ++++ b/net/sched/sch_multiq.c +@@ -339,7 +339,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl, + + cl_q = q->queues[cl - 1]; + if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), +- d, NULL, &cl_q->bstats) < 0 || ++ d, cl_q->cpu_bstats, &cl_q->bstats) < 0 || + qdisc_qstats_copy(d, cl_q) < 0) + return -1; + +diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c +index 0f8fedb8809a..18b884cfdfe8 100644 +--- a/net/sched/sch_prio.c ++++ b/net/sched/sch_prio.c +@@ -356,7 +356,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl, + + cl_q = q->queues[cl - 1]; + if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), +- d, NULL, &cl_q->bstats) < 0 || ++ d, cl_q->cpu_bstats, &cl_q->bstats) < 0 || + qdisc_qstats_copy(d, cl_q) < 0) + return -1; + +diff --git a/net/sctp/associola.c b/net/sctp/associola.c +index d2ffc9a0ba3a..41839b85c268 100644 +--- a/net/sctp/associola.c ++++ b/net/sctp/associola.c +@@ -64,6 +64,7 @@ static struct sctp_association *sctp_association_init( + /* Discarding const is appropriate here. */ + asoc->ep = (struct sctp_endpoint *)ep; + asoc->base.sk = (struct sock *)sk; ++ asoc->base.net = sock_net(sk); + + sctp_endpoint_hold(asoc->ep); + sock_hold(asoc->base.sk); +diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c +index ea53049d1db6..3067deb0fbec 100644 +--- a/net/sctp/endpointola.c ++++ b/net/sctp/endpointola.c +@@ -110,6 +110,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, + + /* Remember who we are attached to. 
*/ + ep->base.sk = sk; ++ ep->base.net = sock_net(sk); + sock_hold(ep->base.sk); + + return ep; +diff --git a/net/sctp/input.c b/net/sctp/input.c +index 2277981559d0..4d2bcfc9d7f8 100644 +--- a/net/sctp/input.c ++++ b/net/sctp/input.c +@@ -882,7 +882,7 @@ static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg, + if (!sctp_transport_hold(t)) + return err; + +- if (!net_eq(sock_net(t->asoc->base.sk), x->net)) ++ if (!net_eq(t->asoc->base.net, x->net)) + goto out; + if (x->lport != htons(t->asoc->base.bind_addr.port)) + goto out; +@@ -897,7 +897,7 @@ static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed) + { + const struct sctp_transport *t = data; + +- return sctp_hashfn(sock_net(t->asoc->base.sk), ++ return sctp_hashfn(t->asoc->base.net, + htons(t->asoc->base.bind_addr.port), + &t->ipaddr, seed); + } +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c +index 0c21c52fc408..4ab8208a2dd4 100644 +--- a/net/sctp/sm_statefuns.c ++++ b/net/sctp/sm_statefuns.c +@@ -2160,8 +2160,10 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook( + + /* Update socket peer label if first association. */ + if (security_sctp_assoc_request((struct sctp_endpoint *)ep, +- chunk->skb)) ++ chunk->skb)) { ++ sctp_association_free(new_asoc); + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); ++ } + + /* Set temp so that it won't be added into hashtable */ + new_asoc->temp = 1; +diff --git a/net/socket.c b/net/socket.c +index 6a9ab7a8b1d2..d7a106028f0e 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -2232,15 +2232,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg, + return err < 0 ? 
err : 0; + } + +-static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, +- struct msghdr *msg_sys, unsigned int flags, +- struct used_address *used_address, +- unsigned int allowed_msghdr_flags) ++static int ____sys_sendmsg(struct socket *sock, struct msghdr *msg_sys, ++ unsigned int flags, struct used_address *used_address, ++ unsigned int allowed_msghdr_flags) + { +- struct compat_msghdr __user *msg_compat = +- (struct compat_msghdr __user *)msg; +- struct sockaddr_storage address; +- struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; + unsigned char ctl[sizeof(struct cmsghdr) + 20] + __aligned(sizeof(__kernel_size_t)); + /* 20 is size of ipv6_pktinfo */ +@@ -2248,19 +2243,10 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, + int ctl_len; + ssize_t err; + +- msg_sys->msg_name = &address; +- +- if (MSG_CMSG_COMPAT & flags) +- err = get_compat_msghdr(msg_sys, msg_compat, NULL, &iov); +- else +- err = copy_msghdr_from_user(msg_sys, msg, NULL, &iov); +- if (err < 0) +- return err; +- + err = -ENOBUFS; + + if (msg_sys->msg_controllen > INT_MAX) +- goto out_freeiov; ++ goto out; + flags |= (msg_sys->msg_flags & allowed_msghdr_flags); + ctl_len = msg_sys->msg_controllen; + if ((MSG_CMSG_COMPAT & flags) && ctl_len) { +@@ -2268,7 +2254,7 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, + cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl, + sizeof(ctl)); + if (err) +- goto out_freeiov; ++ goto out; + ctl_buf = msg_sys->msg_control; + ctl_len = msg_sys->msg_controllen; + } else if (ctl_len) { +@@ -2277,7 +2263,7 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, + if (ctl_len > sizeof(ctl)) { + ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL); + if (ctl_buf == NULL) +- goto out_freeiov; ++ goto out; + } + err = -EFAULT; + /* +@@ -2323,7 +2309,47 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, + out_freectl: + 
if (ctl_buf != ctl) + sock_kfree_s(sock->sk, ctl_buf, ctl_len); +-out_freeiov: ++out: ++ return err; ++} ++ ++static int sendmsg_copy_msghdr(struct msghdr *msg, ++ struct user_msghdr __user *umsg, unsigned flags, ++ struct iovec **iov) ++{ ++ int err; ++ ++ if (flags & MSG_CMSG_COMPAT) { ++ struct compat_msghdr __user *msg_compat; ++ ++ msg_compat = (struct compat_msghdr __user *) umsg; ++ err = get_compat_msghdr(msg, msg_compat, NULL, iov); ++ } else { ++ err = copy_msghdr_from_user(msg, umsg, NULL, iov); ++ } ++ if (err < 0) ++ return err; ++ ++ return 0; ++} ++ ++static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, ++ struct msghdr *msg_sys, unsigned int flags, ++ struct used_address *used_address, ++ unsigned int allowed_msghdr_flags) ++{ ++ struct sockaddr_storage address; ++ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; ++ ssize_t err; ++ ++ msg_sys->msg_name = &address; ++ ++ err = sendmsg_copy_msghdr(msg_sys, msg, flags, &iov); ++ if (err < 0) ++ return err; ++ ++ err = ____sys_sendmsg(sock, msg_sys, flags, used_address, ++ allowed_msghdr_flags); + kfree(iov); + return err; + } +@@ -2331,12 +2357,27 @@ out_freeiov: + /* + * BSD sendmsg interface + */ +-long __sys_sendmsg_sock(struct socket *sock, struct user_msghdr __user *msg, ++long __sys_sendmsg_sock(struct socket *sock, struct user_msghdr __user *umsg, + unsigned int flags) + { +- struct msghdr msg_sys; ++ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; ++ struct sockaddr_storage address; ++ struct msghdr msg = { .msg_name = &address }; ++ ssize_t err; ++ ++ err = sendmsg_copy_msghdr(&msg, umsg, flags, &iov); ++ if (err) ++ return err; ++ /* disallow ancillary data requests from this path */ ++ if (msg.msg_control || msg.msg_controllen) { ++ err = -EINVAL; ++ goto out; ++ } + +- return ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL, 0); ++ err = ____sys_sendmsg(sock, &msg, flags, NULL, 0); ++out: ++ kfree(iov); ++ return err; + } + + long __sys_sendmsg(int fd, 
struct user_msghdr __user *msg, unsigned int flags, +@@ -2442,33 +2483,41 @@ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, + return __sys_sendmmsg(fd, mmsg, vlen, flags, true); + } + +-static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, +- struct msghdr *msg_sys, unsigned int flags, int nosec) ++static int recvmsg_copy_msghdr(struct msghdr *msg, ++ struct user_msghdr __user *umsg, unsigned flags, ++ struct sockaddr __user **uaddr, ++ struct iovec **iov) + { +- struct compat_msghdr __user *msg_compat = +- (struct compat_msghdr __user *)msg; +- struct iovec iovstack[UIO_FASTIOV]; +- struct iovec *iov = iovstack; +- unsigned long cmsg_ptr; +- int len; + ssize_t err; + +- /* kernel mode address */ +- struct sockaddr_storage addr; ++ if (MSG_CMSG_COMPAT & flags) { ++ struct compat_msghdr __user *msg_compat; + +- /* user mode address pointers */ +- struct sockaddr __user *uaddr; +- int __user *uaddr_len = COMPAT_NAMELEN(msg); +- +- msg_sys->msg_name = &addr; +- +- if (MSG_CMSG_COMPAT & flags) +- err = get_compat_msghdr(msg_sys, msg_compat, &uaddr, &iov); +- else +- err = copy_msghdr_from_user(msg_sys, msg, &uaddr, &iov); ++ msg_compat = (struct compat_msghdr __user *) umsg; ++ err = get_compat_msghdr(msg, msg_compat, uaddr, iov); ++ } else { ++ err = copy_msghdr_from_user(msg, umsg, uaddr, iov); ++ } + if (err < 0) + return err; + ++ return 0; ++} ++ ++static int ____sys_recvmsg(struct socket *sock, struct msghdr *msg_sys, ++ struct user_msghdr __user *msg, ++ struct sockaddr __user *uaddr, ++ unsigned int flags, int nosec) ++{ ++ struct compat_msghdr __user *msg_compat = ++ (struct compat_msghdr __user *) msg; ++ int __user *uaddr_len = COMPAT_NAMELEN(msg); ++ struct sockaddr_storage addr; ++ unsigned long cmsg_ptr; ++ int len; ++ ssize_t err; ++ ++ msg_sys->msg_name = &addr; + cmsg_ptr = (unsigned long)msg_sys->msg_control; + msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); + +@@ -2479,7 +2528,7 @@ static 
int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, + flags |= MSG_DONTWAIT; + err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, flags); + if (err < 0) +- goto out_freeiov; ++ goto out; + len = err; + + if (uaddr != NULL) { +@@ -2487,12 +2536,12 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, + msg_sys->msg_namelen, uaddr, + uaddr_len); + if (err < 0) +- goto out_freeiov; ++ goto out; + } + err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT), + COMPAT_FLAGS(msg)); + if (err) +- goto out_freeiov; ++ goto out; + if (MSG_CMSG_COMPAT & flags) + err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, + &msg_compat->msg_controllen); +@@ -2500,10 +2549,25 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, + err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, + &msg->msg_controllen); + if (err) +- goto out_freeiov; ++ goto out; + err = len; ++out: ++ return err; ++} ++ ++static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, ++ struct msghdr *msg_sys, unsigned int flags, int nosec) ++{ ++ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; ++ /* user mode address pointers */ ++ struct sockaddr __user *uaddr; ++ ssize_t err; ++ ++ err = recvmsg_copy_msghdr(msg_sys, msg, flags, &uaddr, &iov); ++ if (err < 0) ++ return err; + +-out_freeiov: ++ err = ____sys_recvmsg(sock, msg_sys, msg, uaddr, flags, nosec); + kfree(iov); + return err; + } +@@ -2512,12 +2576,28 @@ out_freeiov: + * BSD recvmsg interface + */ + +-long __sys_recvmsg_sock(struct socket *sock, struct user_msghdr __user *msg, ++long __sys_recvmsg_sock(struct socket *sock, struct user_msghdr __user *umsg, + unsigned int flags) + { +- struct msghdr msg_sys; ++ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; ++ struct sockaddr_storage address; ++ struct msghdr msg = { .msg_name = &address }; ++ struct sockaddr __user *uaddr; ++ ssize_t err; ++ ++ err = 
recvmsg_copy_msghdr(&msg, umsg, flags, &uaddr, &iov); ++ if (err) ++ return err; ++ /* disallow ancillary data requests from this path */ ++ if (msg.msg_control || msg.msg_controllen) { ++ err = -EINVAL; ++ goto out; ++ } + +- return ___sys_recvmsg(sock, msg, &msg_sys, flags, 0); ++ err = ____sys_recvmsg(sock, &msg, umsg, uaddr, flags, 0); ++out: ++ kfree(iov); ++ return err; + } + + long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned int flags, +diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c +index e135d4e11231..d4d2928424e2 100644 +--- a/net/tipc/netlink_compat.c ++++ b/net/tipc/netlink_compat.c +@@ -550,7 +550,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, + if (len <= 0) + return -EINVAL; + +- len = min_t(int, len, TIPC_MAX_BEARER_NAME); ++ len = min_t(int, len, TIPC_MAX_LINK_NAME); + if (!string_is_valid(name, len)) + return -EINVAL; + +@@ -822,7 +822,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd, + if (len <= 0) + return -EINVAL; + +- len = min_t(int, len, TIPC_MAX_BEARER_NAME); ++ len = min_t(int, len, TIPC_MAX_LINK_NAME); + if (!string_is_valid(name, len)) + return -EINVAL; + +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c +index f874cc0da45d..eff444293594 100644 +--- a/net/tls/tls_main.c ++++ b/net/tls/tls_main.c +@@ -209,24 +209,15 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, + return tls_push_sg(sk, ctx, sg, offset, flags); + } + +-bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx) ++void tls_free_partial_record(struct sock *sk, struct tls_context *ctx) + { + struct scatterlist *sg; + +- sg = ctx->partially_sent_record; +- if (!sg) +- return false; +- +- while (1) { ++ for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) { + put_page(sg_page(sg)); + sk_mem_uncharge(sk, sg->length); +- +- if (sg_is_last(sg)) +- break; +- sg++; + } + ctx->partially_sent_record = NULL; +- return true; + } + + 
static void tls_write_space(struct sock *sk) +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c +index 319735d5c084..5dd0f01913c0 100644 +--- a/net/tls/tls_sw.c ++++ b/net/tls/tls_sw.c +@@ -705,8 +705,7 @@ static int tls_push_record(struct sock *sk, int flags, + } + + i = msg_pl->sg.start; +- sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ? +- &msg_en->sg.data[i] : &msg_pl->sg.data[i]); ++ sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]); + + i = msg_en->sg.end; + sk_msg_iter_var_prev(i); +@@ -766,8 +765,14 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, + + policy = !(flags & MSG_SENDPAGE_NOPOLICY); + psock = sk_psock_get(sk); +- if (!psock || !policy) +- return tls_push_record(sk, flags, record_type); ++ if (!psock || !policy) { ++ err = tls_push_record(sk, flags, record_type); ++ if (err) { ++ *copied -= sk_msg_free(sk, msg); ++ tls_free_open_rec(sk); ++ } ++ return err; ++ } + more_data: + enospc = sk_msg_full(msg); + if (psock->eval == __SK_NONE) { +@@ -965,8 +970,6 @@ alloc_encrypted: + if (ret) + goto fallback_to_reg_send; + +- rec->inplace_crypto = 0; +- + num_zc++; + copied += try_to_copy; + +@@ -979,7 +982,7 @@ alloc_encrypted: + num_async++; + else if (ret == -ENOMEM) + goto wait_for_memory; +- else if (ret == -ENOSPC) ++ else if (ctx->open_rec && ret == -ENOSPC) + goto rollback_iter; + else if (ret != -EAGAIN) + goto send_end; +@@ -1048,11 +1051,12 @@ wait_for_memory: + ret = sk_stream_wait_memory(sk, &timeo); + if (ret) { + trim_sgl: +- tls_trim_both_msgs(sk, orig_size); ++ if (ctx->open_rec) ++ tls_trim_both_msgs(sk, orig_size); + goto send_end; + } + +- if (msg_en->sg.size < required_size) ++ if (ctx->open_rec && msg_en->sg.size < required_size) + goto alloc_encrypted; + } + +@@ -1164,7 +1168,6 @@ alloc_payload: + + tls_ctx->pending_open_record_frags = true; + if (full_record || eor || sk_msg_full(msg_pl)) { +- rec->inplace_crypto = 0; + ret = bpf_exec_tx_verdict(msg_pl, sk, full_record, + record_type, &copied, flags); + if 
(ret) { +@@ -1185,11 +1188,13 @@ wait_for_sndbuf: + wait_for_memory: + ret = sk_stream_wait_memory(sk, &timeo); + if (ret) { +- tls_trim_both_msgs(sk, msg_pl->sg.size); ++ if (ctx->open_rec) ++ tls_trim_both_msgs(sk, msg_pl->sg.size); + goto sendpage_end; + } + +- goto alloc_payload; ++ if (ctx->open_rec) ++ goto alloc_payload; + } + + if (num_async) { +@@ -2079,7 +2084,8 @@ void tls_sw_release_resources_tx(struct sock *sk) + /* Free up un-sent records in tx_list. First, free + * the partially sent record if any at head of tx_list. + */ +- if (tls_free_partial_record(sk, tls_ctx)) { ++ if (tls_ctx->partially_sent_record) { ++ tls_free_partial_record(sk, tls_ctx); + rec = list_first_entry(&ctx->tx_list, + struct tls_rec, list); + list_del(&rec->list); +diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c +index 3845144e2c91..4a851513c842 100644 +--- a/tools/testing/selftests/bpf/test_sockmap.c ++++ b/tools/testing/selftests/bpf/test_sockmap.c +@@ -240,14 +240,14 @@ static int sockmap_init_sockets(int verbose) + addr.sin_port = htons(S1_PORT); + err = bind(s1, (struct sockaddr *)&addr, sizeof(addr)); + if (err < 0) { +- perror("bind s1 failed()\n"); ++ perror("bind s1 failed()"); + return errno; + } + + addr.sin_port = htons(S2_PORT); + err = bind(s2, (struct sockaddr *)&addr, sizeof(addr)); + if (err < 0) { +- perror("bind s2 failed()\n"); ++ perror("bind s2 failed()"); + return errno; + } + +@@ -255,14 +255,14 @@ static int sockmap_init_sockets(int verbose) + addr.sin_port = htons(S1_PORT); + err = listen(s1, 32); + if (err < 0) { +- perror("listen s1 failed()\n"); ++ perror("listen s1 failed()"); + return errno; + } + + addr.sin_port = htons(S2_PORT); + err = listen(s2, 32); + if (err < 0) { +- perror("listen s1 failed()\n"); ++ perror("listen s1 failed()"); + return errno; + } + +@@ -270,14 +270,14 @@ static int sockmap_init_sockets(int verbose) + addr.sin_port = htons(S1_PORT); + err = connect(c1, (struct sockaddr 
*)&addr, sizeof(addr)); + if (err < 0 && errno != EINPROGRESS) { +- perror("connect c1 failed()\n"); ++ perror("connect c1 failed()"); + return errno; + } + + addr.sin_port = htons(S2_PORT); + err = connect(c2, (struct sockaddr *)&addr, sizeof(addr)); + if (err < 0 && errno != EINPROGRESS) { +- perror("connect c2 failed()\n"); ++ perror("connect c2 failed()"); + return errno; + } else if (err < 0) { + err = 0; +@@ -286,13 +286,13 @@ static int sockmap_init_sockets(int verbose) + /* Accept Connecrtions */ + p1 = accept(s1, NULL, NULL); + if (p1 < 0) { +- perror("accept s1 failed()\n"); ++ perror("accept s1 failed()"); + return errno; + } + + p2 = accept(s2, NULL, NULL); + if (p2 < 0) { +- perror("accept s1 failed()\n"); ++ perror("accept s1 failed()"); + return errno; + } + +@@ -332,6 +332,10 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt, + int i, fp; + + file = fopen(".sendpage_tst.tmp", "w+"); ++ if (!file) { ++ perror("create file for sendpage"); ++ return 1; ++ } + for (i = 0; i < iov_length * cnt; i++, k++) + fwrite(&k, sizeof(char), 1, file); + fflush(file); +@@ -339,12 +343,17 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt, + fclose(file); + + fp = open(".sendpage_tst.tmp", O_RDONLY); ++ if (fp < 0) { ++ perror("reopen file for sendpage"); ++ return 1; ++ } ++ + clock_gettime(CLOCK_MONOTONIC, &s->start); + for (i = 0; i < cnt; i++) { + int sent = sendfile(fd, fp, NULL, iov_length); + + if (!drop && sent < 0) { +- perror("send loop error:"); ++ perror("send loop error"); + close(fp); + return sent; + } else if (drop && sent >= 0) { +@@ -463,7 +472,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, + int sent = sendmsg(fd, &msg, flags); + + if (!drop && sent < 0) { +- perror("send loop error:"); ++ perror("send loop error"); + goto out_errno; + } else if (drop && sent >= 0) { + printf("send loop error expected: %i\n", sent); +@@ -499,7 +508,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, 
int cnt, + total_bytes -= txmsg_pop_total; + err = clock_gettime(CLOCK_MONOTONIC, &s->start); + if (err < 0) +- perror("recv start time: "); ++ perror("recv start time"); + while (s->bytes_recvd < total_bytes) { + if (txmsg_cork) { + timeout.tv_sec = 0; +@@ -543,7 +552,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, + if (recv < 0) { + if (errno != EWOULDBLOCK) { + clock_gettime(CLOCK_MONOTONIC, &s->end); +- perror("recv failed()\n"); ++ perror("recv failed()"); + goto out_errno; + } + } +@@ -557,7 +566,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, + + errno = msg_verify_data(&msg, recv, chunk_sz); + if (errno) { +- perror("data verify msg failed\n"); ++ perror("data verify msg failed"); + goto out_errno; + } + if (recvp) { +@@ -565,7 +574,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, + recvp, + chunk_sz); + if (errno) { +- perror("data verify msg_peek failed\n"); ++ perror("data verify msg_peek failed"); + goto out_errno; + } + } +@@ -654,7 +663,7 @@ static int sendmsg_test(struct sockmap_options *opt) + err = 0; + exit(err ? 1 : 0); + } else if (rxpid == -1) { +- perror("msg_loop_rx: "); ++ perror("msg_loop_rx"); + return errno; + } + +@@ -681,7 +690,7 @@ static int sendmsg_test(struct sockmap_options *opt) + s.bytes_recvd, recvd_Bps, recvd_Bps/giga); + exit(err ? 
1 : 0); + } else if (txpid == -1) { +- perror("msg_loop_tx: "); ++ perror("msg_loop_tx"); + return errno; + } + +@@ -715,7 +724,7 @@ static int forever_ping_pong(int rate, struct sockmap_options *opt) + /* Ping/Pong data from client to server */ + sc = send(c1, buf, sizeof(buf), 0); + if (sc < 0) { +- perror("send failed()\n"); ++ perror("send failed()"); + return sc; + } + +@@ -748,7 +757,7 @@ static int forever_ping_pong(int rate, struct sockmap_options *opt) + rc = recv(i, buf, sizeof(buf), 0); + if (rc < 0) { + if (errno != EWOULDBLOCK) { +- perror("recv failed()\n"); ++ perror("recv failed()"); + return rc; + } + } +@@ -760,7 +769,7 @@ static int forever_ping_pong(int rate, struct sockmap_options *opt) + + sc = send(i, buf, rc, 0); + if (sc < 0) { +- perror("send failed()\n"); ++ perror("send failed()"); + return sc; + } + } +diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c +index d60a343b1371..842d9155d36c 100644 +--- a/tools/testing/selftests/bpf/xdping.c ++++ b/tools/testing/selftests/bpf/xdping.c +@@ -45,7 +45,7 @@ static int get_stats(int fd, __u16 count, __u32 raddr) + printf("\nXDP RTT data:\n"); + + if (bpf_map_lookup_elem(fd, &raddr, &pinginfo)) { +- perror("bpf_map_lookup elem: "); ++ perror("bpf_map_lookup elem"); + return 1; + } + +diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh +index ab367e75f095..d697815d2785 100755 +--- a/tools/testing/selftests/net/pmtu.sh ++++ b/tools/testing/selftests/net/pmtu.sh +@@ -1249,8 +1249,7 @@ test_list_flush_ipv4_exception() { + done + run_cmd ${ns_a} ping -q -M want -i 0.1 -c 2 -s 1800 "${dst2}" + +- # Each exception is printed as two lines +- if [ "$(${ns_a} ip route list cache | wc -l)" -ne 202 ]; then ++ if [ "$(${ns_a} ip -oneline route list cache | wc -l)" -ne 101 ]; then + err " can't list cached exceptions" + fail=1 + fi +@@ -1300,7 +1299,7 @@ test_list_flush_ipv6_exception() { + run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s 
1800 "${dst_prefix1}${i}" + done + run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s 1800 "${dst2}" +- if [ "$(${ns_a} ip -6 route list cache | wc -l)" -ne 101 ]; then ++ if [ "$(${ns_a} ip -oneline -6 route list cache | wc -l)" -ne 101 ]; then + err " can't list cached exceptions" + fail=1 + fi +diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c +index 1c8f194d6556..46abcae47dee 100644 +--- a/tools/testing/selftests/net/tls.c ++++ b/tools/testing/selftests/net/tls.c +@@ -268,6 +268,38 @@ TEST_F(tls, sendmsg_single) + EXPECT_EQ(memcmp(buf, test_str, send_len), 0); + } + ++#define MAX_FRAGS 64 ++#define SEND_LEN 13 ++TEST_F(tls, sendmsg_fragmented) ++{ ++ char const *test_str = "test_sendmsg"; ++ char buf[SEND_LEN * MAX_FRAGS]; ++ struct iovec vec[MAX_FRAGS]; ++ struct msghdr msg; ++ int i, frags; ++ ++ for (frags = 1; frags <= MAX_FRAGS; frags++) { ++ for (i = 0; i < frags; i++) { ++ vec[i].iov_base = (char *)test_str; ++ vec[i].iov_len = SEND_LEN; ++ } ++ ++ memset(&msg, 0, sizeof(struct msghdr)); ++ msg.msg_iov = vec; ++ msg.msg_iovlen = frags; ++ ++ EXPECT_EQ(sendmsg(self->fd, &msg, 0), SEND_LEN * frags); ++ EXPECT_EQ(recv(self->cfd, buf, SEND_LEN * frags, MSG_WAITALL), ++ SEND_LEN * frags); ++ ++ for (i = 0; i < frags; i++) ++ EXPECT_EQ(memcmp(buf + SEND_LEN * i, ++ test_str, SEND_LEN), 0); ++ } ++} ++#undef MAX_FRAGS ++#undef SEND_LEN ++ + TEST_F(tls, sendmsg_large) + { + void *mem = malloc(16384); +@@ -694,6 +726,34 @@ TEST_F(tls, recv_lowat) + EXPECT_EQ(memcmp(send_mem, recv_mem + 10, 5), 0); + } + ++TEST_F(tls, recv_rcvbuf) ++{ ++ char send_mem[4096]; ++ char recv_mem[4096]; ++ int rcv_buf = 1024; ++ ++ memset(send_mem, 0x1c, sizeof(send_mem)); ++ ++ EXPECT_EQ(setsockopt(self->cfd, SOL_SOCKET, SO_RCVBUF, ++ &rcv_buf, sizeof(rcv_buf)), 0); ++ ++ EXPECT_EQ(send(self->fd, send_mem, 512, 0), 512); ++ memset(recv_mem, 0, sizeof(recv_mem)); ++ EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), 512); ++ 
EXPECT_EQ(memcmp(send_mem, recv_mem, 512), 0); ++ ++ if (self->notls) ++ return; ++ ++ EXPECT_EQ(send(self->fd, send_mem, 4096, 0), 4096); ++ memset(recv_mem, 0, sizeof(recv_mem)); ++ EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), -1); ++ EXPECT_EQ(errno, EMSGSIZE); ++ ++ EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), -1); ++ EXPECT_EQ(errno, EMSGSIZE); ++} ++ + TEST_F(tls, bidir) + { + char const *test_str = "test_read"; |