author | Mike Pagano <mpagano@gentoo.org> | 2018-05-20 18:21:29 -0400
---|---|---
committer | Mike Pagano <mpagano@gentoo.org> | 2018-11-14 09:00:40 -0500
commit | cded637725196d59135dbfd7372f8ca87fcbb54a (patch)
tree | 592e794d58c17a85e90c22148c8e2bff12f73154 /1041_linux-4.14.42.patch
parent | Linux patch 4.14.41 (diff)
download | linux-patches-cded637725196d59135dbfd7372f8ca87fcbb54a.tar.gz linux-patches-cded637725196d59135dbfd7372f8ca87fcbb54a.tar.bz2 linux-patches-cded637725196d59135dbfd7372f8ca87fcbb54a.zip
Linux patch 4.14.42
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
Diffstat (limited to '1041_linux-4.14.42.patch')
-rw-r--r-- | 1041_linux-4.14.42.patch | 1534
1 file changed, 1534 insertions, 0 deletions
diff --git a/1041_linux-4.14.42.patch b/1041_linux-4.14.42.patch
new file mode 100644
index 00000000..5d73cf3b
--- /dev/null
+++ b/1041_linux-4.14.42.patch
@@ -0,0 +1,1534 @@
+diff --git a/Makefile b/Makefile
+index c23d0b0c6c45..777f5685a36b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 41
++SUBLEVEL = 42
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index c02cc817a490..60666db31886 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -450,7 +450,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
+ {
+ 	int i;
+ 
+-	if (!client_info->slave)
++	if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
+ 		return;
+ 
+ 	for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
+@@ -943,6 +943,10 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
+ 	skb->priority = TC_PRIO_CONTROL;
+ 	skb->dev = slave->dev;
+ 
++	netdev_dbg(slave->bond->dev,
++		   "Send learning packet: dev %s mac %pM vlan %d\n",
++		   slave->dev->name, mac_addr, vid);
++
+ 	if (vid)
+ 		__vlan_hwaccel_put_tag(skb, vlan_proto, vid);
+ 
+@@ -965,14 +969,13 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data)
+ 	u8 *mac_addr = data->mac_addr;
+ 	struct bond_vlan_tag *tags;
+ 
+-	if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+-		if (strict_match &&
+-		    ether_addr_equal_64bits(mac_addr,
+-					    upper->dev_addr)) {
++	if (is_vlan_dev(upper) &&
++	    bond->nest_level == vlan_get_encap_level(upper) - 1) {
++		if (upper->addr_assign_type == NET_ADDR_STOLEN) {
+ 			alb_send_lp_vid(slave, mac_addr,
+ 					vlan_dev_vlan_proto(upper),
+ 					vlan_dev_vlan_id(upper));
+-		} else if (!strict_match) {
++		} else {
+ 			alb_send_lp_vid(slave, upper->dev_addr,
+ 					vlan_dev_vlan_proto(upper),
+ 					vlan_dev_vlan_id(upper));
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index bf3be2e6d4a8..f0aa57222f17 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1734,6 +1734,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ 	if (bond_mode_uses_xmit_hash(bond))
+ 		bond_update_slave_arr(bond, NULL);
+ 
++	bond->nest_level = dev_get_nest_level(bond_dev);
++
+ 	netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
+ 		    slave_dev->name,
+ 		    bond_is_active_slave(new_slave) ? "an active" : "a backup",
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 48738eb27806..9a8ef630466f 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -8723,14 +8723,15 @@ static void tg3_free_consistent(struct tg3 *tp)
+ 	tg3_mem_rx_release(tp);
+ 	tg3_mem_tx_release(tp);
+ 
+-	/* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
+-	tg3_full_lock(tp, 0);
++	/* tp->hw_stats can be referenced safely:
++	 *     1. under rtnl_lock
++	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
++	 */
+ 	if (tp->hw_stats) {
+ 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
+ 				  tp->hw_stats, tp->stats_mapping);
+ 		tp->hw_stats = NULL;
+ 	}
+-	tg3_full_unlock(tp);
+ }
+ 
+ /*
+@@ -14167,7 +14168,7 @@ static void tg3_get_stats64(struct net_device *dev,
+ 	struct tg3 *tp = netdev_priv(dev);
+ 
+ 	spin_lock_bh(&tp->lock);
+-	if (!tp->hw_stats) {
++	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
+ 		*stats = tp->net_stats_prev;
+ 		spin_unlock_bh(&tp->lock);
+ 		return;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index 67f74fcb265e..5fe56dc4cfae 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -1013,6 +1013,22 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
+ 	if (!coal->tx_max_coalesced_frames_irq)
+ 		return -EINVAL;
+ 
++	if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
++	    coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
++	    coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
++	    coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
++		netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
++			    __func__, MLX4_EN_MAX_COAL_TIME);
++		return -ERANGE;
++	}
++
++	if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
++	    coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
++		netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
++			    __func__, MLX4_EN_MAX_COAL_PKTS);
++		return -ERANGE;
++	}
++
+ 	priv->rx_frames = (coal->rx_max_coalesced_frames ==
+ 			   MLX4_EN_AUTO_CONF) ?
+ 				MLX4_EN_RX_COAL_TARGET :
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index c097eef41a9c..faa4bd21f148 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -3318,12 +3318,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 					   MAX_TX_RINGS, GFP_KERNEL);
+ 		if (!priv->tx_ring[t]) {
+ 			err = -ENOMEM;
+-			goto err_free_tx;
++			goto out;
+ 		}
+ 		priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
+ 					 MAX_TX_RINGS, GFP_KERNEL);
+ 		if (!priv->tx_cq[t]) {
+-			kfree(priv->tx_ring[t]);
+ 			err = -ENOMEM;
+ 			goto out;
+ 		}
+@@ -3576,11 +3575,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 
+ 	return 0;
+ 
+-err_free_tx:
+-	while (t--) {
+-		kfree(priv->tx_ring[t]);
+-		kfree(priv->tx_cq[t]);
+-	}
+ out:
+ 	mlx4_en_destroy_netdev(dev);
+ 	return err;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index 2c1a5ff6acfa..09f4764a3f39 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -131,6 +131,9 @@
+ #define MLX4_EN_TX_COAL_PKTS	16
+ #define MLX4_EN_TX_COAL_TIME	0x10
+ 
++#define MLX4_EN_MAX_COAL_PKTS	U16_MAX
++#define MLX4_EN_MAX_COAL_TIME	U16_MAX
++
+ #define MLX4_EN_RX_RATE_LOW		400000
+ #define MLX4_EN_RX_COAL_TIME_LOW	0
+ #define MLX4_EN_RX_RATE_HIGH		450000
+@@ -547,8 +550,8 @@ struct mlx4_en_priv {
+ 	u16 rx_usecs_low;
+ 	u32 pkt_rate_high;
+ 	u16 rx_usecs_high;
+-	u16 sample_interval;
+-	u16 adaptive_rx_coal;
++	u32 sample_interval;
++	u32 adaptive_rx_coal;
+ 	u32 msg_enable;
+ 	u32 loopback_ok;
+ 	u32 validate_loopback;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 42bab73a9f40..ede66e6af786 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -780,6 +780,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
+ 					  f->mask);
+ 		addr_type = key->addr_type;
+ 
++		/* the HW doesn't support frag first/later */
++		if (mask->flags & FLOW_DIS_FIRST_FRAG)
++			return -EOPNOTSUPP;
++
+ 		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
+ 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
+@@ -1383,7 +1387,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
+ 	}
+ 
+ 	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
+-	if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
++	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
++	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
+ 		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
+ 		return false;
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index eea7f931cad3..d560047c0bf9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -234,7 +234,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ 		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
+ 					  DMA_TO_DEVICE);
+ 		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+-			return -ENOMEM;
++			goto dma_unmap_wqe_err;
+ 
+ 		dseg->addr = cpu_to_be64(dma_addr);
+ 		dseg->lkey = sq->mkey_be;
+@@ -252,7 +252,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ 		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
+ 					    DMA_TO_DEVICE);
+ 		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+-			return -ENOMEM;
++			goto dma_unmap_wqe_err;
+ 
+ 		dseg->addr = cpu_to_be64(dma_addr);
+ 		dseg->lkey = sq->mkey_be;
+@@ -264,6 +264,10 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ 	}
+ 
+ 	return num_dma;
++
++dma_unmap_wqe_err:
++	mlx5e_dma_unmap_wqe_err(sq, num_dma);
++	return -ENOMEM;
+ }
+ 
+ static inline void
+@@ -355,17 +359,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ 	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
+ 					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
+ 	if (unlikely(num_dma < 0))
+-		goto dma_unmap_wqe_err;
++		goto err_drop;
+ 
+ 	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
+ 			     num_bytes, num_dma, wi, cseg);
+ 
+ 	return NETDEV_TX_OK;
+ 
+-dma_unmap_wqe_err:
++err_drop:
+ 	sq->stats.dropped++;
+-	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
+-
+ 	dev_kfree_skb_any(skb);
+ 
+ 	return NETDEV_TX_OK;
+@@ -594,17 +596,15 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ 	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
+ 					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
+ 	if (unlikely(num_dma < 0))
+-		goto dma_unmap_wqe_err;
++		goto err_drop;
+ 
+ 	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
+ 			     num_bytes, num_dma, wi, cseg);
+ 
+ 	return NETDEV_TX_OK;
+ 
+-dma_unmap_wqe_err:
++err_drop:
+ 	sq->stats.dropped++;
+-	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
+-
+ 	dev_kfree_skb_any(skb);
+ 
+ 	return NETDEV_TX_OK;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index c77f4c0c7769..82e37250ed01 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -2054,26 +2054,35 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
+ 	memset(vf_stats, 0, sizeof(*vf_stats));
+ 	vf_stats->rx_packets =
+ 		MLX5_GET_CTR(out, received_eth_unicast.packets) +
++		MLX5_GET_CTR(out, received_ib_unicast.packets) +
+ 		MLX5_GET_CTR(out, received_eth_multicast.packets) +
++		MLX5_GET_CTR(out, received_ib_multicast.packets) +
+ 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
+ 
+ 	vf_stats->rx_bytes =
+ 		MLX5_GET_CTR(out, received_eth_unicast.octets) +
++		MLX5_GET_CTR(out, received_ib_unicast.octets) +
+ 		MLX5_GET_CTR(out, received_eth_multicast.octets) +
++		MLX5_GET_CTR(out, received_ib_multicast.octets) +
+ 		MLX5_GET_CTR(out, received_eth_broadcast.octets);
+ 
+ 	vf_stats->tx_packets =
+ 		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
++		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
+ 		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
++		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
+ 		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+ 
+ 	vf_stats->tx_bytes =
+ 		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
++		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
+ 		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
++		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
+ 		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+ 
+ 	vf_stats->multicast =
+-		MLX5_GET_CTR(out, received_eth_multicast.packets);
++		MLX5_GET_CTR(out, received_eth_multicast.packets) +
++		MLX5_GET_CTR(out, received_ib_multicast.packets);
+ 
+ 	vf_stats->broadcast =
+ 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 5a7bea688ec8..33e5ff081e36 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -174,6 +174,7 @@ static void del_flow_group(struct fs_node *node);
+ static void del_fte(struct fs_node *node);
+ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
+ 				struct mlx5_flow_destination *d2);
++static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
+ static struct mlx5_flow_rule *
+ find_flow_rule(struct fs_fte *fte,
+ 	       struct mlx5_flow_destination *dest);
+@@ -2041,23 +2042,27 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
+ 
+ static int init_root_ns(struct mlx5_flow_steering *steering)
+ {
++	int err;
++
+ 	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
+ 	if (!steering->root_ns)
+-		goto cleanup;
++		return -ENOMEM;
+ 
+-	if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
+-		goto cleanup;
++	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
++	if (err)
++		goto out_err;
+ 
+ 	set_prio_attrs(steering->root_ns);
+-
+-	if (create_anchor_flow_table(steering))
+-		goto cleanup;
++	err = create_anchor_flow_table(steering);
++	if (err)
++		goto out_err;
+ 
+ 	return 0;
+ 
+-cleanup:
+-	mlx5_cleanup_fs(steering->dev);
+-	return -ENOMEM;
++out_err:
++	cleanup_root_ns(steering->root_ns);
++	steering->root_ns = NULL;
++	return err;
+ }
+ 
+ static void clean_tree(struct fs_node *node)
+diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
+index d24b47b8e0b2..d118da5a10a2 100644
+--- a/drivers/net/ethernet/realtek/8139too.c
++++ b/drivers/net/ethernet/realtek/8139too.c
+@@ -2224,7 +2224,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
+ 	struct rtl8139_private *tp = netdev_priv(dev);
+ 	const int irq = tp->pci_dev->irq;
+ 
+-	disable_irq(irq);
++	disable_irq_nosync(irq);
+ 	rtl8139_interrupt(irq, dev);
+ 	enable_irq(irq);
+ }
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index db553d4e8d22..b98fcc9e93e5 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -4886,6 +4886,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
+ static void rtl_pll_power_up(struct rtl8169_private *tp)
+ {
+ 	rtl_generic_op(tp, tp->pll_power_ops.up);
++
++	/* give MAC/PHY some time to resume */
++	msleep(20);
+ }
+ 
+ static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
+diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
+index 6a4e8e1bbd90..e92f41d20a2c 100644
+--- a/drivers/net/ethernet/sun/niu.c
++++ b/drivers/net/ethernet/sun/niu.c
+@@ -3442,7 +3442,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
+ 
+ 		len = (val & RCR_ENTRY_L2_LEN) >>
+ 			RCR_ENTRY_L2_LEN_SHIFT;
+-		len -= ETH_FCS_LEN;
++		append_size = len + ETH_HLEN + ETH_FCS_LEN;
+ 
+ 		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
+ 			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
+@@ -3452,7 +3452,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
+ 					 RCR_ENTRY_PKTBUFSZ_SHIFT];
+ 
+ 		off = addr & ~PAGE_MASK;
+-		append_size = rcr_size;
+ 		if (num_rcr == 1) {
+ 			int ptype;
+ 
+@@ -3465,7 +3464,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
+ 			else
+ 				skb_checksum_none_assert(skb);
+ 		} else if (!(val & RCR_ENTRY_MULTI))
+-			append_size = len - skb->len;
++			append_size = append_size - skb->len;
+ 
+ 		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
+ 		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 992c43b1868f..8cb44eabc283 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -1260,6 +1260,8 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
+ 	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ 			   HOST_PORT_NUM, ALE_VLAN |
+ 			   ALE_SECURE, slave->port_vlan);
++	cpsw_ale_control_set(cpsw->ale, slave_port,
++			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
+ }
+ 
+ static void soft_reset_slave(struct cpsw_slave *slave)
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index c849de3cb046..444e560d928b 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -1742,7 +1742,8 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
+ 		goto rx_handler_failed;
+ 	}
+ 
+-	ret = netdev_upper_dev_link(vf_netdev, ndev);
++	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
++					   NULL, NULL);
+ 	if (ret != 0) {
+ 		netdev_err(vf_netdev,
+ 			   "can not set master device %s (err = %d)\n",
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 1aad0568dcc6..2f828eb9ace6 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1338,6 +1338,18 @@ static int qmi_wwan_probe(struct usb_interface *intf,
+ 		id->driver_info = (unsigned long)&qmi_wwan_info;
+ 	}
+ 
++	/* There are devices where the same interface number can be
++	 * configured as different functions. We should only bind to
++	 * vendor specific functions when matching on interface number
++	 */
++	if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
++	    desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
++		dev_dbg(&intf->dev,
++			"Rejecting interface number match for class %02x\n",
++			desc->bInterfaceClass);
++		return -ENODEV;
++	}
++
+ 	/* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
+ 	if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) {
+ 		dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index c0a4fcb7fd0a..3696f9ded252 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -752,6 +752,8 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
+ 	int wait;
+ 	unsigned long flags = 0;
+ 	unsigned long mflags = 0;
++	struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
++			fibptr->hw_fib_va;
+ 
+ 	fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
+ 	if (callback) {
+@@ -762,11 +764,9 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
+ 		wait = 1;
+ 
+ 
+-	if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
+-		struct aac_hba_cmd_req *hbacmd =
+-			(struct aac_hba_cmd_req *)fibptr->hw_fib_va;
++	hbacmd->iu_type = command;
+ 
+-		hbacmd->iu_type = command;
++	if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
+ 		/* bit1 of request_id must be 0 */
+ 		hbacmd->request_id =
+ 			cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index d227d8514b25..1bc62294fe6b 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3171,7 +3171,11 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
+ 	struct btrfs_transaction *cur_trans;
+ 	int ret = 0;
+ 
++	spin_lock(&root->fs_info->trans_lock);
+ 	cur_trans = root->fs_info->running_transaction;
++	if (cur_trans)
++		refcount_inc(&cur_trans->use_count);
++	spin_unlock(&root->fs_info->trans_lock);
+ 	if (!cur_trans)
+ 		return 0;
+ 
+@@ -3180,6 +3184,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
+ 	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
+ 	if (!head) {
+ 		spin_unlock(&delayed_refs->lock);
++		btrfs_put_transaction(cur_trans);
+ 		return 0;
+ 	}
+ 
+@@ -3196,6 +3201,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
+ 		mutex_lock(&head->mutex);
+ 		mutex_unlock(&head->mutex);
+ 		btrfs_put_delayed_ref(&head->node);
++		btrfs_put_transaction(cur_trans);
+ 		return -EAGAIN;
+ 	}
+ 	spin_unlock(&delayed_refs->lock);
+@@ -3223,6 +3229,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
+ 	}
+ 	spin_unlock(&head->lock);
+ 	mutex_unlock(&head->mutex);
++	btrfs_put_transaction(cur_trans);
+ 	return ret;
+ }
+ 
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 2ff11a693360..dd9d4d3a2e39 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -263,7 +263,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
+ 	 * Inherently racy -- command line shares address space
+ 	 * with code and data.
+ 	 */
+-	rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
++	rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON);
+ 	if (rv <= 0)
+ 		goto out_free_page;
+ 
+@@ -281,7 +281,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
+ 			int nr_read;
+ 
+ 			_count = min3(count, len, PAGE_SIZE);
+-			nr_read = access_remote_vm(mm, p, page, _count, 0);
++			nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
+ 			if (nr_read < 0)
+ 				rv = nr_read;
+ 			if (nr_read <= 0)
+@@ -327,7 +327,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
+ 			bool final;
+ 
+ 			_count = min3(count, len, PAGE_SIZE);
+-			nr_read = access_remote_vm(mm, p, page, _count, 0);
++			nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
+ 			if (nr_read < 0)
+ 				rv = nr_read;
+ 			if (nr_read <= 0)
+@@ -946,7 +946,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
+ 		max_len = min_t(size_t, PAGE_SIZE, count);
+ 		this_len = min(max_len, this_len);
+ 
+-		retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);
++		retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON);
+ 
+ 		if (retval <= 0) {
+ 			ret = retval;
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index f50deada0f5c..f23215854c80 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2383,6 +2383,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
+ #define FOLL_MLOCK	0x1000	/* lock present pages */
+ #define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
+ #define FOLL_COW	0x4000	/* internal GUP flag */
++#define FOLL_ANON	0x8000	/* don't do file mappings */
+ 
+ static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
+ {
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index b2e68657a216..73799da57400 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -198,6 +198,7 @@ struct bonding {
+ 	struct slave __rcu *primary_slave;
+ 	struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
+ 	bool force_primary;
++	u32 nest_level;
+ 	s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
+ 	int (*recv_probe)(const struct sk_buff *, struct bonding *,
+ 			  struct slave *);
+diff --git a/include/net/tls.h b/include/net/tls.h
+index df950383b8c1..48940a883d9a 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -98,6 +98,7 @@ struct tls_context {
+ 	struct scatterlist *partially_sent_record;
+ 	u16 partially_sent_offset;
+ 	unsigned long flags;
++	bool in_tcp_sendpages;
+ 
+ 	u16 pending_open_record_frags;
+ 	int (*push_pending_record)(struct sock *sk, int flags);
+diff --git a/mm/gup.c b/mm/gup.c
+index 8fc23a60487d..d2ba0be71441 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -544,6 +544,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
+ 	if (vm_flags & (VM_IO | VM_PFNMAP))
+ 		return -EFAULT;
+ 
++	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
++		return -EFAULT;
++
+ 	if (write) {
+ 		if (!(vm_flags & VM_WRITE)) {
+ 			if (!(gup_flags & FOLL_FORCE))
+diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
+index f3aef22931ab..55a73ef388bf 100644
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -503,8 +503,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
+ 	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
+ 		return -ELOOP;
+ 
+-	/* Device is already being bridged */
+-	if (br_port_exists(dev))
++	/* Device has master upper dev */
++	if (netdev_master_upper_dev_get(dev))
+ 		return -EBUSY;
+ 
+ 	/* No bridging devices that dislike that (e.g. wireless) */
+diff --git a/net/compat.c b/net/compat.c
+index 22381719718c..32ed993588d6 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -377,7 +377,8 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
+ 	    optname == SO_ATTACH_REUSEPORT_CBPF)
+ 		return do_set_attach_filter(sock, level, optname,
+ 					    optval, optlen);
+-	if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
++	if (!COMPAT_USE_64BIT_TIME &&
++	    (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
+ 		return do_set_sock_timeout(sock, level, optname, optval, optlen);
+ 
+ 	return sock_setsockopt(sock, level, optname, optval, optlen);
+@@ -442,7 +443,8 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
+ static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
+ 				  char __user *optval, int __user *optlen)
+ {
+-	if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
++	if (!COMPAT_USE_64BIT_TIME &&
++	    (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
+ 		return do_get_sock_timeout(sock, level, optname, optval, optlen);
+ 	return sock_getsockopt(sock, level, optname, optval, optlen);
+ }
+diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
+index 97791b0b1b51..3887bc115762 100644
+--- a/net/dccp/ccids/ccid2.c
++++ b/net/dccp/ccids/ccid2.c
+@@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
+ 						  DCCPF_SEQ_WMAX));
+ }
+ 
++static void dccp_tasklet_schedule(struct sock *sk)
++{
++	struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
++
++	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
++		sock_hold(sk);
++		__tasklet_schedule(t);
++	}
++}
++
+ static void ccid2_hc_tx_rto_expire(unsigned long data)
+ {
+ 	struct sock *sk = (struct sock *)data;
+@@ -166,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
+ 
+ 	/* if we were blocked before, we may now send cwnd=1 packet */
+ 	if (sender_was_blocked)
+-		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
++		dccp_tasklet_schedule(sk);
+ 	/* restart backed-off timer */
+ 	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
+ out:
+@@ -706,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
+ done:
+ 	/* check if incoming Acks allow pending packets to be sent */
+ 	if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
+-		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
++		dccp_tasklet_schedule(sk);
+ 	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
+ }
+ 
+diff --git a/net/dccp/timer.c b/net/dccp/timer.c
+index 3a2c34027758..2a952cbd6efa 100644
+--- a/net/dccp/timer.c
++++ b/net/dccp/timer.c
+@@ -230,12 +230,12 @@ static void dccp_write_xmitlet(unsigned long data)
+ 	else
+ 		dccp_write_xmit(sk);
+ 	bh_unlock_sock(sk);
++	sock_put(sk);
+ }
+ 
+ static void dccp_write_xmit_timer(unsigned long data)
+ {
+ 	dccp_write_xmitlet(data);
+-	sock_put((struct sock *)data);
+ }
+ 
+ void dccp_init_xmit_timers(struct sock *sk)
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index b8f0db54b197..16226d49263d 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -775,8 +775,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	ipc.addr = faddr = daddr;
+ 
+ 	if (ipc.opt && ipc.opt->opt.srr) {
+-		if (!daddr)
+-			return -EINVAL;
++		if (!daddr) {
++			err = -EINVAL;
++			goto out_free;
++		}
+ 		faddr = ipc.opt->opt.faddr;
+ 	}
+ 	tos = get_rttos(&ipc, inet);
+@@ -842,6 +844,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ out:
+ 	ip_rt_put(rt);
++out_free:
+ 	if (free)
+ 		kfree(ipc.opt);
+ 	if (!err) {
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 5ea559f8c456..28bc3a98adc7 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -711,7 +711,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
+ 		fnhe->fnhe_daddr = daddr;
+ 		fnhe->fnhe_gw = gw;
+ 		fnhe->fnhe_pmtu = pmtu;
+-		fnhe->fnhe_expires = expires;
++		fnhe->fnhe_expires = max(1UL, expires);
+ 
+ 		/* Exception created; mark the cached routes for the nexthop
+ 		 * stale, so anyone caching it rechecks if this exception
+@@ -1286,6 +1286,36 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
+ 	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
+ }
+ 
++static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
++{
++	struct fnhe_hash_bucket *hash;
++	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
++	u32 hval = fnhe_hashfun(daddr);
++
++	spin_lock_bh(&fnhe_lock);
++
++	hash = rcu_dereference_protected(nh->nh_exceptions,
++					 lockdep_is_held(&fnhe_lock));
++	hash += hval;
++
++	fnhe_p = &hash->chain;
++	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
++	while (fnhe) {
++		if (fnhe->fnhe_daddr == daddr) {
++			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
++				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
++			fnhe_flush_routes(fnhe);
++			kfree_rcu(fnhe, rcu);
++			break;
++		}
++		fnhe_p = &fnhe->fnhe_next;
++		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
++						 lockdep_is_held(&fnhe_lock));
++	}
++
++	spin_unlock_bh(&fnhe_lock);
++}
++
+ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
+ {
+ 	struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
+@@ -1299,8 +1329,14 @@ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
+ 
+ 	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
+ 	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
+-		if (fnhe->fnhe_daddr == daddr)
++		if (fnhe->fnhe_daddr == daddr) {
++			if (fnhe->fnhe_expires &&
++			    time_after(jiffies, fnhe->fnhe_expires)) {
++				ip_del_fnhe(nh, daddr);
++				break;
++			}
+ 			return fnhe;
++		}
+ 	}
+ 	return NULL;
+ }
+@@ -1620,36 +1656,6 @@ static void ip_handle_martian_source(struct net_device *dev,
+ #endif
+ }
+ 
+-static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
+-{
+-	struct fnhe_hash_bucket *hash;
+-	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
+-	u32 hval = fnhe_hashfun(daddr);
+-
+-	spin_lock_bh(&fnhe_lock);
+-
+-	hash = rcu_dereference_protected(nh->nh_exceptions,
+-					 lockdep_is_held(&fnhe_lock));
+-	hash += hval;
+-
+-	fnhe_p = &hash->chain;
+-	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
+-	while (fnhe) {
+-		if (fnhe->fnhe_daddr == daddr) {
+-			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
+-				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+-			fnhe_flush_routes(fnhe);
+-			kfree_rcu(fnhe, rcu);
+-			break;
+-		}
+-		fnhe_p = &fnhe->fnhe_next;
+-		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
+-						 lockdep_is_held(&fnhe_lock));
+-	}
+-
+-	spin_unlock_bh(&fnhe_lock);
+-}
+-
+ static void set_lwt_redirect(struct rtable *rth)
+ {
+ 	if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
+@@ -1716,20 +1722,10 @@ static int __mkroute_input(struct sk_buff *skb,
+ 
+ 		fnhe = find_exception(&FIB_RES_NH(*res), daddr);
+ 		if (do_cache) {
+-			if (fnhe) {
++			if (fnhe)
+ 				rth = rcu_dereference(fnhe->fnhe_rth_input);
+-				if (rth && rth->dst.expires &&
+-				    time_after(jiffies, rth->dst.expires)) {
+-					ip_del_fnhe(&FIB_RES_NH(*res), daddr);
+-					fnhe = NULL;
+-				} else {
+-					goto rt_cache;
+-				}
+-			}
+-
+-			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+-
+-rt_cache:
++			else
++				rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+ 			if (rt_cache_valid(rth)) {
+ 				skb_dst_set_noref(skb, &rth->dst);
+ 				goto out;
+@@ -2206,39 +2202,31 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
+ 		 * be set to the loopback interface as well.
+ 		 */
+-		fi = NULL;
++		do_cache = false;
+ 	}
+ 
+ 	fnhe = NULL;
+ 	do_cache &= fi != NULL;
+-	if (do_cache) {
++	if (fi) {
+ 		struct rtable __rcu **prth;
+ 		struct fib_nh *nh = &FIB_RES_NH(*res);
+ 
+ 		fnhe = find_exception(nh, fl4->daddr);
++		if (!do_cache)
++			goto add;
+ 		if (fnhe) {
+ 			prth = &fnhe->fnhe_rth_output;
+-			rth = rcu_dereference(*prth);
+-			if (rth && rth->dst.expires &&
+-			    time_after(jiffies, rth->dst.expires)) {
+-				ip_del_fnhe(nh, fl4->daddr);
+-				fnhe = NULL;
+-			} else {
+-				goto rt_cache;
++		} else {
++			if (unlikely(fl4->flowi4_flags &
++				     FLOWI_FLAG_KNOWN_NH &&
++				     !(nh->nh_gw &&
++				       nh->nh_scope == RT_SCOPE_LINK))) {
++				do_cache = false;
++				goto add;
+ 			}
++			prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
+ 		}
+-
+-		if (unlikely(fl4->flowi4_flags &
+-			     FLOWI_FLAG_KNOWN_NH &&
+-			     !(nh->nh_gw &&
+-			       nh->nh_scope == RT_SCOPE_LINK))) {
+-			do_cache = false;
+-			goto add;
+-		}
+-		prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
+ 		rth = rcu_dereference(*prth);
+-
+-rt_cache:
+ 		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
+ 			return rth;
+ 	}
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index b694fbf44a35..e3ece12f0250 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1194,7 +1194,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
+ 			uarg->zerocopy = 0;
+ 	}
+ 
+-	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
++	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
++	    !tp->repair) {
+ 		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
+ 		if (err == -EINPROGRESS && copied_syn > 0)
+ 			goto out;
+diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
+index 25c5a0b60cfc..9a0b952dd09b 100644
+--- a/net/ipv4/tcp_bbr.c
++++ b/net/ipv4/tcp_bbr.c
+@@ -802,7 +802,9 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
+ 			}
+ 		}
+ 	}
+-	bbr->idle_restart = 0;
++	/* Restart after idle ends only once we process a new S/ACK for data */
++	if (rs->delivered > 0)
++		bbr->idle_restart = 0;
+ }
+ 
+ static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index c79fa6f6b758..b0ad62bd38f7 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -413,9 +413,9 @@ static int compute_score(struct sock *sk, struct net *net,
+ 		bool dev_match = (sk->sk_bound_dev_if == dif ||
+ 				  sk->sk_bound_dev_if == sdif);
+ 
+-		if (exact_dif && !dev_match)
++		if (!dev_match)
+ 			return -1;
+-		if (sk->sk_bound_dev_if && dev_match)
++		if (sk->sk_bound_dev_if)
+ 			score += 4;
+ 	}
+ 
+@@ -978,8 +978,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
+ 
+ 	if (ipc.opt && ipc.opt->opt.srr) {
+-		if (!daddr)
+-			return -EINVAL;
++		if (!daddr) {
++			err = -EINVAL;
++			goto out_free;
++		}
+ 		faddr = ipc.opt->opt.faddr;
+ 		connected = 0;
+ 	}
+@@ -1087,6 +1089,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ out:
+ 	ip_rt_put(rt);
++out_free:
+ 	if (free)
+ 		kfree(ipc.opt);
+ 	if (!err)
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index e04c534b573e..7d50d889ab6e 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1222,11 +1222,16 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb,
+ 	const struct ipv6hdr *inner_iph;
+ 	const struct icmp6hdr *icmph;
+ 	struct ipv6hdr _inner_iph;
++	struct icmp6hdr _icmph;
+ 
+ 	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
+ 		goto out;
+ 
+-	icmph = icmp6_hdr(skb);
++	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
++				   sizeof(_icmph), &_icmph);
++	if (!icmph)
++		goto out;
++
+ 	if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
+ 	    icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
+ 	    icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 40d7234c27b9..0146dcdc5c40 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -164,9 +164,9 @@ static int compute_score(struct sock *sk, struct net *net,
+ 		bool dev_match = (sk->sk_bound_dev_if == dif ||
+ 				  sk->sk_bound_dev_if == sdif);
+ 
+-		if (exact_dif && !dev_match)
++		if (!dev_match)
+ 			return -1;
+-		if (sk->sk_bound_dev_if && dev_match)
++		if (sk->sk_bound_dev_if)
+ 			score++;
+ 	}
+ 
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index fca69c3771f5..c28223d8092b 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -765,8 +765,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
+ 
+ 	if ((session->ifname[0] &&
+ 	     nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
+-	    (session->offset &&
+-	     nla_put_u16(skb, L2TP_ATTR_OFFSET, session->offset)) ||
+ 	    (session->cookie_len &&
+ 	     nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
+ 		     &session->cookie[0])) ||
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index cf41d9b4a0b8..b49f5afab405 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -930,6 +930,9 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 	if (size > llc->dev->mtu)
+ 		size = llc->dev->mtu;
+ 	copied = size - hdrlen;
++	rc = -EINVAL;
++	if (copied < 0)
++		goto release;
+ 	release_sock(sk);
+ 	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
+ 	lock_sock(sk);
+diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
+index 58fb827439a8..6df6f58a8103 100644
+--- a/net/nsh/nsh.c
++++ b/net/nsh/nsh.c
+@@ -30,6 +30,8 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
+ 	if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
+ 		goto out;
+ 	nsh_len = nsh_hdr_len(nsh_hdr(skb));
++	if (nsh_len < NSH_BASE_HDR_LEN)
++		goto out;
+ 	if (unlikely(!pskb_may_pull(skb, nsh_len)))
+ 		goto out;
+ 
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index 0d9f6afa266c..4c9c9458374a 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -1404,13 +1404,10 @@ static void nlattr_set(struct nlattr *attr, u8 val,
+ 
+ 	/* The nlattr stream should already have been validated */
+ 	nla_for_each_nested(nla, attr, rem) {
+-		if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
+-			if (tbl[nla_type(nla)].next)
+-				tbl = tbl[nla_type(nla)].next;
+-			nlattr_set(nla, val, tbl);
+-		} else {
++		if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
++			nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
++		else
+ 			memset(nla_data(nla), val, nla_len(nla));
+-		}
+ 
+ 		if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
+ 			*(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index b25bcfe411ca..555f07ccf0dc 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -558,6 +558,7 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
+ 		struct rds_cmsg_rx_trace t;
+ 		int i, j;
+ 
++		memset(&t, 0, sizeof(t));
+ 		inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
+ 		t.rx_traces = rs->rs_rx_traces;
+ 		for (i = 0; i < rs->rs_rx_traces; i++) {
+diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
+index 6d10b3af479b..821823b2518a 100644
+--- a/net/sched/act_skbmod.c
++++ b/net/sched/act_skbmod.c
+@@ -131,8 +131,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
+ 	if (exists && bind)
+ 		return 0;
+ 
+-	if (!lflags)
++	if (!lflags) {
++		if (exists)
++			tcf_idr_release(*a, bind);
+ 		return -EINVAL;
++	}
+ 
+ 	if (!exists) {
+ 		ret = tcf_idr_create(tn, parm->index, est, a,
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index c2fab4bcb8be..2f4e1483aced 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -151,8 +151,8 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
+ 		} else {
+ 			err = -ENOENT;
+ 		}
+-		goto errout;
+ #endif
++		goto errout;
+ 	}
+ 	tp->classify = tp->ops->classify;
+ 	tp->protocol = protocol;
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index 263d16e3219e..f50eb87cfe79 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
+ 	return f->next == &detached;
+ }
+ 
++static bool fq_flow_is_throttled(const struct fq_flow *f)
++{
++	return f->next == &throttled;
++}
++
++static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
++{
++	if (head->first)
++		head->last->next = flow;
++	else
++		head->first = flow;
++	head->last = flow;
++	flow->next = NULL;
++}
++
++static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
++{
++	rb_erase(&f->rate_node, &q->delayed);
++	q->throttled_flows--;
++	fq_flow_add_tail(&q->old_flows, f);
++}
++
+ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
+ {
+ 	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
+@@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
+ 
+ static struct kmem_cache *fq_flow_cachep __read_mostly;
+ 
+-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+-{
+-	if (head->first)
+-		head->last->next = flow;
+-	else
+-		head->first = flow;
+-	head->last = flow;
+-	flow->next = NULL;
+-}
+ 
+ /* limit number of collected flows per round */
+ #define FQ_GC_MAX 8
+@@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
+ 				     f->socket_hash != sk->sk_hash)) {
+ 			f->credit = q->initial_quantum;
+ 			f->socket_hash = sk->sk_hash;
++			if (fq_flow_is_throttled(f))
++				fq_flow_unset_throttled(q, f);
+ 			f->time_next_packet = 0ULL;
+ 		}
+ 		return f;
+@@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
+ 			q->time_next_delayed_flow = f->time_next_packet;
+ 			break;
+ 		}
+-		rb_erase(p, &q->delayed);
+-		q->throttled_flows--;
+-		fq_flow_add_tail(&q->old_flows, f);
++		fq_flow_unset_throttled(q, f);
+ 	}
+ }
+ 
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index dfb9651e818b..58f7d8cfd748 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1025,8 +1025,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
+ 	struct sctp_endpoint *ep;
+ 	struct sctp_chunk *chunk;
+ 	struct sctp_inq *inqueue;
+-	int state;
++	int first_time = 1;	/* is this the first time through the loop */
+ 	int error = 0;
++	int state;
+ 
+ 	/* The association should be held so we should be safe. */
+ 	ep = asoc->ep;
+@@ -1037,6 +1038,30 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
+ 		state = asoc->state;
+ 		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
+ 
++		/* If the first chunk in the packet is AUTH, do special
++		 * processing specified in Section 6.3 of SCTP-AUTH spec
++		 */
++		if (first_time && subtype.chunk == SCTP_CID_AUTH) {
++			struct sctp_chunkhdr *next_hdr;
++
++			next_hdr = sctp_inq_peek(inqueue);
++			if (!next_hdr)
++				goto normal;
++
++			/* If the next chunk is COOKIE-ECHO, skip the AUTH
++			 * chunk while saving a pointer to it so we can do
++			 * Authentication later (during cookie-echo
++			 * processing).
++			 */
++			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
++				chunk->auth_chunk = skb_clone(chunk->skb,
++							      GFP_ATOMIC);
++				chunk->auth = 1;
++				continue;
++			}
++		}
++
++normal:
+ 		/* SCTP-AUTH, Section 6.3:
+ 		 *    The receiver has a list of chunk types which it expects
+ 		 *    to be received only after an AUTH-chunk. This list has
+@@ -1075,6 +1100,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
+ 		/* If there is an error on chunk, discard this packet. */
+ 		if (error && chunk)
+ 			chunk->pdiscard = 1;
++
++		if (first_time)
++			first_time = 0;
+ 	}
+ 	sctp_association_put(asoc);
+ }
+diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
+index 48392552ee7c..1aa89d4682f4 100644
+--- a/net/sctp/inqueue.c
++++ b/net/sctp/inqueue.c
+@@ -217,7 +217,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
+ 	skb_pull(chunk->skb, sizeof(*ch));
+ 	chunk->subh.v = NULL; /* Subheader is no longer valid.  */
+ 
+-	if (chunk->chunk_end + sizeof(*ch) < skb_tail_pointer(chunk->skb)) {
++	if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) {
+ 		/* This is not a singleton */
+ 		chunk->singleton = 0;
+ 	} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 7219a1c041f7..853fecdf6374 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -865,6 +865,9 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
+ 	if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
+ 		return 1;
+ 
++	if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
++		return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
++
+ 	return __sctp_v6_cmp_addr(addr1, addr2);
+ }
+ 
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 8f8ccded13e4..01b078172306 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -150,10 +150,7 @@ static enum sctp_disposition sctp_sf_violation_chunk(
+ 					struct sctp_cmd_seq *commands);
+ 
+ static enum sctp_ierror sctp_sf_authenticate(
+-					struct net *net,
+-					const struct sctp_endpoint *ep,
+ 					const struct sctp_association *asoc,
+-					const union sctp_subtype type,
+ 					struct sctp_chunk *chunk);
+ 
+ static enum sctp_disposition __sctp_sf_do_9_1_abort(
+@@ -618,6 +615,38 @@ enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net,
+ 	return SCTP_DISPOSITION_CONSUME;
+ }
+ 
++static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk,
++				   const struct sctp_association *asoc)
++{
++	struct sctp_chunk auth;
++
++	if (!chunk->auth_chunk)
++		return true;
++
++	/* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
++	 * is supposed to be authenticated and we have to do delayed
++	 * authentication. We've just recreated the association using
++	 * the information in the cookie and now it's much easier to
++	 * do the authentication.
++	 */
++
++	/* Make sure that we and the peer are AUTH capable */
++	if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
++		return false;
++
++	/* set-up our fake chunk so that we can process it */
++	auth.skb = chunk->auth_chunk;
++	auth.asoc = chunk->asoc;
++	auth.sctp_hdr = chunk->sctp_hdr;
++	auth.chunk_hdr = (struct sctp_chunkhdr *)
++				skb_push(chunk->auth_chunk,
++					 sizeof(struct sctp_chunkhdr));
++	skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
++	auth.transport = chunk->transport;
++
++	return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR;
++}
++
+ /*
+  * Respond to a normal COOKIE ECHO chunk.
+  * We are the side that is being asked for an association.
+@@ -755,37 +784,9 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
+ 	if (error)
+ 		goto nomem_init;
+ 
+-	/* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
+-	 * is supposed to be authenticated and we have to do delayed
+-	 * authentication. We've just recreated the association using
+-	 * the information in the cookie and now it's much easier to
+-	 * do the authentication.
+-	 */
+-	if (chunk->auth_chunk) {
+-		struct sctp_chunk auth;
+-		enum sctp_ierror ret;
+-
+-		/* Make sure that we and the peer are AUTH capable */
+-		if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
+-			sctp_association_free(new_asoc);
+-			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+-		}
+-
+-		/* set-up our fake chunk so that we can process it */
+-		auth.skb = chunk->auth_chunk;
+-		auth.asoc = chunk->asoc;
+-		auth.sctp_hdr = chunk->sctp_hdr;
+-		auth.chunk_hdr = (struct sctp_chunkhdr *)
+-					skb_push(chunk->auth_chunk,
+-						 sizeof(struct sctp_chunkhdr));
+-		skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
+-		auth.transport = chunk->transport;
+-
+-		ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
+-		if (ret != SCTP_IERROR_NO_ERROR) {
+-			sctp_association_free(new_asoc);
+-			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+-		}
++	if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) {
++		sctp_association_free(new_asoc);
++		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ 	}
+ 
+ 	repl = sctp_make_cookie_ack(new_asoc, chunk);
+@@ -1755,13 +1756,15 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
+ 			       GFP_ATOMIC))
+ 		goto nomem;
+ 
++	if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
++		return SCTP_DISPOSITION_DISCARD;
++
+ 	/* Make sure no new addresses are being added during the
+ 	 * restart.  Though this is a pretty complicated attack
+ 	 * since you'd have to get inside the cookie.
+ 	 */
+-	if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
++	if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
+ 		return SCTP_DISPOSITION_CONSUME;
+-	}
+ 
+ 	/* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
+ 	 * the peer has restarted (Action A), it MUST NOT setup a new
+@@ -1867,6 +1870,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
+ 			       GFP_ATOMIC))
+ 		goto nomem;
+ 
++	if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
++		return SCTP_DISPOSITION_DISCARD;
++
+ 	/* Update the content of current association. */
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+@@ -1961,6 +1967,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_d(
+ 	 * a COOKIE ACK.
+ 	 */
+ 
++	if (!sctp_auth_chunk_verify(net, chunk, asoc))
++		return SCTP_DISPOSITION_DISCARD;
++
+ 	/* Don't accidentally move back into established state. */
+ 	if (asoc->state < SCTP_STATE_ESTABLISHED) {
+ 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+@@ -2000,7 +2009,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_d(
+ 		}
+ 	}
+ 
+-	repl = sctp_make_cookie_ack(new_asoc, chunk);
++	repl = sctp_make_cookie_ack(asoc, chunk);
+ 	if (!repl)
+ 		goto nomem;
+ 
+@@ -4111,10 +4120,7 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
+  * The return value is the disposition of the chunk.
+  */
+ static enum sctp_ierror sctp_sf_authenticate(
+-					struct net *net,
+-					const struct sctp_endpoint *ep,
+ 					const struct sctp_association *asoc,
+-					const union sctp_subtype type,
+ 					struct sctp_chunk *chunk)
+ {
+ 	struct sctp_authhdr *auth_hdr;
+@@ -4212,7 +4218,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net,
+ 						  commands);
+ 
+ 	auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
+-	error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
++	error = sctp_sf_authenticate(asoc, chunk);
+ 	switch (error) {
+ 	case SCTP_IERROR_AUTH_BAD_HMAC:
+ 		/* Generate the ERROR chunk and discard the rest
+diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
+index 5447228bf1a0..8538c96c96c1 100644
+--- a/net/sctp/ulpevent.c
++++ b/net/sctp/ulpevent.c
+@@ -717,7 +717,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
+ 	return event;
+ 
+ fail_mark:
+-	sctp_chunk_put(chunk);
+ 	kfree_skb(skb);
+ fail:
+ 	return NULL;
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 282361ac0263..dfef930d1e50 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -87,6 +87,7 @@ int tls_push_sg(struct sock *sk,
+ 	size = sg->length - offset;
+ 	offset += sg->offset;
+ 
++	ctx->in_tcp_sendpages = true;
+ 	while (1) {
+ 		if (sg_is_last(sg))
+ 			sendpage_flags = flags;
+@@ -107,6 +108,7 @@ int tls_push_sg(struct sock *sk,
+ 			offset -= sg->offset;
+ 			ctx->partially_sent_offset = offset;
+ 			ctx->partially_sent_record = (void *)sg;
++			ctx->in_tcp_sendpages = false;
+ 			return ret;
+ 		}
+ 
+@@ -121,6 +123,8 @@ int tls_push_sg(struct sock *sk,
+ 	}
+ 
+ 	clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
++	ctx->in_tcp_sendpages = false;
++	ctx->sk_write_space(sk);
+ 
+ 	return 0;
+ }
+@@ -190,6 +194,10 @@ static void tls_write_space(struct sock *sk)
+ {
+ 	struct tls_context *ctx = tls_get_ctx(sk);
+ 
++	/* We are already sending pages, ignore notification */
++	if (ctx->in_tcp_sendpages)
++		return;
++
+ 	if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
+ 		gfp_t sk_allocation = sk->sk_allocation;
+ 		int rc;
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index 3f6f6f8c9fa5..5b2409746ae0 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -518,7 +518,7 @@ int xfrm_trans_queue(struct sk_buff *skb,
+ 		return -ENOBUFS;
+ 
+ 	XFRM_TRANS_SKB_CB(skb)->finish = finish;
+-	skb_queue_tail(&trans->queue, skb);
++	__skb_queue_tail(&trans->queue, skb);
+ 	tasklet_schedule(&trans->tasklet);
+ 	return 0;
+ }
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 8f13fb57eab5..6c4ec69e11a0 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -1345,6 +1345,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
+ 
+ 	if (orig->aead) {
+ 		x->aead = xfrm_algo_aead_clone(orig->aead);
++		x->geniv = orig->geniv;
+ 		if (!x->aead)
+ 			goto error;
+ 	}