author		2017-05-03 13:44:57 -0400
committer	2017-05-03 13:44:57 -0400
commit		e5aa3de99df5f3ff814f0b0cdc8ea02c25dfb91f (patch)
tree		f6a434e3af03ac5521840979e46e6191247d6685
parent		Linux patch 4.9.25 (diff)
Linux patch 4.9.26 (tag: 4.9-27)
-rw-r--r--	0000_README	4
-rw-r--r--	1025_linux-4.9.26.patch	1768
2 files changed, 1772 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 6d83bcd6..64923fda 100644
--- a/0000_README
+++ b/0000_README
@@ -143,6 +143,10 @@ Patch:  1024_linux-4.9.25.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.25
 
+Patch:  1025_linux-4.9.26.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.26
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
diff --git a/1025_linux-4.9.26.patch b/1025_linux-4.9.26.patch
new file mode 100644
index 00000000..0346b91b
--- /dev/null
+++ b/1025_linux-4.9.26.patch
@@ -0,0 +1,1768 @@
+diff --git a/Makefile b/Makefile
+index 8e18c63388c4..c09679c1a70d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 25
++SUBLEVEL = 26
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
+index b65930a49589..54b54da6384c 100644
+--- a/arch/arc/include/asm/atomic.h
++++ b/arch/arc/include/asm/atomic.h
+@@ -17,10 +17,11 @@
+ #include <asm/barrier.h>
+ #include <asm/smp.h>
+ 
++#define ATOMIC_INIT(i) { (i) }
++
+ #ifndef CONFIG_ARC_PLAT_EZNPS
+ 
+ #define atomic_read(v) READ_ONCE((v)->counter)
+-#define ATOMIC_INIT(i) { (i) }
+ 
+ #ifdef CONFIG_ARC_HAS_LLSC
+ 
+diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
+index b5ff87e6f4b7..aee1a77934cf 100644
+--- a/arch/arc/include/asm/entry-arcv2.h
++++ b/arch/arc/include/asm/entry-arcv2.h
+@@ -16,6 +16,7 @@
+ ;
+ ; Now manually save: r12, sp, fp, gp, r25
+ 
++ PUSH r30
+ PUSH r12
+ 
+ ; Saving pt_regs->sp correctly requires some extra work due to the way
+@@ -72,6 +73,7 @@
+ POPAX AUX_USER_SP
+ 1:
+ POP r12
++ POP r30
+ 
+ .endm
+ 
+diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
+index 69095da1fcfd..47111d565a95 100644
+--- a/arch/arc/include/asm/ptrace.h
++++ b/arch/arc/include/asm/ptrace.h
+@@ -84,7 +84,7 @@ struct pt_regs {
+ unsigned long fp;
+ unsigned long sp; /* user/kernel sp depending on where we came from */
+ 
+- unsigned long r12;
++ unsigned long r12, r30;
+ 
+ /*------- Below list auto saved by h/w -----------*/
+ unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
+diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
+index 804d2a2a19fe..dd6a18bc10ab 100644
+--- a/arch/mips/kernel/cevt-r4k.c
++++ b/arch/mips/kernel/cevt-r4k.c
+@@ -80,7 +80,7 @@ static unsigned int calculate_min_delta(void)
+ }
+ 
+ /* Sorted insert of 75th percentile into buf2 */
+- for (k = 0; k < i; ++k) {
++ for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
+ if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
+ l = min_t(unsigned int,
+ i, ARRAY_SIZE(buf2) - 1);
+diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
+index 6430bff21fff..5c429d70e17f 100644
+--- a/arch/mips/kernel/elf.c
++++ b/arch/mips/kernel/elf.c
+@@ -257,7 +257,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
+ else if ((prog_req.fr1 && prog_req.frdefault) ||
+ (prog_req.single && !prog_req.frdefault))
+ /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
+- state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
++ state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+ cpu_has_mips_r2_r6) ?
+ FP_FR1 : FP_FR0;
+ else if (prog_req.fr1)
+diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
+index de63d36af895..732d6171ac6a 100644
+--- a/arch/mips/kernel/kgdb.c
++++ b/arch/mips/kernel/kgdb.c
+@@ -244,9 +244,6 @@ static int compute_signal(int tt)
+ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+ {
+ int reg;
+- struct thread_info *ti = task_thread_info(p);
+- unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
+- struct pt_regs *regs = (struct pt_regs *)ksp - 1;
+ #if (KGDB_GDB_REG_SIZE == 32)
+ u32 *ptr = (u32 *)gdb_regs;
+ #else
+@@ -254,25 +251,46 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+ #endif
+ 
+ for (reg = 0; reg < 16; reg++)
+- *(ptr++) = regs->regs[reg];
++ *(ptr++) = 0;
+ 
+ /* S0 - S7 */
+- for (reg = 16; reg < 24; reg++)
+- *(ptr++) = regs->regs[reg];
++ *(ptr++) = p->thread.reg16;
++ *(ptr++) = p->thread.reg17;
++ *(ptr++) = p->thread.reg18;
++ *(ptr++) = p->thread.reg19;
++ *(ptr++) = p->thread.reg20;
++ *(ptr++) = p->thread.reg21;
++ *(ptr++) = p->thread.reg22;
++ *(ptr++) = p->thread.reg23;
+ 
+ for (reg = 24; reg < 28; reg++)
+ *(ptr++) = 0;
+ 
+ /* GP, SP, FP, RA */
+- for (reg = 28; reg < 32; reg++)
+- *(ptr++) = regs->regs[reg];
+-
+- *(ptr++) = regs->cp0_status;
+- *(ptr++) = regs->lo;
+- *(ptr++) = regs->hi;
+- *(ptr++) = regs->cp0_badvaddr;
+- *(ptr++) = regs->cp0_cause;
+- *(ptr++) = regs->cp0_epc;
++ *(ptr++) = (long)p;
++ *(ptr++) = p->thread.reg29;
++ *(ptr++) = p->thread.reg30;
++ *(ptr++) = p->thread.reg31;
++
++ *(ptr++) = p->thread.cp0_status;
++
++ /* lo, hi */
++ *(ptr++) = 0;
++ *(ptr++) = 0;
++
++ /*
++ * BadVAddr, Cause
++ * Ideally these would come from the last exception frame up the stack
++ * but that requires unwinding, otherwise we can't know much for sure.
++ */
++ *(ptr++) = 0;
++ *(ptr++) = 0;
++
++ /*
++ * PC
++ * use return address (RA), i.e. the moment after return from resume()
++ */
++ *(ptr++) = p->thread.reg31;
+ }
+ 
+ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 1fb317fbc0b3..b6802b978140 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -673,26 +673,27 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
+ return pte_pfn(pte);
+ }
+ 
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-static inline unsigned long pmd_dirty(pmd_t pmd)
++#define __HAVE_ARCH_PMD_WRITE
++static inline unsigned long pmd_write(pmd_t pmd)
+ {
+ pte_t pte = __pte(pmd_val(pmd));
+ 
+- return pte_dirty(pte);
++ return pte_write(pte);
+ }
+ 
+-static inline unsigned long pmd_young(pmd_t pmd)
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++static inline unsigned long pmd_dirty(pmd_t pmd)
+ {
+ pte_t pte = __pte(pmd_val(pmd));
+ 
+- return pte_young(pte);
++ return pte_dirty(pte);
+ }
+ 
+-static inline unsigned long pmd_write(pmd_t pmd)
++static inline unsigned long pmd_young(pmd_t pmd)
+ {
+ pte_t pte = __pte(pmd_val(pmd));
+ 
+- return pte_write(pte);
++ return pte_young(pte);
+ }
+ 
+ static inline unsigned long pmd_trans_huge(pmd_t pmd)
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 37aa537b3ad8..bd7e2aa86c45 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -1495,7 +1495,7 @@ bool kern_addr_valid(unsigned long addr)
+ if ((long)addr < 0L) {
+ unsigned long pa = __pa(addr);
+ 
+- if ((addr >> max_phys_bits) != 0UL)
++ if ((pa >> max_phys_bits) != 0UL)
+ return false;
+ 
+ return pfn_valid(pa >> PAGE_SHIFT);
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index 8639bb2ae058..6bf09f5594b2 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -983,6 +983,18 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
+ unsigned long return_hooker = (unsigned long)
+ &return_to_handler;
+ 
++ /*
++ * When resuming from suspend-to-ram, this function can be indirectly
++ * called from early CPU startup code while the CPU is in real mode,
++ * which would fail miserably. Make sure the stack pointer is a
++ * virtual address.
++ *
++ * This check isn't as accurate as virt_addr_valid(), but it should be
++ * good enough for this purpose, and it's fast.
++ */
++ if (unlikely((long)__builtin_frame_address(0) >= 0))
++ return;
++
+ if (unlikely(ftrace_graph_is_dead()))
+ return;
+ 
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 25eab453f2b2..e7b96f1ac2c5 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -685,6 +685,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
+ },
+ },
++ {
++ /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
++ },
++ },
+ { }
+ };
+ 
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index 437e4807727d..90ed2e12d345 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -524,9 +524,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
+ static const struct sdhci_pltfm_data sdhci_msm_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_NO_CARD_NO_RESET |
+- SDHCI_QUIRK_SINGLE_POWER_WRITE |
+- SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
++ SDHCI_QUIRK_SINGLE_POWER_WRITE,
+ .ops = &sdhci_msm_ops,
+ };
+ 
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index a0dabd4038ba..7ab24c5262f3 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -740,13 +740,18 @@ static const struct net_device_ops gs_usb_netdev_ops = {
+ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
+ {
+ struct gs_can *dev = netdev_priv(netdev);
+- struct gs_identify_mode imode;
++ struct gs_identify_mode *imode;
+ int rc;
+ 
++ imode = kmalloc(sizeof(*imode), GFP_KERNEL);
++
++ if (!imode)
++ return -ENOMEM;
++
+ if (do_identify)
+- imode.mode = GS_CAN_IDENTIFY_ON;
++ imode->mode = GS_CAN_IDENTIFY_ON;
+ else
+- imode.mode = GS_CAN_IDENTIFY_OFF;
++ imode->mode = GS_CAN_IDENTIFY_OFF;
+ 
+ rc = usb_control_msg(interface_to_usbdev(dev->iface),
+ usb_sndctrlpipe(interface_to_usbdev(dev->iface),
+@@ -756,10 +761,12 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
+ USB_RECIP_INTERFACE,
+ dev->channel,
+ 0,
+- &imode,
+- sizeof(imode),
++ imode,
++ sizeof(*imode),
+ 100);
+ 
++ kfree(imode);
++
+ return (rc > 0) ? 0 : rc;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 81d8e3bd01b6..21ce0b701143 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -82,7 +82,7 @@
+ #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
+ 
+ #define MLX5_UMR_ALIGN (2048)
+-#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
++#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256)
+ 
+ #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
+ #define MLX5E_DEFAULT_LRO_TIMEOUT 32
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+index 90e81ae9f3bc..e034dbc4913d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+@@ -563,6 +563,7 @@ int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *i
+ int idx = 0;
+ int err = 0;
+ 
++ info->data = MAX_NUM_OF_ETHTOOL_RULES;
+ while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
+ err = mlx5e_ethtool_get_flow(priv, info, location);
+ if (!err)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+index 55957246c0e8..b5d5519542e8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+@@ -294,7 +294,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
+ struct netdev_notifier_changeupper_info *info)
+ {
+ struct net_device *upper = info->upper_dev, *ndev_tmp;
+- struct netdev_lag_upper_info *lag_upper_info;
++ struct netdev_lag_upper_info *lag_upper_info = NULL;
+ bool is_bonded;
+ int bond_status = 0;
+ int num_slaves = 0;
+@@ -303,7 +303,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
+ if (!netif_is_lag_master(upper))
+ return 0;
+ 
+- lag_upper_info = info->upper_info;
++ if (info->linking)
++ lag_upper_info = info->upper_info;
+ 
+ /* The event may still be of interest if the slave does not belong to
+ * us, but is enslaved to a master which has one or more of our netdevs
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 7a196a07fa51..d776db79e325 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -966,7 +966,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
+ if (err) {
+ dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
+ FW_INIT_TIMEOUT_MILI);
+- goto out_err;
++ goto err_cmd_cleanup;
+ }
+ 
+ err = mlx5_core_enable_hca(dev, 0);
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 1a92de705199..a2d218b28c0e 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -1059,12 +1059,70 @@ static struct mdiobb_ops bb_ops = {
+ .get_mdio_data = sh_get_mdio,
+ };
+ 
++/* free Tx skb function */
++static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
++{
++ struct sh_eth_private *mdp = netdev_priv(ndev);
++ struct sh_eth_txdesc *txdesc;
++ int free_num = 0;
++ int entry;
++ bool sent;
++
++ for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
++ entry = mdp->dirty_tx % mdp->num_tx_ring;
++ txdesc = &mdp->tx_ring[entry];
++ sent = !(txdesc->status & cpu_to_le32(TD_TACT));
++ if (sent_only && !sent)
++ break;
++ /* TACT bit must be checked before all the following reads */
++ dma_rmb();
++ netif_info(mdp, tx_done, ndev,
++ "tx entry %d status 0x%08x\n",
++ entry, le32_to_cpu(txdesc->status));
++ /* Free the original skb. */
++ if (mdp->tx_skbuff[entry]) {
++ dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
++ le32_to_cpu(txdesc->len) >> 16,
++ DMA_TO_DEVICE);
++ dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
++ mdp->tx_skbuff[entry] = NULL;
++ free_num++;
++ }
++ txdesc->status = cpu_to_le32(TD_TFP);
++ if (entry >= mdp->num_tx_ring - 1)
++ txdesc->status |= cpu_to_le32(TD_TDLE);
++
++ if (sent) {
++ ndev->stats.tx_packets++;
++ ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
++ }
++ }
++ return free_num;
++}
++
+ /* free skb and descriptor buffer */
+ static void sh_eth_ring_free(struct net_device *ndev)
+ {
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int ringsize, i;
+ 
++ if (mdp->rx_ring) {
++ for (i = 0; i < mdp->num_rx_ring; i++) {
++ if (mdp->rx_skbuff[i]) {
++ struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
++
++ dma_unmap_single(&ndev->dev,
++ le32_to_cpu(rxdesc->addr),
++ ALIGN(mdp->rx_buf_sz, 32),
++ DMA_FROM_DEVICE);
++ }
++ }
++ ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
++ dma_free_coherent(NULL, ringsize, mdp->rx_ring,
++ mdp->rx_desc_dma);
++ mdp->rx_ring = NULL;
++ }
++
+ /* Free Rx skb ringbuffer */
+ if (mdp->rx_skbuff) {
+ for (i = 0; i < mdp->num_rx_ring; i++)
+@@ -1073,27 +1131,18 @@ static void sh_eth_ring_free(struct net_device *ndev)
+ kfree(mdp->rx_skbuff);
+ mdp->rx_skbuff = NULL;
+ 
+- /* Free Tx skb ringbuffer */
+- if (mdp->tx_skbuff) {
+- for (i = 0; i < mdp->num_tx_ring; i++)
+- dev_kfree_skb(mdp->tx_skbuff[i]);
+- }
+- kfree(mdp->tx_skbuff);
+- mdp->tx_skbuff = NULL;
+-
+- if (mdp->rx_ring) {
+- ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+- dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+- mdp->rx_desc_dma);
+- mdp->rx_ring = NULL;
+- }
+-
+ if (mdp->tx_ring) {
++ sh_eth_tx_free(ndev, false);
++
+ ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
+ dma_free_coherent(NULL, ringsize, mdp->tx_ring,
+ mdp->tx_desc_dma);
+ mdp->tx_ring = NULL;
+ }
++
++ /* Free Tx skb ringbuffer */
++ kfree(mdp->tx_skbuff);
++ mdp->tx_skbuff = NULL;
+ }
+ 
+ /* format skb and descriptor buffer */
+@@ -1341,43 +1390,6 @@ static void sh_eth_dev_exit(struct net_device *ndev)
+ update_mac_address(ndev);
+ }
+ 
+-/* free Tx skb function */
+-static int sh_eth_txfree(struct net_device *ndev)
+-{
+- struct sh_eth_private *mdp = netdev_priv(ndev);
+- struct sh_eth_txdesc *txdesc;
+- int free_num = 0;
+- int entry;
+-
+- for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+- entry = mdp->dirty_tx % mdp->num_tx_ring;
+- txdesc = &mdp->tx_ring[entry];
+- if (txdesc->status & cpu_to_le32(TD_TACT))
+- break;
+- /* TACT bit must be checked before all the following reads */
+- dma_rmb();
+- netif_info(mdp, tx_done, ndev,
+- "tx entry %d status 0x%08x\n",
+- entry, le32_to_cpu(txdesc->status));
+- /* Free the original skb. */
+- if (mdp->tx_skbuff[entry]) {
+- dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+- le32_to_cpu(txdesc->len) >> 16,
+- DMA_TO_DEVICE);
+- dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+- mdp->tx_skbuff[entry] = NULL;
+- free_num++;
+- }
+- txdesc->status = cpu_to_le32(TD_TFP);
+- if (entry >= mdp->num_tx_ring - 1)
+- txdesc->status |= cpu_to_le32(TD_TDLE);
+-
+- ndev->stats.tx_packets++;
+- ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
+- }
+- return free_num;
+-}
+-
+ /* Packet receive function */
+ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
+ {
+@@ -1620,7 +1632,7 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
+ intr_status, mdp->cur_tx, mdp->dirty_tx,
+ (u32)ndev->state, edtrr);
+ /* dirty buffer free */
+- sh_eth_txfree(ndev);
++ sh_eth_tx_free(ndev, true);
+ 
+ /* SH7712 BUG */
+ if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
+@@ -1679,7 +1691,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
+ /* Clear Tx interrupts */
+ sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
+ 
+- sh_eth_txfree(ndev);
++ sh_eth_tx_free(ndev, true);
+ netif_wake_queue(ndev);
+ }
+ 
+@@ -2307,7 +2319,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 
+ spin_lock_irqsave(&mdp->lock, flags);
+ if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
+- if (!sh_eth_txfree(ndev)) {
++ if (!sh_eth_tx_free(ndev, true)) {
+ netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
+ netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&mdp->lock, flags);
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index d2e61e002926..f7c6a40aae81 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -2709,7 +2709,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
+ }
+ 
+ #define MACSEC_FEATURES \
+- (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
++ (NETIF_F_SG | NETIF_F_HIGHDMA)
+ static struct lock_class_key macsec_netdev_addr_lock_key;
+ 
+ static int macsec_dev_init(struct net_device *dev)
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 26d6f0bbe14b..dc8ccac0a01d 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -1140,6 +1140,7 @@ static int macvlan_port_create(struct net_device *dev)
+ static void macvlan_port_destroy(struct net_device *dev)
+ {
+ struct macvlan_port *port = macvlan_port_get_rtnl(dev);
++ struct sk_buff *skb;
+ 
+ dev->priv_flags &= ~IFF_MACVLAN_PORT;
+ netdev_rx_handler_unregister(dev);
+@@ -1148,7 +1149,15 @@ static void macvlan_port_destroy(struct net_device *dev)
+ * but we need to cancel it and purge left skbs if any.
+ */
+ cancel_work_sync(&port->bc_work);
+- __skb_queue_purge(&port->bc_queue);
++
++ while ((skb = __skb_dequeue(&port->bc_queue))) {
++ const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
++
++ if (src)
++ dev_put(src->dev);
++
++ kfree_skb(skb);
++ }
+ 
+ kfree_rcu(port, rcu);
+ }
+diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
+index 7a240fce3a7e..4865221aa9ac 100644
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -1438,8 +1438,6 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
+ skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
+ skb_queue_tail(&dp83640->rx_queue, skb);
+ schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
+- } else {
+- netif_rx_ni(skb);
+ }
+ 
+ return true;
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 201ffa5fe4f7..a9be26f1f677 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -552,16 +552,18 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
+ EXPORT_SYMBOL(phy_mii_ioctl);
+ 
+ /**
+- * phy_start_aneg - start auto-negotiation for this PHY device
++ * phy_start_aneg_priv - start auto-negotiation for this PHY device
+ * @phydev: the phy_device struct
++ * @sync: indicate whether we should wait for the workqueue cancelation
+ *
+ * Description: Sanitizes the settings (if we're not autonegotiating
+ * them), and then calls the driver's config_aneg function.
+ * If the PHYCONTROL Layer is operating, we change the state to
+ * reflect the beginning of Auto-negotiation or forcing.
+ */
+-int phy_start_aneg(struct phy_device *phydev)
++static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
+ {
++ bool trigger = 0;
+ int err;
+ 
+ mutex_lock(&phydev->lock);
+@@ -586,10 +588,40 @@ int phy_start_aneg(struct phy_device *phydev)
+ }
+ }
+ 
++ /* Re-schedule a PHY state machine to check PHY status because
++ * negotiation may already be done and aneg interrupt may not be
++ * generated.
++ */
++ if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
++ err = phy_aneg_done(phydev);
++ if (err > 0) {
++ trigger = true;
++ err = 0;
++ }
++ }
++
+ out_unlock:
+ mutex_unlock(&phydev->lock);
++
++ if (trigger)
++ phy_trigger_machine(phydev, sync);
++
+ return err;
+ }
++
++/**
++ * phy_start_aneg - start auto-negotiation for this PHY device
++ * @phydev: the phy_device struct
++ *
++ * Description: Sanitizes the settings (if we're not autonegotiating
++ * them), and then calls the driver's config_aneg function.
++ * If the PHYCONTROL Layer is operating, we change the state to
++ * reflect the beginning of Auto-negotiation or forcing.
++ */
++int phy_start_aneg(struct phy_device *phydev)
++{
++ return phy_start_aneg_priv(phydev, true);
++}
+ EXPORT_SYMBOL(phy_start_aneg);
+ 
+ /**
+@@ -617,7 +649,7 @@ void phy_start_machine(struct phy_device *phydev)
+ * state machine runs.
+ */
+ 
+-static void phy_trigger_machine(struct phy_device *phydev, bool sync)
++void phy_trigger_machine(struct phy_device *phydev, bool sync)
+ {
+ if (sync)
+ cancel_delayed_work_sync(&phydev->state_queue);
+@@ -639,7 +671,7 @@ void phy_stop_machine(struct phy_device *phydev)
+ cancel_delayed_work_sync(&phydev->state_queue);
+ 
+ mutex_lock(&phydev->lock);
+- if (phydev->state > PHY_UP)
++ if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
+ phydev->state = PHY_UP;
+ mutex_unlock(&phydev->lock);
+ }
+@@ -1100,7 +1132,7 @@ void phy_state_machine(struct work_struct *work)
+ mutex_unlock(&phydev->lock);
+ 
+ if (needs_aneg)
+- err = phy_start_aneg(phydev);
++ err = phy_start_aneg_priv(phydev, false);
+ else if (do_suspend)
+ phy_suspend(phydev);
+ 
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index a2afb8ecb5bc..80ef4865cc8b 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -1124,7 +1124,7 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
+ goto nla_put_failure;
+ 
+ /* rule only needs to appear once */
+- nlh->nlmsg_flags &= NLM_F_EXCL;
++ nlh->nlmsg_flags |= NLM_F_EXCL;
+ 
+ frh = nlmsg_data(nlh);
+ memset(frh, 0, sizeof(*frh));
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 12f2252f6c98..953275b651bc 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -2080,11 +2080,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
+ if (inode_dirty_flags)
+ __mark_inode_dirty(inode, inode_dirty_flags);
+ 
+- if (ia_valid & ATTR_MODE) {
+- err = posix_acl_chmod(inode, attr->ia_mode);
+- if (err)
+- goto out_put;
+- }
+ 
+ if (mask) {
+ req->r_inode = inode;
+@@ -2098,13 +2093,11 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
+ ceph_cap_string(dirtied), mask);
+ 
+ ceph_mdsc_put_request(req);
+- if (mask & CEPH_SETATTR_SIZE)
+- __ceph_do_pending_vmtruncate(inode);
+- ceph_free_cap_flush(prealloc_cf);
+- return err;
+-out_put:
+- ceph_mdsc_put_request(req);
+ ceph_free_cap_flush(prealloc_cf);
++
++ if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
++ __ceph_do_pending_vmtruncate(inode);
++
+ return err;
+ }
+ 
+@@ -2123,7 +2116,12 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
+ if (err != 0)
+ return err;
+ 
+- return __ceph_setattr(inode, attr);
++ err = __ceph_setattr(inode, attr);
++
++ if (err >= 0 && (attr->ia_valid & ATTR_MODE))
++ err = posix_acl_chmod(inode, attr->ia_mode);
++
++ return err;
+ }
+ 
+ /*
+diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
+index dba2ff8eaa68..452334694a5d 100644
+--- a/fs/nfsd/nfs3xdr.c
++++ b/fs/nfsd/nfs3xdr.c
+@@ -358,6 +358,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ {
+ unsigned int len, v, hdr, dlen;
+ u32 max_blocksize = svc_max_payload(rqstp);
++ struct kvec *head = rqstp->rq_arg.head;
++ struct kvec *tail = rqstp->rq_arg.tail;
+ 
+ p = decode_fh(p, &args->fh);
+ if (!p)
+@@ -367,6 +369,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ args->count = ntohl(*p++);
+ args->stable = ntohl(*p++);
+ len = args->len = ntohl(*p++);
++ if ((void *)p > head->iov_base + head->iov_len)
++ return 0;
+ /*
+ * The count must equal the amount of data passed.
+ */
+@@ -377,9 +381,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ * Check to make sure that we got the right number of
+ * bytes.
+ */
+- hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
+- dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
+- + rqstp->rq_arg.tail[0].iov_len - hdr;
++ hdr = (void*)p - head->iov_base;
++ dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr;
+ /*
+ * Round the length of the data which was specified up to
+ * the next multiple of XDR units and then compare that
+@@ -396,7 +399,7 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ len = args->len = max_blocksize;
+ }
+ rqstp->rq_vec[0].iov_base = (void*)p;
+- rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
++ rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
+ v = 0;
+ while (len > rqstp->rq_vec[v].iov_len) {
+ len -= rqstp->rq_vec[v].iov_len;
+@@ -471,6 +474,8 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
+ /* first copy and check from the first page */
+ old = (char*)p;
+ vec = &rqstp->rq_arg.head[0];
++ if ((void *)old > vec->iov_base + vec->iov_len)
++ return 0;
+ avail = vec->iov_len - (old - (char*)vec->iov_base);
+ while (len && avail && *old) {
+ *new++ = *old++;
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index a2b65fc56dd6..1645b977c9c6 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -733,6 +733,37 @@ static __be32 map_new_errors(u32 vers, __be32 nfserr)
+ return nfserr;
+ }
+ 
++/*
++ * A write procedure can have a large argument, and a read procedure can
++ * have a large reply, but no NFSv2 or NFSv3 procedure has argument and
++ * reply that can both be larger than a page. The xdr code has taken
++ * advantage of this assumption to be a sloppy about bounds checking in
++ * some cases. Pending a rewrite of the NFSv2/v3 xdr code to fix that
++ * problem, we enforce these assumptions here:
++ */
++static bool nfs_request_too_big(struct svc_rqst *rqstp,
++ struct svc_procedure *proc)
++{
++ /*
++ * The ACL code has more careful bounds-checking and is not
++ * susceptible to this problem:
++ */
++ if (rqstp->rq_prog != NFS_PROGRAM)
++ return false;
++ /*
++ * Ditto NFSv4 (which can in theory have argument and reply both
++ * more than a page):
++ */
++ if (rqstp->rq_vers >= 4)
++ return false;
++ /* The reply will be small, we're OK: */
++ if (proc->pc_xdrressize > 0 &&
++ proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
++ return false;
++
++ return rqstp->rq_arg.len > PAGE_SIZE;
++}
++
+ int
+ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ {
+@@ -745,6 +776,11 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ rqstp->rq_vers, rqstp->rq_proc);
+ proc = rqstp->rq_procinfo;
+ 
++ if (nfs_request_too_big(rqstp, proc)) {
++ dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers);
++ *statp = rpc_garbage_args;
++ return 1;
++ }
+ /*
+ * Give the xdr decoder a chance to change this if it wants
+ * (necessary in the NFSv4.0 compound case)
+diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
+index 41b468a6a90f..de07ff625777 100644
+--- a/fs/nfsd/nfsxdr.c
++++ b/fs/nfsd/nfsxdr.c
+@@ -280,6 +280,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ struct nfsd_writeargs *args)
+ {
+ unsigned int len, hdr, dlen;
++ struct kvec *head = rqstp->rq_arg.head;
+ int v;
+ 
+ p = decode_fh(p, &args->fh);
+@@ -300,9 +301,10 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ * Check to make sure that we got the right number of
+ * bytes.
+ */
+- hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
+- dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
+- - hdr;
++ hdr = (void*)p - head->iov_base;
++ if (hdr > head->iov_len)
++ return 0;
++ dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
+ 
+ /*
+ * Round the length of the data which was specified up to
+@@ -316,7 +318,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ return 0;
+ 
+ rqstp->rq_vec[0].iov_base = (void*)p;
+- rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
++ rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
+ v = 0;
+ while (len > rqstp->rq_vec[v].iov_len) {
+ len -= rqstp->rq_vec[v].iov_len;
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index e25f1830fbcf..bd22670e2182 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -806,6 +806,7 @@ void phy_change(struct work_struct *work);
+ void phy_mac_interrupt(struct phy_device *phydev, int new_link);
+ void phy_start_machine(struct phy_device *phydev);
+ void phy_stop_machine(struct phy_device *phydev);
++void phy_trigger_machine(struct phy_device *phydev, bool sync);
+ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+ int phy_ethtool_ksettings_get(struct phy_device *phydev,
+diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h
+index f6598d1c886e..316e838b7470 100644
+--- a/include/uapi/linux/ipv6_route.h
++++ b/include/uapi/linux/ipv6_route.h
+@@ -34,7 +34,7 @@
+ #define RTF_PREF(pref) ((pref) << 27)
+ #define RTF_PREF_MASK 0x18000000
+ 
+-#define RTF_PCPU 0x40000000
++#define RTF_PCPU 0x40000000 /* read-only: can not be set by user */
+ #define RTF_LOCAL 0x80000000
+ 
+ 
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 85d1c9423ccb..7c9f94c53441 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1829,14 +1829,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
+ 
+ for (i = 0; i < MAX_BPF_REG; i++)
+ if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
+- regs[i].range = dst_reg->off;
++ /* keep the maximum range already checked */
++ regs[i].range = max(regs[i].range, dst_reg->off);
+ 
+ for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
+ if (state->stack_slot_type[i] != STACK_SPILL)
+ continue;
+ reg = &state->spilled_regs[i / BPF_REG_SIZE];
+ if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
+- reg->range = dst_reg->off;
++ reg->range = max(reg->range, dst_reg->off);
+ }
+ }
+ 
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 3fc94a49ccd5..cf129fec7329 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -2101,6 +2101,10 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
+ trace_9p_protocol_dump(clnt, req->rc);
+ goto free_and_error;
+ }
++ if (rsize < count) {
++ pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
++ count = rsize;
++ }
+ 
+ p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
+ 
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 9901e5b75a05..f45f6198851f 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -859,7 +859,8 @@ static void neigh_probe(struct neighbour *neigh)
+ if (skb)
+ skb = skb_clone(skb, GFP_ATOMIC);
+ write_unlock(&neigh->lock);
+- neigh->ops->solicit(neigh, skb);
++ if (neigh->ops->solicit)
++ neigh->ops->solicit(neigh, skb);
+ atomic_inc(&neigh->probes);
+ kfree_skb(skb);
+ }
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 53599bd0c82d..457f882b0f7b 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -105,15 +105,21 @@ static void queue_process(struct work_struct *work)
+ while ((skb = skb_dequeue(&npinfo->txq))) {
+ struct net_device *dev = skb->dev;
+ struct netdev_queue *txq;
++ unsigned int q_index;
+ 
+ if (!netif_device_present(dev) || !netif_running(dev)) {
+ kfree_skb(skb);
+ continue;
+ }
+ 
+- txq = skb_get_tx_queue(dev, skb);
+-
+ local_irq_save(flags);
++ /* check if skb->queue_mapping is still valid */
++ q_index = skb_get_queue_mapping(skb);
++ if (unlikely(q_index >= dev->real_num_tx_queues)) {
++ q_index = q_index % dev->real_num_tx_queues;
++ skb_set_queue_mapping(skb, q_index);
++ }
++ txq = netdev_get_tx_queue(dev, q_index);
+ HARD_TX_LOCK(dev, txq, smp_processor_id());
+ if (netif_xmit_frozen_or_stopped(txq) ||
+ netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index f0f462c0573d..fe008f1bd930 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3076,22 +3076,32 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
+ if (sg && csum && (mss != GSO_BY_FRAGS)) {
+ if (!(features & NETIF_F_GSO_PARTIAL)) {
+ struct sk_buff *iter;
++ unsigned int frag_len;
+ 
+ if (!list_skb ||
+ !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
+ goto normal;
+ 
+- /* Split the buffer at the frag_list pointer.
+- * This is based on the assumption that all
+- * buffers in the chain excluding the last
+- * containing the same amount of data.
++ /* If we get here then all the required
++ * GSO features except frag_list are supported.
++ * Try to split the SKB to multiple GSO SKBs
++ * with no frag_list.
++ * Currently we can do that only when the buffers don't
++ * have a linear part and all the buffers except
++ * the last are of the same length.
+ */
++ frag_len = list_skb->len;
+ skb_walk_frags(head_skb, iter) {
++ if (frag_len != iter->len && iter->next)
++ goto normal;
+ if (skb_headlen(iter))
+ goto normal;
+ 
+ len -= iter->len;
+ }
++
++ if (len != frag_len)
++ goto normal;
+ }
+ 
+ /* GSO partial only requires that we trim off any excess that
+@@ -3779,6 +3789,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
+ serr->ee.ee_errno = ENOMSG;
+ serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+ serr->ee.ee_info = tstype;
++ serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
+ if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
+ serr->ee.ee_data = skb_shinfo(skb)->tskey;
+ if (sk->sk_protocol == IPPROTO_TCP &&
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 9826695ddfc6..4d37bdcbc2d5 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -474,16 +474,15 @@ static bool ipv4_datagram_support_cmsg(const struct sock *sk,
+ return false;
+ 
+ /* Support IP_PKTINFO on tstamp packets if requested, to correlate
+- * timestamp with egress dev. Not possible for packets without dev
++ * timestamp with egress dev. Not possible for packets without iif
+ * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
+ */
+- if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
+- (!skb->dev))
++ info = PKTINFO_SKB_CB(skb);
++ if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
++ !info->ipi_ifindex)
+ return false;
+ 
+- info = PKTINFO_SKB_CB(skb);
+ info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
+- info->ipi_ifindex = skb->dev->ifindex;
+ return true;
+ }
+ 
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 105c0748c52f..e612991c9185 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -156,17 +156,18 @@ int ping_hash(struct sock *sk)
+ void ping_unhash(struct sock *sk)
+ {
+ struct inet_sock *isk = inet_sk(sk);
++
+ pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
++ write_lock_bh(&ping_table.lock);
+ if (sk_hashed(sk)) {
+- write_lock_bh(&ping_table.lock);
+ hlist_nulls_del(&sk->sk_nulls_node);
+ sk_nulls_node_init(&sk->sk_nulls_node);
+ sock_put(sk);
+ isk->inet_num = 0;
+ isk->inet_sport = 0;
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+- write_unlock_bh(&ping_table.lock);
+ }
++ write_unlock_bh(&ping_table.lock);
+ }
+ EXPORT_SYMBOL_GPL(ping_unhash);
+ 
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 17e6fbf30448..6dbcb37753d7 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2569,7 +2569,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
+ skb_reset_network_header(skb);
+ 
+ /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
+- ip_hdr(skb)->protocol = IPPROTO_ICMP;
++ ip_hdr(skb)->protocol = IPPROTO_UDP;
+ skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
+ 
+ src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 6a90a0e130dc..eb142ca71fc5 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2297,6 +2297,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+ tcp_init_send_head(sk);
+ memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
+ __sk_dst_reset(sk);
++ tcp_saved_syn_free(tp);
+ 
+ WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
+ 
+diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
+index f9038d6b109e..baea5df43598 100644
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -167,12 +167,8 @@ void tcp_assign_congestion_control(struct sock *sk)
+ }
+ out:
+ rcu_read_unlock();
++ memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+ 
+- /* Clear out private data before diag gets it and
+- * the ca has not been initialized.
+- */
+- if (ca->get_info)
+- memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+ if (ca->flags & TCP_CONG_NEEDS_ECN)
+ INET_ECN_xmit(sk);
+ else
+@@ -199,11 +195,10 @@ static void tcp_reinit_congestion_control(struct sock *sk,
+ tcp_cleanup_congestion_control(sk);
+ icsk->icsk_ca_ops = ca;
+ icsk->icsk_ca_setsockopt = 1;
++ memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+ 
+- if (sk->sk_state != TCP_CLOSE) {
+- memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
++ if (sk->sk_state != TCP_CLOSE)
+ tcp_init_congestion_control(sk);
+- }
+ }
+ 
+ /* Manage refcounts on socket close. */
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 95dfcba38ff6..cffdbdbff3a2 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3253,14 +3253,24 @@ static void addrconf_gre_config(struct net_device *dev)
+ static int fixup_permanent_addr(struct inet6_dev *idev,
+ struct inet6_ifaddr *ifp)
+ {
+- if (!ifp->rt) {
+- struct rt6_info *rt;
++ /* rt6i_ref == 0 means the host route was removed from the
++ * FIB, for example, if 'lo' device is taken down. In that
++ * case regenerate the host route.
++ */
++ if (!ifp->rt || !atomic_read(&ifp->rt->rt6i_ref)) {
++ struct rt6_info *rt, *prev;
+ 
+ rt = addrconf_dst_alloc(idev, &ifp->addr, false);
+ if (unlikely(IS_ERR(rt)))
+ return PTR_ERR(rt);
+ 
++ /* ifp->rt can be accessed outside of rtnl */
++ spin_lock(&ifp->lock);
++ prev = ifp->rt;
+ ifp->rt = rt;
++ spin_unlock(&ifp->lock);
++
++ ip6_rt_put(prev);
+ }
+ 
+ if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
+@@ -3602,14 +3612,19 @@ static int addrconf_ifdown(struct net_device *dev, int how)
+ INIT_LIST_HEAD(&del_list);
+ list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
+ struct rt6_info *rt = NULL;
++ bool keep;
+ 
+ addrconf_del_dad_work(ifa);
+ 
++ keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
++ !addr_is_local(&ifa->addr);
++ if (!keep)
++ list_move(&ifa->if_list, &del_list);
++
+ write_unlock_bh(&idev->lock);
+ spin_lock_bh(&ifa->lock);
+ 
+- if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+- !addr_is_local(&ifa->addr)) {
++ if (keep) {
+ /* set state to skip the notifier below */
+ state = INET6_IFADDR_STATE_DEAD;
+ ifa->state = 0;
+@@ -3621,8 +3636,6 @@ static int addrconf_ifdown(struct net_device *dev, int how)
+ } else {
+ state = ifa->state;
+ ifa->state = INET6_IFADDR_STATE_DEAD;
+-
+- list_move(&ifa->if_list, &del_list);
+ }
+ 
+ spin_unlock_bh(&ifa->lock);
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 8616d17cf08f..442ec1f39ed1 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -400,9 +400,6 @@ static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
+ * At one point, excluding local errors was a quick test to identify icmp/icmp6
+ * errors. This is no longer true, but the test remained, so the v6 stack,
+ * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
+- *
+- * Timestamp code paths do not initialize the fields expected by cmsg:
+- * the PKTINFO fields in skb->cb[]. Fill those in here.
+ */
+ static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
+ struct sock_exterr_skb *serr)
+@@ -414,14 +411,9 @@ static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
+ if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
+ return false;
+ 
+- if (!skb->dev)
++ if (!IP6CB(skb)->iif)
+ return false;
+ 
+- if (skb->protocol == htons(ETH_P_IPV6))
+- IP6CB(skb)->iif = skb->dev->ifindex;
+- else
+- PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
+-
+ return true;
+ }
+ 
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index f6ba45242851..116b4da06820 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1037,7 +1037,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct net *net = t->net;
+ struct net_device_stats *stats = &t->dev->stats;
+- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
++ struct ipv6hdr *ipv6h;
+ struct ipv6_tel_txoption opt;
+ struct dst_entry *dst = NULL, *ndst = NULL;
+ struct net_device *tdev;
+@@ -1057,26 +1057,28 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ 
+ /* NBMA tunnel */
+ if (ipv6_addr_any(&t->parms.raddr)) {
+- struct in6_addr *addr6;
+- struct neighbour *neigh;
+- int addr_type;
++ if (skb->protocol == htons(ETH_P_IPV6)) {
++ struct in6_addr *addr6;
++ struct neighbour *neigh;
++ int addr_type;
+ 
+- if (!skb_dst(skb))
+- goto tx_err_link_failure;
++ if (!skb_dst(skb))
++ goto tx_err_link_failure;
+ 
+- neigh = dst_neigh_lookup(skb_dst(skb),
+- &ipv6_hdr(skb)->daddr);
+- if (!neigh)
+- goto tx_err_link_failure;
++ neigh = dst_neigh_lookup(skb_dst(skb),
++ &ipv6_hdr(skb)->daddr);
++ if (!neigh)
++ goto tx_err_link_failure;
+ 
+- addr6 = (struct in6_addr *)&neigh->primary_key;
+- addr_type = ipv6_addr_type(addr6);
++ addr6 = (struct in6_addr *)&neigh->primary_key;
++ addr_type = ipv6_addr_type(addr6);
+ 
+- if (addr_type == IPV6_ADDR_ANY)
+- addr6 = &ipv6_hdr(skb)->daddr;
++ if (addr_type == IPV6_ADDR_ANY)
++ addr6 = &ipv6_hdr(skb)->daddr;
+ 
+- memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
+- neigh_release(neigh);
++ memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
++ neigh_release(neigh);
++ }
+ } else if (!(t->parms.flags &
+ (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
+ /* enable the cache only only if the routing decision does
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 7f4265b1649b..117405dd07a3 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -774,7 +774,8 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
+ * Delete a VIF entry
+ */
+ 
+-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
++static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
++ struct list_head *head)
+ {
+ struct mif_device *v;
+ struct net_device *dev;
+@@ -820,7 +821,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
+ dev->ifindex, &in6_dev->cnf);
+ }
+ 
+- if (v->flags & MIFF_REGISTER)
++ if ((v->flags & MIFF_REGISTER) && !notify)
+ unregister_netdevice_queue(dev, head);
+ 
+ dev_put(dev);
+@@ -1331,7 +1332,6 @@ static int ip6mr_device_event(struct notifier_block *this,
+ struct mr6_table *mrt;
+ struct mif_device *v;
+ int ct;
+- LIST_HEAD(list);
+ 
+ if (event != NETDEV_UNREGISTER)
+ return NOTIFY_DONE;
+@@ -1340,10 +1340,9 @@ static int ip6mr_device_event(struct notifier_block *this,
+ v = &mrt->vif6_table[0];
+ for (ct = 0; ct < mrt->maxvif; ct++, v++) {
+ if (v->dev == dev)
+- mif6_delete(mrt, ct, &list);
++ mif6_delete(mrt, ct, 1, NULL);
+ }
+ }
+- unregister_netdevice_many(&list);
+ 
+ return NOTIFY_DONE;
+ }
+@@ -1552,7 +1551,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all)
+ for (i = 0; i < mrt->maxvif; i++) {
+ if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
+ continue;
+- mif6_delete(mrt, i, &list);
++ mif6_delete(mrt, i, 0, &list);
+ }
+ unregister_netdevice_many(&list);
+ 
+@@ -1706,7 +1705,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
+ if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
+ return -EFAULT;
+ rtnl_lock();
+- ret = mif6_delete(mrt, mifi, NULL);
++ ret = mif6_delete(mrt, mifi, 0, NULL);
+ rtnl_unlock();
+ return ret;
+ 
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 869ffc76befa..ced3817539c2 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -1171,8 +1171,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb)
+- amount = skb_tail_pointer(skb) -
+- skb_transport_header(skb);
++ amount = skb->len;
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+ return put_user(amount, (int __user *)arg);
+ }
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 8d6c09f082c2..9f1bc756799a 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1826,6 +1826,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
+ int addr_type;
+ int err = -EINVAL;
+ 
++ /* RTF_PCPU is an internal flag; can not be set by userspace */
++ if (cfg->fc_flags & RTF_PCPU)
++ goto out;
++
+ if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
+ goto out;
+ #ifndef CONFIG_IPV6_SUBTREES
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index a646f3481240..fecad1098cf8 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1685,7 +1685,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ struct kcm_attach info;
+ 
+ if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
+- err = -EFAULT;
++ return -EFAULT;
+ 
+ err = kcm_attach_ioctl(sock, &info);
+ 
+@@ -1695,7 +1695,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ struct kcm_unattach info;
+ 
+ if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
+- err = -EFAULT;
++ return -EFAULT;
+ 
+ err = kcm_unattach_ioctl(sock, &info);
+ 
+@@ -1706,7 +1706,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ struct socket *newsock = NULL;
+ 
+ if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
+- err = -EFAULT;
++ return -EFAULT;
+ 
+ err = kcm_clone(sock, &info, &newsock);
+ 
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index a2ed3bda4ddc..e702cb95b89b 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -278,7 +278,8 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
+ }
+ EXPORT_SYMBOL_GPL(l2tp_session_find);
+ 
+-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
++struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
++ bool do_ref)
+ {
+ int hash;
+ struct l2tp_session *session;
+@@ -288,6 +289,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+ for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
+ hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
+ if (++count > nth) {
++ l2tp_session_inc_refcount(session);
++ if (do_ref && session->ref)
++ session->ref(session);
+ read_unlock_bh(&tunnel->hlist_lock);
+ return session;
+ }
+@@ -298,7 +302,7 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+ 
+ return NULL;
+ }
+-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
++EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
+ 
+ /* Lookup a session by interface name.
+ * This is very inefficient but is only used by management interfaces.
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index 181e755c2fc4..e7233bad65e0 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -243,7 +243,8 @@ static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
+ struct l2tp_session *l2tp_session_find(struct net *net,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id);
+-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
++struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
++ bool do_ref);
+ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+ struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
+ struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
+diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
+index 2d6760a2ae34..d100aed3d06f 100644
+--- a/net/l2tp/l2tp_debugfs.c
++++ b/net/l2tp/l2tp_debugfs.c
+@@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
+ 
+ static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
+ {
+- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
++ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
+ pd->session_idx++;
+ 
+ if (pd->session == NULL) {
+@@ -238,10 +238,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
+ }
+ 
+ /* Show the tunnel or session context */
+- if (pd->session == NULL)
++ if (!pd->session) {
+ l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
+- else
++ } else {
+ l2tp_dfs_seq_session_show(m, pd->session);
++ if (pd->session->deref)
++ pd->session->deref(pd->session);
++ l2tp_session_dec_refcount(pd->session);
++ }
+ 
+ out:
+ return 0;
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index ff750bb334fa..20669537816e 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -178,9 +178,10 @@ static int l2tp_ip_recv(struct sk_buff *skb)
+ 
+ tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+- if (tunnel != NULL)
++ if (tunnel) {
+ sk = tunnel->sock;
+- else {
++ sock_hold(sk);
++ } else {
+ struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
+ 
+ read_lock_bh(&l2tp_ip_lock);
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 1a65c9a517b6..a4b0c9232bf1 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -191,9 +191,10 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
+ 
+ tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+- if (tunnel != NULL)
++ if (tunnel) {
+ sk = tunnel->sock;
+- else {
++ sock_hold(sk);
++ } else {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ 
+ read_lock_bh(&l2tp_ip6_lock);
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index bf3117771822..9f66272b163b 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -844,7 +844,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
+ goto out;
+ }
+ 
+- session = l2tp_session_find_nth(tunnel, si);
++ session = l2tp_session_get_nth(tunnel, si, false);
+ if (session == NULL) {
+ ti++;
+ tunnel = NULL;
+@@ -854,8 +854,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
+ 
+ if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+- session, L2TP_CMD_SESSION_GET) < 0)
++ session, L2TP_CMD_SESSION_GET) < 0) {
++ l2tp_session_dec_refcount(session);
+ break;
++ }
++ l2tp_session_dec_refcount(session);
+ 
+ si++;
+ }
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 41d47bfda15c..1387f547a09e 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -450,6 +450,10 @@ static void pppol2tp_session_close(struct l2tp_session *session)
+ static void pppol2tp_session_destruct(struct sock *sk)
+ {
+ struct l2tp_session *session = sk->sk_user_data;
++
++ skb_queue_purge(&sk->sk_receive_queue);
++ skb_queue_purge(&sk->sk_write_queue);
++
+ if (session) {
+ sk->sk_user_data = NULL;
+ BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+@@ -488,9 +492,6 @@ static int pppol2tp_release(struct socket *sock)
+ l2tp_session_queue_purge(session);
+ sock_put(sk);
+ }
+- skb_queue_purge(&sk->sk_receive_queue);
+- skb_queue_purge(&sk->sk_write_queue);
+-
+ release_sock(sk);
+ 
+ /* This will delete the session context via
+@@ -1554,7 +1555,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
+ 
+ static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
+ {
+- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
++ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
+ pd->session_idx++;
+ 
+ if (pd->session == NULL) {
+@@ -1681,10 +1682,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)
+ 
+ /* Show the tunnel or session context.
+ */
+- if (pd->session == NULL)
++ if (!pd->session) {
+ pppol2tp_seq_tunnel_show(m, pd->tunnel);
+- else
++ } else {
+ pppol2tp_seq_session_show(m, pd->session);
++ if (pd->session->deref)
++ pd->session->deref(pd->session);
++ l2tp_session_dec_refcount(pd->session);
++ }
+ 
+ out:
+ return 0;
+@@ -1843,4 +1848,4 @@ MODULE_DESCRIPTION("PPP over L2TP over UDP");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION(PPPOL2TP_DRV_VERSION);
+ MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP);
+-MODULE_ALIAS_L2TP_PWTYPE(11);
++MODULE_ALIAS_L2TP_PWTYPE(7);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 8ab0974f4ee2..cb76ff3088e9 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3702,6 +3702,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ return -EBUSY;
+ if (copy_from_user(&val, optval, sizeof(val)))
+ return -EFAULT;
++ if (val > INT_MAX)
++ return -EINVAL;
+ po->tp_reserve = val;
+ return 0;
+ }
+@@ -4247,6 +4249,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
+ if (unlikely(rb->frames_per_block == 0))
+ goto out;
++ if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
++ goto out;
+ if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
+ req->tp_frame_nr))
+ goto out;
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 673442025bfd..14346dccc4fe 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -6861,6 +6861,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
+ if (sock->state != SS_UNCONNECTED)
+ goto out;
+ 
++ if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
++ goto out;
++
+ /* If backlog is zero, disable listening. */
+ if (!backlog) {
+ if (sctp_sstate(sk, CLOSED))
+diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
+index 3b693e924db7..12ba83367b1b 100644
+--- a/sound/core/seq/seq_lock.c
++++ b/sound/core/seq/seq_lock.c
+@@ -28,19 +28,16 @@
+ /* wait until all locks are released */
+ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
+ {
+- int max_count = 5 * HZ;
++ int warn_count = 5 * HZ;
+ 
+ if (atomic_read(lockp) < 0) {
+ pr_warn("ALSA: seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line);
+ return;
+ }
+ while (atomic_read(lockp) > 0) {
+- if (max_count == 0) {
+- pr_warn("ALSA: seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line);
+- break;
+- }
++ if (warn_count-- == 0)
++ pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line);
+ schedule_timeout_uninterruptible(1);
+- max_count--;
+ }
+ }
+ 
+diff --git a/sound/firewire/lib.h b/sound/firewire/lib.h
+index f6769312ebfc..c3768cd494a5 100644
+--- a/sound/firewire/lib.h
++++ b/sound/firewire/lib.h
+@@ -45,7 +45,7 @@ struct snd_fw_async_midi_port {
+ 
+ struct snd_rawmidi_substream *substream;
+ snd_fw_async_midi_port_fill fill;
+- unsigned int consume_bytes;
++ int consume_bytes;
+ };
+ 
+ int snd_fw_async_midi_port_init(struct snd_fw_async_midi_port *port,
+diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
+index e629b88f7d93..474b06d8acd1 100644
+--- a/sound/firewire/oxfw/oxfw.c
++++ b/sound/firewire/oxfw/oxfw.c
+@@ -226,11 +226,11 @@ static void do_registration(struct work_struct *work)
+ if (err < 0)
+ goto error;
+ 
+- err = detect_quirks(oxfw);
++ err = snd_oxfw_stream_discover(oxfw);
+ if (err < 0)
+ goto error;
+ 
+- err = snd_oxfw_stream_discover(oxfw);
++ err = detect_quirks(oxfw);
+ if (err < 0)
+ goto error;
+ 
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 4c8ff298ad26..d5873eeae1aa 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -621,7 +621,7 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .platform_name = "sst-mfld-platform",
+- .ignore_suspend = 1,
++ .nonatomic = true,
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+@@ -634,7 +634,6 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .platform_name = "sst-mfld-platform",
+- .ignore_suspend = 1,
+ .nonatomic = true,
+ .dynamic = 1,
+ .dpcm_playback = 1,
+@@ -661,6 +660,7 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
+ | SND_SOC_DAIFMT_CBS_CFS,
+ .be_hw_params_fixup = byt_rt5640_codec_fixup,
+ .ignore_suspend = 1,
++ .nonatomic = true,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .init = byt_rt5640_init,
+diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
+index 35f591eab3c9..eabff3a857d0 100644
+--- a/sound/soc/intel/boards/bytcr_rt5651.c
++++ b/sound/soc/intel/boards/bytcr_rt5651.c
+@@ -235,7 +235,6 @@ static struct snd_soc_dai_link byt_rt5651_dais[] = {
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .platform_name = "sst-mfld-platform",
+- .ignore_suspend = 1,
+ .nonatomic = true,
+ .dynamic = 1,
+ .dpcm_playback = 1,
+@@ -249,7 +248,6 @@ static struct snd_soc_dai_link byt_rt5651_dais[] = {
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .platform_name = "sst-mfld-platform",
+- .ignore_suspend = 1,
+ .nonatomic = true,
+ .dynamic = 1,
+ .dpcm_playback = 1,