author     Mike Pagano <mpagano@gentoo.org>  2023-08-30 11:01:28 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2023-08-30 11:01:28 -0400
commit     2517f4f2d49112296bfb055d2dc7cf420af21f21 (patch)
tree       384d0107f7d77aaaa3f063537d4166536f531286
parent     Linux patch 4.14.323 (diff)
download   linux-patches-4.14.tar.gz
           linux-patches-4.14.tar.bz2
           linux-patches-4.14.zip
Linux patch 4.14.324
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README                    4
-rw-r--r--  1323_linux-4.14.324.patch   1593
2 files changed, 1597 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index e92acc5f..eba98a9b 100644
--- a/0000_README
+++ b/0000_README
@@ -1335,6 +1335,10 @@ Patch: 1322_linux-4.14.323.patch
From: https://www.kernel.org
Desc: Linux 4.14.323
+Patch: 1323_linux-4.14.324.patch
+From: https://www.kernel.org
+Desc: Linux 4.14.324
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1323_linux-4.14.324.patch b/1323_linux-4.14.324.patch
new file mode 100644
index 00000000..7cd17ac4
--- /dev/null
+++ b/1323_linux-4.14.324.patch
@@ -0,0 +1,1593 @@
+diff --git a/Makefile b/Makefile
+index 529740dc29764..d1c052d0232f1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 323
++SUBLEVEL = 324
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h
+index 09538ff5e9245..6f0405ba27d6d 100644
+--- a/arch/mips/include/asm/dec/prom.h
++++ b/arch/mips/include/asm/dec/prom.h
+@@ -74,7 +74,7 @@ static inline bool prom_is_rex(u32 magic)
+ */
+ typedef struct {
+ int pagesize;
+- unsigned char bitmap[0];
++ unsigned char bitmap[];
+ } memmap;
+
+
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 7d372db8bee11..e33b732ad3376 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -811,6 +811,14 @@ void __init fpu__init_system_xstate(void)
+ fpu__init_prepare_fx_sw_frame();
+ setup_init_fpu_buf();
+ setup_xstate_comp();
++
++ /*
++ * CPU capabilities initialization runs before FPU init. So
++ * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely
++ * functional, set the feature bit so depending code works.
++ */
++ setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
++
+ print_xstate_offset_size();
+
+ pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index c07a304af8a38..95c9f81a514a2 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -5658,6 +5658,7 @@ err_init_binder_device_failed:
+
+ err_alloc_device_names_failed:
+ debugfs_remove_recursive(binder_debugfs_dir_entry_root);
++ binder_alloc_shrinker_exit();
+
+ return ret;
+ }
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index 1687368ea71f3..f7f0b71c9f688 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -1033,3 +1033,9 @@ void binder_alloc_shrinker_init(void)
+ list_lru_init(&binder_alloc_lru);
+ register_shrinker(&binder_shrinker);
+ }
++
++void binder_alloc_shrinker_exit(void)
++{
++ unregister_shrinker(&binder_shrinker);
++ list_lru_destroy(&binder_alloc_lru);
++}
+diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
+index a3ad7683b6f23..7efcb46c00838 100644
+--- a/drivers/android/binder_alloc.h
++++ b/drivers/android/binder_alloc.h
+@@ -128,6 +128,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ int is_async);
+ extern void binder_alloc_init(struct binder_alloc *alloc);
+ void binder_alloc_shrinker_init(void);
++extern void binder_alloc_shrinker_exit(void);
+ extern void binder_alloc_vma_close(struct binder_alloc *alloc);
+ extern struct binder_buffer *
+ binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
+index 114b36674af42..29a4e2bb61f03 100644
+--- a/drivers/dma-buf/sw_sync.c
++++ b/drivers/dma-buf/sw_sync.c
+@@ -201,6 +201,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
+ */
+ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+ {
++ LIST_HEAD(signalled);
+ struct sync_pt *pt, *next;
+
+ trace_sync_timeline(obj);
+@@ -213,21 +214,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+ if (!timeline_fence_signaled(&pt->base))
+ break;
+
+- list_del_init(&pt->link);
++ dma_fence_get(&pt->base);
++
++ list_move_tail(&pt->link, &signalled);
+ rb_erase(&pt->node, &obj->pt_tree);
+
+- /*
+- * A signal callback may release the last reference to this
+- * fence, causing it to be freed. That operation has to be
+- * last to avoid a use after free inside this loop, and must
+- * be after we remove the fence from the timeline in order to
+- * prevent deadlocking on timeline->lock inside
+- * timeline_fence_release().
+- */
+ dma_fence_signal_locked(&pt->base);
+ }
+
+ spin_unlock_irq(&obj->lock);
++
++ list_for_each_entry_safe(pt, next, &signalled, link) {
++ list_del_init(&pt->link);
++ dma_fence_put(&pt->base);
++ }
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 8a8b65b1b5a9a..7bad519aaae08 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1343,6 +1343,9 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
+ continue;
+
+ r = dma_fence_wait_timeout(fence, true, timeout);
++ if (r > 0 && fence->error)
++ r = fence->error;
++
+ dma_fence_put(fence);
+ if (r < 0)
+ return r;
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index 1ae31dbc61c64..5e61abb3dce5c 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -265,7 +265,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+ {
+ struct drm_radeon_cs *cs = data;
+ uint64_t *chunk_array_ptr;
+- unsigned size, i;
++ u64 size;
++ unsigned i;
+ u32 ring = RADEON_CS_RING_GFX;
+ s32 priority = 0;
+
+diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+index 1b1a28abbf1f6..a5af785fb9948 100644
+--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+@@ -766,6 +766,8 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
+ return -EINVAL;
+
+ if (*nplanes) {
++ if (*nplanes != q_data->fmt->num_planes)
++ return -EINVAL;
+ for (i = 0; i < *nplanes; i++)
+ if (sizes[i] < q_data->sizeimage[i])
+ return -EINVAL;
+diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
+index 019a5e7e1a402..de5e732b1f0b6 100644
+--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
++++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
+@@ -536,16 +536,18 @@ static int load_requested_vpu(struct mtk_vpu *vpu,
+ int vpu_load_firmware(struct platform_device *pdev)
+ {
+ struct mtk_vpu *vpu;
+- struct device *dev = &pdev->dev;
++ struct device *dev;
+ struct vpu_run *run;
+ const struct firmware *vpu_fw = NULL;
+ int ret;
+
+ if (!pdev) {
+- dev_err(dev, "VPU platform device is invalid\n");
++ pr_err("VPU platform device is invalid\n");
+ return -EINVAL;
+ }
+
++ dev = &pdev->dev;
++
+ vpu = platform_get_drvdata(pdev);
+ run = &vpu->run;
+
+diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
+index 6e0f37f373e0d..b3d5762e8733c 100644
+--- a/drivers/mmc/host/wbsd.c
++++ b/drivers/mmc/host/wbsd.c
+@@ -1723,8 +1723,6 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma,
+
+ wbsd_release_resources(host);
+ wbsd_free_mmc(dev);
+-
+- mmc_free_host(mmc);
+ return ret;
+ }
+
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index e0dfec57c3025..cbf2d78765a4f 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -209,7 +209,7 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
+ unsigned long offset;
+
+ for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
+- asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
++ asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset));
+ }
+
+ /* replenish the buffers for a pool. note that we don't need to
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+index d591b3e6bd7c5..cba97e68be402 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+@@ -233,11 +233,11 @@ read_nvm_exit:
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+- * @words: number of words to write
+- * @data: buffer with words to write to the Shadow RAM
++ * @words: number of words to read
++ * @data: buffer with words to read to the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+- * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
++ * Reads a 16 bit words buffer to the Shadow RAM using the admin command.
+ **/
+ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+@@ -256,18 +256,18 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+- "NVM write error: offset %d beyond Shadow RAM limit %d\n",
++ "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+- /* We can write only up to 4KB (one sector), in one AQ write */
++ /* We can read only up to 4KB (one sector), in one AQ write */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+- "NVM write fail error: tried to write %d words, limit is %d.\n",
++ "NVM read fail error: tried to read %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+- /* A single write cannot spread over two sectors */
++ /* A single read cannot spread over two sectors */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+- "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
++ "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_read_nvm(hw, module_pointer,
+diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
+index 295d27f331042..179e1d74661d4 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
+@@ -1195,18 +1195,6 @@ void igb_ptp_init(struct igb_adapter *adapter)
+ return;
+ }
+
+- spin_lock_init(&adapter->tmreg_lock);
+- INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+-
+- if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+- INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+- igb_ptp_overflow_check);
+-
+- adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+- adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+-
+- igb_ptp_reset(adapter);
+-
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+@@ -1216,6 +1204,18 @@ void igb_ptp_init(struct igb_adapter *adapter)
+ dev_info(&adapter->pdev->dev, "added PHC on %s\n",
+ adapter->netdev->name);
+ adapter->ptp_flags |= IGB_PTP_ENABLED;
++
++ spin_lock_init(&adapter->tmreg_lock);
++ INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
++
++ if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
++ INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
++ igb_ptp_overflow_check);
++
++ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
++ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
++
++ igb_ptp_reset(adapter);
+ }
+ }
+
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index 97e017a54eb5a..2fbb1277b3a8d 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -403,6 +403,17 @@ static int bcm5482_read_status(struct phy_device *phydev)
+ return err;
+ }
+
++static int bcm54810_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
++{
++ return -EOPNOTSUPP;
++}
++
++static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
++ u16 val)
++{
++ return -EOPNOTSUPP;
++}
++
+ static int bcm5481_config_aneg(struct phy_device *phydev)
+ {
+ struct device_node *np = phydev->mdio.dev.of_node;
+@@ -650,6 +661,8 @@ static struct phy_driver broadcom_drivers[] = {
+ .name = "Broadcom BCM54810",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
++ .read_mmd = bcm54810_read_mmd,
++ .write_mmd = bcm54810_write_mmd,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = bcm5481_config_aneg,
+ .read_status = genphy_read_status,
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index b318464a4fcad..7b6cae28f6d3d 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2160,7 +2160,9 @@ static void team_setup(struct net_device *dev)
+
+ dev->hw_features = TEAM_VLAN_FEATURES |
+ NETIF_F_HW_VLAN_CTAG_RX |
+- NETIF_F_HW_VLAN_CTAG_FILTER;
++ NETIF_F_HW_VLAN_CTAG_FILTER |
++ NETIF_F_HW_VLAN_STAG_RX |
++ NETIF_F_HW_VLAN_STAG_FILTER;
+
+ dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
+ dev->features |= dev->hw_features;
+diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
+index 2e96d9273b780..e5ec8a2c022a2 100644
+--- a/drivers/pcmcia/rsrc_nonstatic.c
++++ b/drivers/pcmcia/rsrc_nonstatic.c
+@@ -1056,6 +1056,8 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
+ q = p->next;
+ kfree(p);
+ }
++
++ kfree(data);
+ }
+
+
+diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
+index 37b93c3c94515..2e054b19a25e3 100644
+--- a/drivers/scsi/raid_class.c
++++ b/drivers/scsi/raid_class.c
+@@ -209,54 +209,6 @@ raid_attr_ro_state(level);
+ raid_attr_ro_fn(resync);
+ raid_attr_ro_state_fn(state);
+
+-static void raid_component_release(struct device *dev)
+-{
+- struct raid_component *rc =
+- container_of(dev, struct raid_component, dev);
+- dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n");
+- put_device(rc->dev.parent);
+- kfree(rc);
+-}
+-
+-int raid_component_add(struct raid_template *r,struct device *raid_dev,
+- struct device *component_dev)
+-{
+- struct device *cdev =
+- attribute_container_find_class_device(&r->raid_attrs.ac,
+- raid_dev);
+- struct raid_component *rc;
+- struct raid_data *rd = dev_get_drvdata(cdev);
+- int err;
+-
+- rc = kzalloc(sizeof(*rc), GFP_KERNEL);
+- if (!rc)
+- return -ENOMEM;
+-
+- INIT_LIST_HEAD(&rc->node);
+- device_initialize(&rc->dev);
+- rc->dev.release = raid_component_release;
+- rc->dev.parent = get_device(component_dev);
+- rc->num = rd->component_count++;
+-
+- dev_set_name(&rc->dev, "component-%d", rc->num);
+- list_add_tail(&rc->node, &rd->component_list);
+- rc->dev.class = &raid_class.class;
+- err = device_add(&rc->dev);
+- if (err)
+- goto err_out;
+-
+- return 0;
+-
+-err_out:
+- put_device(&rc->dev);
+- list_del(&rc->node);
+- rd->component_count--;
+- put_device(component_dev);
+- kfree(rc);
+- return err;
+-}
+-EXPORT_SYMBOL(raid_component_add);
+-
+ struct raid_template *
+ raid_class_attach(struct raid_function_template *ft)
+ {
+diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
+index 388ba2ebcce52..02b80291c1360 100644
+--- a/drivers/scsi/snic/snic_disc.c
++++ b/drivers/scsi/snic/snic_disc.c
+@@ -316,12 +316,11 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
+ "Snic Tgt: device_add, with err = %d\n",
+ ret);
+
+- put_device(&tgt->dev);
+ put_device(&snic->shost->shost_gendev);
+ spin_lock_irqsave(snic->shost->host_lock, flags);
+ list_del(&tgt->list);
+ spin_unlock_irqrestore(snic->shost->host_lock, flags);
+- kfree(tgt);
++ put_device(&tgt->dev);
+ tgt = NULL;
+
+ return tgt;
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index d3083f19d2136..21a94bb750657 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -3191,6 +3191,7 @@ void serial8250_init_port(struct uart_8250_port *up)
+ struct uart_port *port = &up->port;
+
+ spin_lock_init(&port->lock);
++ port->pm = NULL;
+ port->ops = &serial8250_pops;
+
+ up->cur_iotype = 0xFF;
+diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+index b6f83d5df9fde..14b1e5a1dcce0 100644
+--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
++++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+@@ -525,7 +525,9 @@ static int mmphw_probe(struct platform_device *pdev)
+ ret = -ENOENT;
+ goto failed;
+ }
+- clk_prepare_enable(ctrl->clk);
++ ret = clk_prepare_enable(ctrl->clk);
++ if (ret)
++ goto failed;
+
+ /* init global regs */
+ ctrl_set_default(ctrl);
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 46e8e9324b58f..c8a0d4894cfee 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -3939,9 +3939,9 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
+
+ io_error:
+ kunmap(page);
+- unlock_page(page);
+
+ read_complete:
++ unlock_page(page);
+ return rc;
+ }
+
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 73290263402a3..2167503f17536 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -1363,7 +1363,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
+ {
+ struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
+ struct gfs2_args *args = &sdp->sd_args;
+- int val;
++ unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;
++
++ spin_lock(&sdp->sd_tune.gt_spin);
++ logd_secs = sdp->sd_tune.gt_logd_secs;
++ quota_quantum = sdp->sd_tune.gt_quota_quantum;
++ statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
++ statfs_slow = sdp->sd_tune.gt_statfs_slow;
++ spin_unlock(&sdp->sd_tune.gt_spin);
+
+ if (is_ancestor(root, sdp->sd_master_dir))
+ seq_puts(s, ",meta");
+@@ -1418,17 +1425,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
+ }
+ if (args->ar_discard)
+ seq_puts(s, ",discard");
+- val = sdp->sd_tune.gt_logd_secs;
+- if (val != 30)
+- seq_printf(s, ",commit=%d", val);
+- val = sdp->sd_tune.gt_statfs_quantum;
+- if (val != 30)
+- seq_printf(s, ",statfs_quantum=%d", val);
+- else if (sdp->sd_tune.gt_statfs_slow)
++ if (logd_secs != 30)
++ seq_printf(s, ",commit=%d", logd_secs);
++ if (statfs_quantum != 30)
++ seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
++ else if (statfs_slow)
+ seq_puts(s, ",statfs_quantum=0");
+- val = sdp->sd_tune.gt_quota_quantum;
+- if (val != 60)
+- seq_printf(s, ",quota_quantum=%d", val);
++ if (quota_quantum != 60)
++ seq_printf(s, ",quota_quantum=%d", quota_quantum);
+ if (args->ar_statfs_percent)
+ seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
+ if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index cc2ac1f324b08..464ddaf8ebd10 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -2040,6 +2040,9 @@ dbAllocDmapLev(struct bmap * bmp,
+ if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
+ return -ENOSPC;
+
++ if (leafidx < 0)
++ return -EIO;
++
+ /* determine the block number within the file system corresponding
+ * to the leaf at which free space was found.
+ */
+diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
+index 224ef034004b7..2cb460912468e 100644
+--- a/fs/jfs/jfs_txnmgr.c
++++ b/fs/jfs/jfs_txnmgr.c
+@@ -367,6 +367,11 @@ tid_t txBegin(struct super_block *sb, int flag)
+ jfs_info("txBegin: flag = 0x%x", flag);
+ log = JFS_SBI(sb)->log;
+
++ if (!log) {
++ jfs_error(sb, "read-only filesystem\n");
++ return 0;
++ }
++
+ TXN_LOCK();
+
+ INCREMENT(TxStat.txBegin);
+diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
+index 56c3fcbfe80ed..6726dcddd6f86 100644
+--- a/fs/jfs/namei.c
++++ b/fs/jfs/namei.c
+@@ -813,6 +813,11 @@ static int jfs_link(struct dentry *old_dentry,
+ if (rc)
+ goto out;
+
++ if (isReadOnly(ip)) {
++ jfs_error(ip->i_sb, "read-only filesystem\n");
++ return -EROFS;
++ }
++
+ tid = txBegin(ip->i_sb, 0);
+
+ mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 7c364cda8daac..1629d50782bf9 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -540,7 +540,7 @@ restart:
+ continue;
+ /* Wait for dquot users */
+ if (atomic_read(&dquot->dq_count)) {
+- dqgrab(dquot);
++ atomic_inc(&dquot->dq_count);
+ spin_unlock(&dq_list_lock);
+ /*
+ * Once dqput() wakes us up, we know it's time to free
+@@ -2387,7 +2387,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
+
+ error = add_dquot_ref(sb, type);
+ if (error)
+- dquot_disable(sb, type, flags);
++ dquot_disable(sb, type,
++ DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
+
+ return error;
+ out_file_init:
+diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
+index 61a1738895b7a..ad04dc2278339 100644
+--- a/fs/udf/unicode.c
++++ b/fs/udf/unicode.c
+@@ -268,7 +268,7 @@ static int udf_name_from_CS0(uint8_t *str_o, int str_max_len,
+ }
+
+ if (translate) {
+- if (str_o_len <= 2 && str_o[0] == '.' &&
++ if (str_o_len > 0 && str_o_len <= 2 && str_o[0] == '.' &&
+ (str_o_len == 1 || str_o[1] == '.'))
+ needsCRC = 1;
+ if (needsCRC) {
+diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h
+index 31e1ff69efc8c..5c0700a3d1df4 100644
+--- a/include/linux/raid_class.h
++++ b/include/linux/raid_class.h
+@@ -77,7 +77,3 @@ DEFINE_RAID_ATTRIBUTE(enum raid_state, state)
+
+ struct raid_template *raid_class_attach(struct raid_function_template *);
+ void raid_class_release(struct raid_template *);
+-
+-int __must_check raid_component_add(struct raid_template *, struct device *,
+- struct device *);
+-
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index db8ab0fac81a2..7517dd15f87b4 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -146,6 +146,10 @@ retry:
+ if (gso_type & SKB_GSO_UDP)
+ nh_off -= thlen;
+
++ /* Kernel has a special handling for GSO_BY_FRAGS. */
++ if (gso_size == GSO_BY_FRAGS)
++ return -EINVAL;
++
+ /* Too small packets are not really GSO ones. */
+ if (skb->len - nh_off > gso_size) {
+ shinfo->gso_size = gso_size;
+diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
+index e157d5c9b224e..239bcc4b7e95a 100644
+--- a/include/media/v4l2-mem2mem.h
++++ b/include/media/v4l2-mem2mem.h
+@@ -392,7 +392,14 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
+ static inline
+ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+ {
+- return m2m_ctx->out_q_ctx.num_rdy;
++ unsigned int num_buf_rdy;
++ unsigned long flags;
++
++ spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
++ num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
++ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
++
++ return num_buf_rdy;
+ }
+
+ /**
+@@ -404,7 +411,14 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+ static inline
+ unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+ {
+- return m2m_ctx->cap_q_ctx.num_rdy;
++ unsigned int num_buf_rdy;
++ unsigned long flags;
++
++ spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
++ num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
++ spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
++
++ return num_buf_rdy;
+ }
+
+ /**
+diff --git a/include/net/sock.h b/include/net/sock.h
+index def9dc1ddda11..7b42ddca4decb 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1115,6 +1115,7 @@ struct proto {
+ /*
+ * Pressure flag: try to collapse.
+ * Technical note: it is used by multiple contexts non atomically.
++ * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes.
+ * All the __sk_mem_schedule() is of this nature: accounting
+ * is strict, actions are advisory and have some latency.
+ */
+@@ -1211,6 +1212,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
+ return sk->sk_prot->memory_pressure != NULL;
+ }
+
++static inline bool sk_under_global_memory_pressure(const struct sock *sk)
++{
++ return sk->sk_prot->memory_pressure &&
++ !!READ_ONCE(*sk->sk_prot->memory_pressure);
++}
++
+ static inline bool sk_under_memory_pressure(const struct sock *sk)
+ {
+ if (!sk->sk_prot->memory_pressure)
+@@ -1220,7 +1227,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
+ mem_cgroup_under_socket_pressure(sk->sk_memcg))
+ return true;
+
+- return !!*sk->sk_prot->memory_pressure;
++ return !!READ_ONCE(*sk->sk_prot->memory_pressure);
+ }
+
+ static inline long
+@@ -1274,7 +1281,7 @@ proto_memory_pressure(struct proto *prot)
+ {
+ if (!prot->memory_pressure)
+ return false;
+- return !!*prot->memory_pressure;
++ return !!READ_ONCE(*prot->memory_pressure);
+ }
+
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index f5d084b88228c..c851b6fe45b27 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3275,8 +3275,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
+ * will point to the same string as current_trace->name.
+ */
+ mutex_lock(&trace_types_lock);
+- if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
++ if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
++ /* Close iter->trace before switching to the new current tracer */
++ if (iter->trace->close)
++ iter->trace->close(iter);
+ *iter->trace = *tr->current_trace;
++ /* Reopen the new current tracer */
++ if (iter->trace->open)
++ iter->trace->open(iter);
++ }
+ mutex_unlock(&trace_types_lock);
+
+ #ifdef CONFIG_TRACER_MAX_TRACE
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index 2d9e12380dc3b..2e67aeb6aed37 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -218,7 +218,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter)
+ {
+ if (is_graph(iter->tr))
+ graph_trace_open(iter);
+-
++ else
++ iter->private = NULL;
+ }
+
+ static void irqsoff_trace_close(struct trace_iterator *iter)
+diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
+index a5a4b56631630..ad458724bf960 100644
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -287,6 +287,8 @@ static void wakeup_trace_open(struct trace_iterator *iter)
+ {
+ if (is_graph(iter->tr))
+ graph_trace_open(iter);
++ else
++ iter->private = NULL;
+ }
+
+ static void wakeup_trace_close(struct trace_iterator *iter)
+diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
+index 2e11e48446abf..ca0582d33532f 100644
+--- a/lib/clz_ctz.c
++++ b/lib/clz_ctz.c
+@@ -30,36 +30,16 @@ int __weak __clzsi2(int val)
+ }
+ EXPORT_SYMBOL(__clzsi2);
+
+-int __weak __clzdi2(long val);
+-int __weak __ctzdi2(long val);
+-#if BITS_PER_LONG == 32
+-
+-int __weak __clzdi2(long val)
++int __weak __clzdi2(u64 val);
++int __weak __clzdi2(u64 val)
+ {
+- return 32 - fls((int)val);
++ return 64 - fls64(val);
+ }
+ EXPORT_SYMBOL(__clzdi2);
+
+-int __weak __ctzdi2(long val)
++int __weak __ctzdi2(u64 val);
++int __weak __ctzdi2(u64 val)
+ {
+- return __ffs((u32)val);
++ return __ffs64(val);
+ }
+ EXPORT_SYMBOL(__ctzdi2);
+-
+-#elif BITS_PER_LONG == 64
+-
+-int __weak __clzdi2(long val)
+-{
+- return 64 - fls64((u64)val);
+-}
+-EXPORT_SYMBOL(__clzdi2);
+-
+-int __weak __ctzdi2(long val)
+-{
+- return __ffs64((u64)val);
+-}
+-EXPORT_SYMBOL(__ctzdi2);
+-
+-#else
+-#error BITS_PER_LONG not 32 or 64
+-#endif
+diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
+index 6c5229f98c9eb..cac4e5aee7395 100644
+--- a/lib/mpi/longlong.h
++++ b/lib/mpi/longlong.h
+@@ -639,30 +639,12 @@ do { \
+ ************** MIPS *****************
+ ***************************************/
+ #if defined(__mips__) && W_TYPE_SIZE == 32
+-#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
+ #define umul_ppmm(w1, w0, u, v) \
+ do { \
+ UDItype __ll = (UDItype)(u) * (v); \
+ w1 = __ll >> 32; \
+ w0 = __ll; \
+ } while (0)
+-#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
+-#define umul_ppmm(w1, w0, u, v) \
+- __asm__ ("multu %2,%3" \
+- : "=l" ((USItype)(w0)), \
+- "=h" ((USItype)(w1)) \
+- : "d" ((USItype)(u)), \
+- "d" ((USItype)(v)))
+-#else
+-#define umul_ppmm(w1, w0, u, v) \
+- __asm__ ("multu %2,%3\n" \
+- "mflo %0\n" \
+- "mfhi %1" \
+- : "=d" ((USItype)(w0)), \
+- "=d" ((USItype)(w1)) \
+- : "d" ((USItype)(u)), \
+- "d" ((USItype)(v)))
+-#endif
+ #define UMUL_TIME 10
+ #define UDIV_TIME 100
+ #endif /* __mips__ */
+@@ -687,7 +669,7 @@ do { \
+ : "d" ((UDItype)(u)), \
+ "d" ((UDItype)(v))); \
+ } while (0)
+-#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
++#else
+ #define umul_ppmm(w1, w0, u, v) \
+ do { \
+ typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
+@@ -695,22 +677,6 @@ do { \
+ w1 = __ll >> 64; \
+ w0 = __ll; \
+ } while (0)
+-#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
+-#define umul_ppmm(w1, w0, u, v) \
+- __asm__ ("dmultu %2,%3" \
+- : "=l" ((UDItype)(w0)), \
+- "=h" ((UDItype)(w1)) \
+- : "d" ((UDItype)(u)), \
+- "d" ((UDItype)(v)))
+-#else
+-#define umul_ppmm(w1, w0, u, v) \
+- __asm__ ("dmultu %2,%3\n" \
+- "mflo %0\n" \
+- "mfhi %1" \
+- : "=d" ((UDItype)(w0)), \
+- "=d" ((UDItype)(w1)) \
+- : "d" ((UDItype)(u)), \
+- "d" ((UDItype)(v)))
+ #endif
+ #define UMUL_TIME 20
+ #define UDIV_TIME 140
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c
+index 34210306ea66d..d407e5e670f35 100644
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -283,16 +283,26 @@ static ssize_t config_test_show_str(char *dst,
+ return len;
+ }
+
+-static int test_dev_config_update_bool(const char *buf, size_t size,
+- bool *cfg)
++static inline int __test_dev_config_update_bool(const char *buf, size_t size,
++ bool *cfg)
+ {
+ int ret;
+
+- mutex_lock(&test_fw_mutex);
+ if (strtobool(buf, cfg) < 0)
+ ret = -EINVAL;
+ else
+ ret = size;
++
++ return ret;
++}
++
++static int test_dev_config_update_bool(const char *buf, size_t size,
++ bool *cfg)
++{
++ int ret;
++
++ mutex_lock(&test_fw_mutex);
++ ret = __test_dev_config_update_bool(buf, size, cfg);
+ mutex_unlock(&test_fw_mutex);
+
+ return ret;
+@@ -322,7 +332,7 @@ static ssize_t test_dev_config_show_int(char *buf, int cfg)
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+ }
+
+-static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
++static inline int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+ {
+ int ret;
+ long new;
+@@ -334,14 +344,23 @@ static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+ if (new > U8_MAX)
+ return -EINVAL;
+
+- mutex_lock(&test_fw_mutex);
+ *(u8 *)cfg = new;
+- mutex_unlock(&test_fw_mutex);
+
+ /* Always return full write size even if we didn't consume all */
+ return size;
+ }
+
++static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
++{
++ int ret;
++
++ mutex_lock(&test_fw_mutex);
++ ret = __test_dev_config_update_u8(buf, size, cfg);
++ mutex_unlock(&test_fw_mutex);
++
++ return ret;
++}
++
+ static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
+ {
+ u8 val;
+@@ -374,10 +393,10 @@ static ssize_t config_num_requests_store(struct device *dev,
+ mutex_unlock(&test_fw_mutex);
+ goto out;
+ }
+- mutex_unlock(&test_fw_mutex);
+
+- rc = test_dev_config_update_u8(buf, count,
+- &test_fw_config->num_requests);
++ rc = __test_dev_config_update_u8(buf, count,
++ &test_fw_config->num_requests);
++ mutex_unlock(&test_fw_mutex);
+
+ out:
+ return rc;
+diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
+index fbc132f4670e0..58e8470b160e9 100644
+--- a/net/batman-adv/bat_v_elp.c
++++ b/net/batman-adv/bat_v_elp.c
+@@ -507,7 +507,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb,
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_elp_packet *elp_packet;
+ struct batadv_hard_iface *primary_if;
+- struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
++ struct ethhdr *ethhdr;
+ bool res;
+ int ret = NET_RX_DROP;
+
+@@ -515,6 +515,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb,
+ if (!res)
+ goto free_skb;
+
++ ethhdr = eth_hdr(skb);
+ if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
+ goto free_skb;
+
+diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
+index c49c48866a3fc..fc5a9e1968e82 100644
+--- a/net/batman-adv/bat_v_ogm.c
++++ b/net/batman-adv/bat_v_ogm.c
+@@ -118,8 +118,10 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
+ {
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+
+- if (hard_iface->if_status != BATADV_IF_ACTIVE)
++ if (hard_iface->if_status != BATADV_IF_ACTIVE) {
++ kfree_skb(skb);
+ return;
++ }
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
+ batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
+@@ -831,7 +833,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
+ {
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_ogm2_packet *ogm_packet;
+- struct ethhdr *ethhdr = eth_hdr(skb);
++ struct ethhdr *ethhdr;
+ int ogm_offset;
+ u8 *packet_pos;
+ int ret = NET_RX_DROP;
+@@ -845,6 +847,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
+ if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN))
+ goto free_skb;
+
++ ethhdr = eth_hdr(skb);
+ if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
+ goto free_skb;
+
+diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
+index 5fe2e63370ad7..f104002e10526 100644
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -625,7 +625,7 @@ out:
+ /* adjusts the MTU if a new interface with a smaller MTU appeared. */
+ void batadv_update_min_mtu(struct net_device *soft_iface)
+ {
+- soft_iface->mtu = batadv_hardif_min_mtu(soft_iface);
++ dev_set_mtu(soft_iface, batadv_hardif_min_mtu(soft_iface));
+
+ /* Check if the local translate table should be cleaned up to match a
+ * new (and smaller) MTU.
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 47b19ad5a02e1..850447cffb80c 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -791,7 +791,6 @@ check_roaming:
+ if (roamed_back) {
+ batadv_tt_global_free(bat_priv, tt_global,
+ "Roaming canceled");
+- tt_global = NULL;
+ } else {
+ /* The global entry has to be marked as ROAMING and
+ * has to be kept for consistency purpose
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 25d88b8cfae97..6bae68b5d439c 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -5705,9 +5705,14 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
+ if (!chan)
+ goto done;
+
++ chan = l2cap_chan_hold_unless_zero(chan);
++ if (!chan)
++ goto done;
++
+ l2cap_chan_lock(chan);
+ l2cap_chan_del(chan, ECONNREFUSED);
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+
+ done:
+ mutex_unlock(&conn->chan_lock);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index a76f3024687f0..0478f8cddd961 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2311,7 +2311,10 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifi_index > 0)
+ dev = __dev_get_by_index(net, ifm->ifi_index);
+- else if (tb[IFLA_IFNAME])
++ else if (ifm->ifi_index < 0) {
++ NL_SET_ERR_MSG(extack, "ifindex can't be negative");
++ return -EINVAL;
++ } else if (tb[IFLA_IFNAME])
+ dev = __dev_get_by_name(net, ifname);
+ else
+ goto errout;
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 0ff80718f194d..a7a0bc9c2a9f0 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2459,7 +2459,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount)
+ if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+ mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
+
+- if (sk_under_memory_pressure(sk) &&
++ if (sk_under_global_memory_pressure(sk) &&
+ (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
+ sk_leave_memory_pressure(sk);
+ }
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index 33a85269a9f26..d43180dd543e3 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -325,12 +325,12 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+- xfrm_decode_session(skb, &fl, AF_INET);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET);
+ break;
+ case htons(ETH_P_IPV6):
+- xfrm_decode_session(skb, &fl, AF_INET6);
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET6);
+ break;
+ default:
+ dev->stats.tx_errors++;
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 895129b0928c2..d708094952056 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -540,7 +540,9 @@ out_reset_timer:
+ tcp_stream_is_thin(tp) &&
+ icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
+ icsk->icsk_backoff = 0;
+- icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
++ icsk->icsk_rto = clamp(__tcp_set_rto(tp),
++ tcp_rto_min(sk),
++ TCP_RTO_MAX);
+ } else {
+ /* Use normal (exponential) backoff */
+ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index a4ba470186482..976199055e85b 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -570,12 +570,12 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ vti6_addr_conflict(t, ipv6h))
+ goto tx_err;
+
+- xfrm_decode_session(skb, &fl, AF_INET6);
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET6);
+ break;
+ case htons(ETH_P_IP):
+- xfrm_decode_session(skb, &fl, AF_INET);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET);
+ break;
+ default:
+ goto tx_err;
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 49813e6d05ed7..197990b9b97df 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1858,9 +1858,9 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
+ if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
+ struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
+
+- if ((xfilter->sadb_x_filter_splen >=
++ if ((xfilter->sadb_x_filter_splen >
+ (sizeof(xfrm_address_t) << 3)) ||
+- (xfilter->sadb_x_filter_dplen >=
++ (xfilter->sadb_x_filter_dplen >
+ (sizeof(xfrm_address_t) << 3))) {
+ mutex_unlock(&pfk->dump_lock);
+ return -EINVAL;
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index ecc16d8c1cc31..afbb06a9db337 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -1648,6 +1648,7 @@ static int ip_vs_zero_all(struct netns_ipvs *ipvs)
+ #ifdef CONFIG_SYSCTL
+
+ static int zero;
++static int one = 1;
+ static int three = 3;
+
+ static int
+@@ -1659,12 +1660,18 @@ proc_do_defense_mode(struct ctl_table *table, int write,
+ int val = *valp;
+ int rc;
+
+- rc = proc_dointvec(table, write, buffer, lenp, ppos);
++ struct ctl_table tmp = {
++ .data = &val,
++ .maxlen = sizeof(int),
++ .mode = table->mode,
++ };
++
++ rc = proc_dointvec(&tmp, write, buffer, lenp, ppos);
+ if (write && (*valp != val)) {
+- if ((*valp < 0) || (*valp > 3)) {
+- /* Restore the correct value */
+- *valp = val;
++ if (val < 0 || val > 3) {
++ rc = -EINVAL;
+ } else {
++ *valp = val;
+ update_defense_level(ipvs);
+ }
+ }
+@@ -1675,37 +1682,27 @@ static int
+ proc_do_sync_threshold(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
++ struct netns_ipvs *ipvs = table->extra2;
+ int *valp = table->data;
+ int val[2];
+ int rc;
++ struct ctl_table tmp = {
++ .data = &val,
++ .maxlen = table->maxlen,
++ .mode = table->mode,
++ };
+
+- /* backup the value first */
++ mutex_lock(&ipvs->sync_mutex);
+ memcpy(val, valp, sizeof(val));
+-
+- rc = proc_dointvec(table, write, buffer, lenp, ppos);
+- if (write && (valp[0] < 0 || valp[1] < 0 ||
+- (valp[0] >= valp[1] && valp[1]))) {
+- /* Restore the correct value */
+- memcpy(valp, val, sizeof(val));
+- }
+- return rc;
+-}
+-
+-static int
+-proc_do_sync_mode(struct ctl_table *table, int write,
+- void __user *buffer, size_t *lenp, loff_t *ppos)
+-{
+- int *valp = table->data;
+- int val = *valp;
+- int rc;
+-
+- rc = proc_dointvec(table, write, buffer, lenp, ppos);
+- if (write && (*valp != val)) {
+- if ((*valp < 0) || (*valp > 1)) {
+- /* Restore the correct value */
+- *valp = val;
+- }
++ rc = proc_dointvec(&tmp, write, buffer, lenp, ppos);
++ if (write) {
++ if (val[0] < 0 || val[1] < 0 ||
++ (val[0] >= val[1] && val[1]))
++ rc = -EINVAL;
++ else
++ memcpy(valp, val, sizeof(val));
+ }
++ mutex_unlock(&ipvs->sync_mutex);
+ return rc;
+ }
+
+@@ -1717,12 +1714,18 @@ proc_do_sync_ports(struct ctl_table *table, int write,
+ int val = *valp;
+ int rc;
+
+- rc = proc_dointvec(table, write, buffer, lenp, ppos);
++ struct ctl_table tmp = {
++ .data = &val,
++ .maxlen = sizeof(int),
++ .mode = table->mode,
++ };
++
++ rc = proc_dointvec(&tmp, write, buffer, lenp, ppos);
+ if (write && (*valp != val)) {
+- if (*valp < 1 || !is_power_of_2(*valp)) {
+- /* Restore the correct value */
++ if (val < 1 || !is_power_of_2(val))
++ rc = -EINVAL;
++ else
+ *valp = val;
+- }
+ }
+ return rc;
+ }
+@@ -1782,7 +1785,9 @@ static struct ctl_table vs_vars[] = {
+ .procname = "sync_version",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_do_sync_mode,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &one,
+ },
+ {
+ .procname = "sync_ports",
+@@ -3974,6 +3979,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
+ ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD;
+ ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
+ tbl[idx].data = &ipvs->sysctl_sync_threshold;
++ tbl[idx].extra2 = ipvs;
+ tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
+ ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD;
+ tbl[idx++].data = &ipvs->sysctl_sync_refresh_period;
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 1278b27c625ab..c857c68ac887b 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -57,8 +57,8 @@ static unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] __read_mostly = {
+ [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS,
+ [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS,
+ [SCTP_CONNTRACK_ESTABLISHED] = 5 DAYS,
+- [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000,
+- [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000,
++ [SCTP_CONNTRACK_SHUTDOWN_SENT] = 3 SECS,
++ [SCTP_CONNTRACK_SHUTDOWN_RECD] = 3 SECS,
+ [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS,
+ [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS,
+ [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS,
+@@ -116,7 +116,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
+ {
+ /* ORIGINAL */
+ /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+-/* init */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
++/* init */ {sCW, sCW, sCW, sCE, sES, sCL, sCL, sSA, sCW, sHA},
+ /* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
+ /* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+ /* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index d1dc5c8937a56..461bdecbe7fc2 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -137,6 +137,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+ if (IS_ERR(set))
+ return PTR_ERR(set);
+
++ if (set->flags & NFT_SET_OBJECT)
++ return -EOPNOTSUPP;
++
+ if (set->ops->update == NULL)
+ return -EOPNOTSUPP;
+
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 9414dcb376d26..e5c3c37108e4e 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -110,7 +110,7 @@ struct percpu_counter sctp_sockets_allocated;
+
+ static void sctp_enter_memory_pressure(struct sock *sk)
+ {
+- sctp_memory_pressure = 1;
++ WRITE_ONCE(sctp_memory_pressure, 1);
+ }
+
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index c4ec2c2e4c861..4def6e954e486 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1988,6 +1988,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
+
+ if (false) {
+ alloc_skb:
++ spin_unlock(&other->sk_receive_queue.lock);
+ unix_state_unlock(other);
+ mutex_unlock(&unix_sk(other)->iolock);
+ newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
+@@ -2027,6 +2028,7 @@ alloc_skb:
+ init_scm = false;
+ }
+
++ spin_lock(&other->sk_receive_queue.lock);
+ skb = skb_peek_tail(&other->sk_receive_queue);
+ if (tail && tail == skb) {
+ skb = newskb;
+@@ -2057,14 +2059,11 @@ alloc_skb:
+ refcount_add(size, &sk->sk_wmem_alloc);
+
+ if (newskb) {
+- err = unix_scm_to_skb(&scm, skb, false);
+- if (err)
+- goto err_state_unlock;
+- spin_lock(&other->sk_receive_queue.lock);
++ unix_scm_to_skb(&scm, skb, false);
+ __skb_queue_tail(&other->sk_receive_queue, newskb);
+- spin_unlock(&other->sk_receive_queue.lock);
+ }
+
++ spin_unlock(&other->sk_receive_queue.lock);
+ unix_state_unlock(other);
+ mutex_unlock(&unix_sk(other)->iolock);
+
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index ad30e0d8b28e9..f1109da9ece6e 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -521,7 +521,7 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
+ struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
+ struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
+
+- if (re) {
++ if (re && x->replay_esn && x->preplay_esn) {
+ struct xfrm_replay_state_esn *replay_esn;
+ replay_esn = nla_data(re);
+ memcpy(x->replay_esn, replay_esn,
+@@ -1000,6 +1000,15 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
+ sizeof(*filter), GFP_KERNEL);
+ if (filter == NULL)
+ return -ENOMEM;
++
++ /* see addr_match(), (prefix length >> 5) << 2
++ * will be used to compare xfrm_address_t
++ */
++ if (filter->splen > (sizeof(xfrm_address_t) << 3) ||
++ filter->dplen > (sizeof(xfrm_address_t) << 3)) {
++ kfree(filter);
++ return -EINVAL;
++ }
+ }
+
+ if (attrs[XFRMA_PROTO])
+@@ -2505,7 +2514,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
+ [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
+ [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
+ [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
+- [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
++ [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) },
+ [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
+ [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
+ [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
+diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
+index 5c00e35367675..dc4b30d1b7168 100644
+--- a/sound/pci/emu10k1/emufx.c
++++ b/sound/pci/emu10k1/emufx.c
+@@ -1557,14 +1557,8 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
+ gpr += 2;
+
+ /* Master volume (will be renamed later) */
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+0+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+0+SND_EMU10K1_PLAYBACK_CHANNELS));
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+1+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+1+SND_EMU10K1_PLAYBACK_CHANNELS));
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+2+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+2+SND_EMU10K1_PLAYBACK_CHANNELS));
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+3+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+3+SND_EMU10K1_PLAYBACK_CHANNELS));
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+4+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+4+SND_EMU10K1_PLAYBACK_CHANNELS));
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+5+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+5+SND_EMU10K1_PLAYBACK_CHANNELS));
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+6+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+6+SND_EMU10K1_PLAYBACK_CHANNELS));
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+7+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+7+SND_EMU10K1_PLAYBACK_CHANNELS));
++ for (z = 0; z < 8; z++)
++ A_OP(icode, &ptr, iMAC0, A_GPR(playback+z+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+z+SND_EMU10K1_PLAYBACK_CHANNELS));
+ snd_emu10k1_init_mono_control(&controls[nctl++], "Wave Master Playback Volume", gpr, 0);
+ gpr += 2;
+
+@@ -1648,102 +1642,14 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
+ dev_dbg(emu->card->dev, "emufx.c: gpr=0x%x, tmp=0x%x\n",
+ gpr, tmp);
+ */
+- /* For the EMU1010: How to get 32bit values from the DSP. High 16bits into L, low 16bits into R. */
+- /* A_P16VIN(0) is delayed by one sample,
+- * so all other A_P16VIN channels will need to also be delayed
+- */
+- /* Left ADC in. 1 of 2 */
+ snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_P16VIN(0x0), A_FXBUS2(0) );
+- /* Right ADC in 1 of 2 */
+- gpr_map[gpr++] = 0x00000000;
+- /* Delaying by one sample: instead of copying the input
+- * value A_P16VIN to output A_FXBUS2 as in the first channel,
+- * we use an auxiliary register, delaying the value by one
+- * sample
+- */
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(2) );
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x1), A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(4) );
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x2), A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(6) );
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x3), A_C_00000000, A_C_00000000);
+- /* For 96kHz mode */
+- /* Left ADC in. 2 of 2 */
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0x8) );
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x4), A_C_00000000, A_C_00000000);
+- /* Right ADC in 2 of 2 */
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xa) );
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x5), A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xc) );
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x6), A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xe) );
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x7), A_C_00000000, A_C_00000000);
+- /* Pavel Hofman - we still have voices, A_FXBUS2s, and
+- * A_P16VINs available -
+- * let's add 8 more capture channels - total of 16
+- */
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x10));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x8),
+- A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x12));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x9),
+- A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x14));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xa),
+- A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x16));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xb),
+- A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x18));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xc),
+- A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x1a));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xd),
+- A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x1c));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xe),
+- A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x1e));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xf),
+- A_C_00000000, A_C_00000000);
++ /* A_P16VIN(0) is delayed by one sample, so all other A_P16VIN channels
++ * will need to also be delayed; we use an auxiliary register for that. */
++ for (z = 1; z < 0x10; z++) {
++ snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr), A_FXBUS2(z * 2) );
++ A_OP(icode, &ptr, iACC3, A_GPR(gpr), A_P16VIN(z), A_C_00000000, A_C_00000000);
++ gpr_map[gpr++] = 0x00000000;
++ }
+ }
+
+ #if 0
+diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
+index f05d362c4e23b..d96fa61a4bd7c 100644
+--- a/sound/soc/codecs/rt5665.c
++++ b/sound/soc/codecs/rt5665.c
+@@ -4957,6 +4957,8 @@ static void rt5665_i2c_shutdown(struct i2c_client *client)
+ struct rt5665_priv *rt5665 = i2c_get_clientdata(client);
+
+ regmap_write(rt5665->regmap, RT5665_RESET, 0);
++
++ regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies);
+ }
+
+ #ifdef CONFIG_OF
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 0201737058fba..2b59918e1094f 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3540,5 +3540,34 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ }
+ }
+ },
++{
++ /* Advanced modes of the Mythware XA001AU.
++ * For the standard mode, Mythware XA001AU has ID ffad:a001
++ */
++ USB_DEVICE_VENDOR_SPEC(0xffad, 0xa001),
++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ .vendor_name = "Mythware",
++ .product_name = "XA001AU",
++ .ifnum = QUIRK_ANY_INTERFACE,
++ .type = QUIRK_COMPOSITE,
++ .data = (const struct snd_usb_audio_quirk[]) {
++ {
++ .ifnum = 0,
++ .type = QUIRK_IGNORE_INTERFACE,
++ },
++ {
++ .ifnum = 1,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE,
++ },
++ {
++ .ifnum = 2,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE,
++ },
++ {
++ .ifnum = -1
++ }
++ }
++ }
++},
+
+ #undef USB_DEVICE_VENDOR_SPEC