-rw-r--r--	0000_README	4
-rw-r--r--	1220_linux-4.19.221.patch	3285
2 files changed, 3289 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index ec86fab5..03cf027a 100644
--- a/0000_README
+++ b/0000_README
@@ -919,6 +919,10 @@ Patch: 1219_linux-4.19.220.patch
From: https://www.kernel.org
Desc: Linux 4.19.220
+Patch: 1220_linux-4.19.221.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.221
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1220_linux-4.19.221.patch b/1220_linux-4.19.221.patch
new file mode 100644
index 00000000..545200ee
--- /dev/null
+++ b/1220_linux-4.19.221.patch
@@ -0,0 +1,3285 @@
+diff --git a/Makefile b/Makefile
+index f243688468d53..c0676abcf60ff 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 220
++SUBLEVEL = 221
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/block/ioprio.c b/block/ioprio.c
+index f9821080c92cc..f0ee9cc33d17b 100644
+--- a/block/ioprio.c
++++ b/block/ioprio.c
+@@ -206,6 +206,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
+ pgrp = task_pgrp(current);
+ else
+ pgrp = find_vpid(who);
++ read_lock(&tasklist_lock);
+ do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
+ tmpio = get_task_ioprio(p);
+ if (tmpio < 0)
+@@ -215,6 +216,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
+ else
+ ret = ioprio_best(ret, tmpio);
+ } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
++ read_unlock(&tasklist_lock);
++
+ break;
+ case IOPRIO_WHO_USER:
+ uid = make_kuid(current_user_ns(), who);
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 9229c5c9ad473..35c13be4adc60 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -4416,23 +4416,20 @@ static int binder_thread_release(struct binder_proc *proc,
+ }
+
+ /*
+- * If this thread used poll, make sure we remove the waitqueue
+- * from any epoll data structures holding it with POLLFREE.
+- * waitqueue_active() is safe to use here because we're holding
+- * the inner lock.
++ * If this thread used poll, make sure we remove the waitqueue from any
++ * poll data structures holding it.
+ */
+- if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
+- waitqueue_active(&thread->wait)) {
+- wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
+- }
++ if (thread->looper & BINDER_LOOPER_STATE_POLL)
++ wake_up_pollfree(&thread->wait);
+
+ binder_inner_proc_unlock(thread->proc);
+
+ /*
+- * This is needed to avoid races between wake_up_poll() above and
+- * and ep_remove_waitqueue() called for other reasons (eg the epoll file
+- * descriptor being closed); ep_remove_waitqueue() holds an RCU read
+- * lock, so we can be sure it's done after calling synchronize_rcu().
++ * This is needed to avoid races between wake_up_pollfree() above and
++ * someone else removing the last entry from the queue for other reasons
++ * (e.g. ep_remove_wait_queue() being called due to an epoll file
++ * descriptor being closed). Such other users hold an RCU read lock, so
++ * we can be sure they're done after we call synchronize_rcu().
+ */
+ if (thread->looper & BINDER_LOOPER_STATE_POLL)
+ synchronize_rcu();
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 766ebab2f0116..46eacba2613b8 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4453,6 +4453,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
+ /* Odd clown on sil3726/4726 PMPs */
+ { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
++ /* Similar story with ASMedia 1092 */
++ { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE },
+
+ /* Weird ATAPI devices */
+ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
+diff --git a/drivers/clk/qcom/clk-regmap-mux.c b/drivers/clk/qcom/clk-regmap-mux.c
+index 0f3a1bda3e91a..6ce0a11565e5d 100644
+--- a/drivers/clk/qcom/clk-regmap-mux.c
++++ b/drivers/clk/qcom/clk-regmap-mux.c
+@@ -36,7 +36,7 @@ static u8 mux_get_parent(struct clk_hw *hw)
+ val &= mask;
+
+ if (mux->parent_map)
+- return qcom_find_src_index(hw, mux->parent_map, val);
++ return qcom_find_cfg_index(hw, mux->parent_map, val);
+
+ return val;
+ }
+diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
+index bfb6d6065a90c..e7550e02e4d81 100644
+--- a/drivers/clk/qcom/common.c
++++ b/drivers/clk/qcom/common.c
+@@ -69,6 +69,18 @@ int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
+ }
+ EXPORT_SYMBOL_GPL(qcom_find_src_index);
+
++int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, u8 cfg)
++{
++ int i, num_parents = clk_hw_get_num_parents(hw);
++
++ for (i = 0; i < num_parents; i++)
++ if (cfg == map[i].cfg)
++ return i;
++
++ return -ENOENT;
++}
++EXPORT_SYMBOL_GPL(qcom_find_cfg_index);
++
+ struct regmap *
+ qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
+ {
+diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
+index 4aa33ee70bae1..32c916be3ab19 100644
+--- a/drivers/clk/qcom/common.h
++++ b/drivers/clk/qcom/common.h
+@@ -47,6 +47,8 @@ extern void
+ qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
+ extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
+ u8 src);
++extern int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map,
++ u8 cfg);
+
+ extern int qcom_cc_register_board_clk(struct device *dev, const char *path,
+ const char *name, unsigned long rate);
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 61e1953ff9219..98e91e14cefd3 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -191,14 +191,14 @@ config HID_CHERRY
+
+ config HID_CHICONY
+ tristate "Chicony devices"
+- depends on HID
++ depends on USB_HID
+ default !EXPERT
+ ---help---
+ Support for Chicony Tactical pad and special keys on Chicony keyboards.
+
+ config HID_CORSAIR
+ tristate "Corsair devices"
+- depends on HID && USB && LEDS_CLASS
++ depends on USB_HID && LEDS_CLASS
+ ---help---
+ Support for Corsair devices that are not fully compliant with the
+ HID standard.
+@@ -219,7 +219,7 @@ config HID_COUGAR
+
+ config HID_PRODIKEYS
+ tristate "Prodikeys PC-MIDI Keyboard support"
+- depends on HID && SND
++ depends on USB_HID && SND
+ select SND_RAWMIDI
+ ---help---
+ Support for Prodikeys PC-MIDI Keyboard device support.
+@@ -484,7 +484,7 @@ config HID_LENOVO
+
+ config HID_LOGITECH
+ tristate "Logitech devices"
+- depends on HID
++ depends on USB_HID
+ default !EXPERT
+ ---help---
+ Support for Logitech devices that are not fully compliant with HID standard.
+@@ -822,7 +822,7 @@ config HID_SAITEK
+
+ config HID_SAMSUNG
+ tristate "Samsung InfraRed remote control or keyboards"
+- depends on HID
++ depends on USB_HID
+ ---help---
+ Support for Samsung InfraRed remote control or keyboards.
+
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index 88a5672f42cd8..800b2364e29ea 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -622,7 +622,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
+ drvdata->tp = &asus_i2c_tp;
+
+- if (drvdata->quirks & QUIRK_T100_KEYBOARD) {
++ if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) {
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+ if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
+diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
+index 397a789a41be9..218f0e090f638 100644
+--- a/drivers/hid/hid-chicony.c
++++ b/drivers/hid/hid-chicony.c
+@@ -61,8 +61,12 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+ {
+- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+-
++ struct usb_interface *intf;
++
++ if (!hid_is_usb(hdev))
++ return rdesc;
++
++ intf = to_usb_interface(hdev->dev.parent);
+ if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+ /* Change usage maximum and logical maximum from 0x7fff to
+ * 0x2fff, so they don't exceed HID_MAX_USAGES */
+diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
+index ec9e060ec46cc..6ede03c9550d3 100644
+--- a/drivers/hid/hid-corsair.c
++++ b/drivers/hid/hid-corsair.c
+@@ -556,7 +556,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id)
+ int ret;
+ unsigned long quirks = id->driver_data;
+ struct corsair_drvdata *drvdata;
+- struct usb_interface *usbif = to_usb_interface(dev->dev.parent);
++ struct usb_interface *usbif;
++
++ if (!hid_is_usb(dev))
++ return -EINVAL;
++
++ usbif = to_usb_interface(dev->dev.parent);
+
+ drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata),
+ GFP_KERNEL);
+diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
+index 6346282e0ff05..7139227edb287 100644
+--- a/drivers/hid/hid-elan.c
++++ b/drivers/hid/hid-elan.c
+@@ -54,7 +54,7 @@ struct elan_drvdata {
+
+ static int is_not_elan_touchpad(struct hid_device *hdev)
+ {
+- if (hdev->bus == BUS_USB) {
++ if (hid_is_usb(hdev)) {
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+ return (intf->altsetting->desc.bInterfaceNumber !=
+diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
+index 5eea6fe0d7bd8..c3ecac13e6203 100644
+--- a/drivers/hid/hid-elo.c
++++ b/drivers/hid/hid-elo.c
+@@ -230,6 +230,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ struct elo_priv *priv;
+ int ret;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
+index 3e58d4c3cf2c0..51a827470157b 100644
+--- a/drivers/hid/hid-google-hammer.c
++++ b/drivers/hid/hid-google-hammer.c
+@@ -120,6 +120,8 @@ static int hammer_input_configured(struct hid_device *hdev,
+ static const struct hid_device_id hammer_devices[] = {
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
++ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
+index ab9da597106fa..2f8eb66397444 100644
+--- a/drivers/hid/hid-holtek-kbd.c
++++ b/drivers/hid/hid-holtek-kbd.c
+@@ -143,12 +143,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
+ static int holtek_kbd_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+ {
+- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+- int ret = hid_parse(hdev);
++ struct usb_interface *intf;
++ int ret;
++
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
+
++ ret = hid_parse(hdev);
+ if (!ret)
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
++ intf = to_usb_interface(hdev->dev.parent);
+ if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+ struct hid_input *hidinput;
+ list_for_each_entry(hidinput, &hdev->inputs, list) {
+diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
+index 78b3a0c767751..27c08ddab0e1a 100644
+--- a/drivers/hid/hid-holtek-mouse.c
++++ b/drivers/hid/hid-holtek-mouse.c
+@@ -65,6 +65,14 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ return rdesc;
+ }
+
++static int holtek_mouse_probe(struct hid_device *hdev,
++ const struct hid_device_id *id)
++{
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++ return 0;
++}
++
+ static const struct hid_device_id holtek_mouse_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+ USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
+@@ -86,6 +94,7 @@ static struct hid_driver holtek_mouse_driver = {
+ .name = "holtek_mouse",
+ .id_table = holtek_mouse_devices,
+ .report_fixup = holtek_mouse_report_fixup,
++ .probe = holtek_mouse_probe,
+ };
+
+ module_hid_driver(holtek_mouse_driver);
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index ee5dce862a215..8d4153c73f5cf 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -479,6 +479,7 @@
+ #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
+ #define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
+ #define USB_DEVICE_ID_GOOGLE_DON 0x5050
++#define USB_DEVICE_ID_GOOGLE_EEL 0x5057
+
+ #define USB_VENDOR_ID_GOTOP 0x08f2
+ #define USB_DEVICE_ID_SUPER_Q2 0x007f
+diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
+index 17d6123f7930f..ea4e10070851a 100644
+--- a/drivers/hid/hid-lg.c
++++ b/drivers/hid/hid-lg.c
+@@ -714,12 +714,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report,
+
+ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ {
+- struct usb_interface *iface = to_usb_interface(hdev->dev.parent);
+- __u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
++ struct usb_interface *iface;
++ __u8 iface_num;
+ unsigned int connect_mask = HID_CONNECT_DEFAULT;
+ struct lg_drv_data *drv_data;
+ int ret;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
++ iface = to_usb_interface(hdev->dev.parent);
++ iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
++
+ /* G29 only work with the 1st interface */
+ if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
+ (iface_num != 0)) {
+diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
+index d3773251b3745..efc995543aa11 100644
+--- a/drivers/hid/hid-prodikeys.c
++++ b/drivers/hid/hid-prodikeys.c
+@@ -802,12 +802,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
+ static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ {
+ int ret;
+- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+- unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
++ struct usb_interface *intf;
++ unsigned short ifnum;
+ unsigned long quirks = id->driver_data;
+ struct pk_device *pk;
+ struct pcmidi_snd *pm = NULL;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
++ intf = to_usb_interface(hdev->dev.parent);
++ ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
++
+ pk = kzalloc(sizeof(*pk), GFP_KERNEL);
+ if (pk == NULL) {
+ hid_err(hdev, "can't alloc descriptor\n");
+diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
+index 329c5d1270f94..fb545a11214f0 100644
+--- a/drivers/hid/hid-roccat-arvo.c
++++ b/drivers/hid/hid-roccat-arvo.c
+@@ -347,6 +347,9 @@ static int arvo_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
+index 02db537f8f3ea..c07a7ea8a6873 100644
+--- a/drivers/hid/hid-roccat-isku.c
++++ b/drivers/hid/hid-roccat-isku.c
+@@ -327,6 +327,9 @@ static int isku_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
+index 9be8c31f613fd..ef978586ff2f5 100644
+--- a/drivers/hid/hid-roccat-kone.c
++++ b/drivers/hid/hid-roccat-kone.c
+@@ -752,6 +752,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
+index 09e8fc72aa1d4..b63de4c5b5dd3 100644
+--- a/drivers/hid/hid-roccat-koneplus.c
++++ b/drivers/hid/hid-roccat-koneplus.c
+@@ -434,6 +434,9 @@ static int koneplus_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c
+index 07de2f9014c67..ef9508822e5f0 100644
+--- a/drivers/hid/hid-roccat-konepure.c
++++ b/drivers/hid/hid-roccat-konepure.c
+@@ -136,6 +136,9 @@ static int konepure_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
+index 317c9c2c0a7ce..6256c211398a1 100644
+--- a/drivers/hid/hid-roccat-kovaplus.c
++++ b/drivers/hid/hid-roccat-kovaplus.c
+@@ -504,6 +504,9 @@ static int kovaplus_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c
+index ac1a7313e2596..13ae2a7d176d3 100644
+--- a/drivers/hid/hid-roccat-lua.c
++++ b/drivers/hid/hid-roccat-lua.c
+@@ -163,6 +163,9 @@ static int lua_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
+index b30aa7b82bf87..027aa9d0ec1f2 100644
+--- a/drivers/hid/hid-roccat-pyra.c
++++ b/drivers/hid/hid-roccat-pyra.c
+@@ -452,6 +452,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c
+index 47cc8f30ff6d4..fda4a396a12e8 100644
+--- a/drivers/hid/hid-roccat-ryos.c
++++ b/drivers/hid/hid-roccat-ryos.c
+@@ -144,6 +144,9 @@ static int ryos_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
+index 6dbf6e04dce75..0230fb54f08a5 100644
+--- a/drivers/hid/hid-roccat-savu.c
++++ b/drivers/hid/hid-roccat-savu.c
+@@ -116,6 +116,9 @@ static int savu_probe(struct hid_device *hdev,
+ {
+ int retval;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
+index 7cbb067d4a9e3..89bb2260367f3 100644
+--- a/drivers/hid/hid-samsung.c
++++ b/drivers/hid/hid-samsung.c
+@@ -157,6 +157,9 @@ static int samsung_probe(struct hid_device *hdev,
+ int ret;
+ unsigned int cmask = HID_CONNECT_DEFAULT;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
+index 56b196d600411..e0bc31ee15769 100644
+--- a/drivers/hid/hid-uclogic.c
++++ b/drivers/hid/hid-uclogic.c
+@@ -791,6 +791,9 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
+ __u8 *p;
+ s32 v;
+
++ if (!hid_is_usb(hdev))
++ return -EINVAL;
++
+ /*
+ * Read string descriptor containing tablet parameters. The specific
+ * string descriptor and data were discovered by sniffing the Windows
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 8006732b8f424..152570b49f3b2 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -697,7 +697,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
+ * Skip the query for this type and modify defaults based on
+ * interface number.
+ */
+- if (features->type == WIRELESS) {
++ if (features->type == WIRELESS && intf) {
+ if (intf->cur_altsetting->desc.bInterfaceNumber == 0)
+ features->device_type = WACOM_DEVICETYPE_WL_MONITOR;
+ else
+@@ -2188,7 +2188,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
+ if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
+ char *product_name = wacom->hdev->name;
+
+- if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) {
++ if (hid_is_usb(wacom->hdev)) {
+ struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent);
+ struct usb_device *dev = interface_to_usbdev(intf);
+ product_name = dev->product;
+@@ -2419,6 +2419,9 @@ static void wacom_wireless_work(struct work_struct *work)
+
+ wacom_destroy_battery(wacom);
+
++ if (!usbdev)
++ return;
++
+ /* Stylus interface */
+ hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
+ wacom1 = hid_get_drvdata(hdev1);
+@@ -2698,8 +2701,6 @@ static void wacom_mode_change_work(struct work_struct *work)
+ static int wacom_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+ {
+- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+- struct usb_device *dev = interface_to_usbdev(intf);
+ struct wacom *wacom;
+ struct wacom_wac *wacom_wac;
+ struct wacom_features *features;
+@@ -2736,8 +2737,14 @@ static int wacom_probe(struct hid_device *hdev,
+ wacom_wac->hid_data.inputmode = -1;
+ wacom_wac->mode_report = -1;
+
+- wacom->usbdev = dev;
+- wacom->intf = intf;
++ if (hid_is_usb(hdev)) {
++ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
++ struct usb_device *dev = interface_to_usbdev(intf);
++
++ wacom->usbdev = dev;
++ wacom->intf = intf;
++ }
++
+ mutex_init(&wacom->lock);
+ INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work);
+ INIT_WORK(&wacom->wireless_work, wacom_wireless_work);
+diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
+index 0ca6f9de51923..52947561e446a 100644
+--- a/drivers/iio/accel/kxcjk-1013.c
++++ b/drivers/iio/accel/kxcjk-1013.c
+@@ -1423,8 +1423,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
+ return 0;
+
+ err_buffer_cleanup:
+- if (data->dready_trig)
+- iio_triggered_buffer_cleanup(indio_dev);
++ iio_triggered_buffer_cleanup(indio_dev);
+ err_trigger_unregister:
+ if (data->dready_trig)
+ iio_trigger_unregister(data->dready_trig);
+@@ -1447,8 +1446,8 @@ static int kxcjk1013_remove(struct i2c_client *client)
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
++ iio_triggered_buffer_cleanup(indio_dev);
+ if (data->dready_trig) {
+- iio_triggered_buffer_cleanup(indio_dev);
+ iio_trigger_unregister(data->dready_trig);
+ iio_trigger_unregister(data->motion_trig);
+ }
+diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
+index f74cb2e082a67..6798800d4b165 100644
+--- a/drivers/iio/accel/kxsd9.c
++++ b/drivers/iio/accel/kxsd9.c
+@@ -227,14 +227,14 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
+ hw_values.chan,
+ sizeof(hw_values.chan));
+ if (ret) {
+- dev_err(st->dev,
+- "error reading data\n");
+- return ret;
++ dev_err(st->dev, "error reading data: %d\n", ret);
++ goto out;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ &hw_values,
+ iio_get_time_ns(indio_dev));
++out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
+index 15c254b4745cc..1ef75c94987bd 100644
+--- a/drivers/iio/accel/mma8452.c
++++ b/drivers/iio/accel/mma8452.c
+@@ -1470,7 +1470,7 @@ static int mma8452_trigger_setup(struct iio_dev *indio_dev)
+ if (ret)
+ return ret;
+
+- indio_dev->trig = trig;
++ indio_dev->trig = iio_trigger_get(trig);
+
+ return 0;
+ }
+diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
+index c485ff6f408bf..129c3adad4c05 100644
+--- a/drivers/iio/adc/at91-sama5d2_adc.c
++++ b/drivers/iio/adc/at91-sama5d2_adc.c
+@@ -1375,7 +1375,8 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
+ *val = st->conversion_value;
+ ret = at91_adc_adjust_val_osr(st, val);
+ if (chan->scan_type.sign == 's')
+- *val = sign_extend32(*val, 11);
++ *val = sign_extend32(*val,
++ chan->scan_type.realbits - 1);
+ st->conversion_done = false;
+ }
+
+diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
+index 5be7892693536..5532a055f134f 100644
+--- a/drivers/iio/adc/axp20x_adc.c
++++ b/drivers/iio/adc/axp20x_adc.c
+@@ -254,19 +254,8 @@ static int axp22x_adc_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val)
+ {
+ struct axp20x_adc_iio *info = iio_priv(indio_dev);
+- int size;
+
+- /*
+- * N.B.: Unlike the Chinese datasheets tell, the charging current is
+- * stored on 12 bits, not 13 bits. Only discharging current is on 13
+- * bits.
+- */
+- if (chan->type == IIO_CURRENT && chan->channel == AXP22X_BATT_DISCHRG_I)
+- size = 13;
+- else
+- size = 12;
+-
+- *val = axp20x_read_variable_width(info->regmap, chan->address, size);
++ *val = axp20x_read_variable_width(info->regmap, chan->address, 12);
+ if (*val < 0)
+ return *val;
+
+@@ -389,9 +378,8 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val,
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ case IIO_CURRENT:
+- *val = 0;
+- *val2 = 500000;
+- return IIO_VAL_INT_PLUS_MICRO;
++ *val = 1;
++ return IIO_VAL_INT;
+
+ case IIO_TEMP:
+ *val = 100;
+diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
+index 4ab052d76d9f5..d8cfeec3945b0 100644
+--- a/drivers/iio/adc/dln2-adc.c
++++ b/drivers/iio/adc/dln2-adc.c
+@@ -251,7 +251,6 @@ static int dln2_adc_set_chan_period(struct dln2_adc *dln2,
+ static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
+ {
+ int ret, i;
+- struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev);
+ u16 conflict;
+ __le16 value;
+ int olen = sizeof(value);
+@@ -260,13 +259,9 @@ static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
+ .chan = channel,
+ };
+
+- ret = iio_device_claim_direct_mode(indio_dev);
+- if (ret < 0)
+- return ret;
+-
+ ret = dln2_adc_set_chan_enabled(dln2, channel, true);
+ if (ret < 0)
+- goto release_direct;
++ return ret;
+
+ ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
+ if (ret < 0) {
+@@ -303,8 +298,6 @@ disable_port:
+ dln2_adc_set_port_enabled(dln2, false, NULL);
+ disable_chan:
+ dln2_adc_set_chan_enabled(dln2, channel, false);
+-release_direct:
+- iio_device_release_direct_mode(indio_dev);
+
+ return ret;
+ }
+@@ -340,10 +333,16 @@ static int dln2_adc_read_raw(struct iio_dev *indio_dev,
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
++ ret = iio_device_claim_direct_mode(indio_dev);
++ if (ret < 0)
++ return ret;
++
+ mutex_lock(&dln2->mutex);
+ ret = dln2_adc_read(dln2, chan->channel);
+ mutex_unlock(&dln2->mutex);
+
++ iio_device_release_direct_mode(indio_dev);
++
+ if (ret < 0)
+ return ret;
+
+@@ -669,7 +668,11 @@ static int dln2_adc_probe(struct platform_device *pdev)
+ return -ENOMEM;
+ }
+ iio_trigger_set_drvdata(dln2->trig, dln2);
+- devm_iio_trigger_register(dev, dln2->trig);
++ ret = devm_iio_trigger_register(dev, dln2->trig);
++ if (ret) {
++ dev_err(dev, "failed to register trigger: %d\n", ret);
++ return ret;
++ }
+ iio_trigger_set_immutable(indio_dev, dln2->trig);
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
+index b080362a87669..56aac0a9b500b 100644
+--- a/drivers/iio/gyro/itg3200_buffer.c
++++ b/drivers/iio/gyro/itg3200_buffer.c
+@@ -64,9 +64,9 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
+
++error_ret:
+ iio_trigger_notify_done(indio_dev->trig);
+
+-error_ret:
+ return IRQ_HANDLED;
+ }
+
+diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
+index ce66699c7fccb..49d76fa7f9076 100644
+--- a/drivers/iio/industrialio-trigger.c
++++ b/drivers/iio/industrialio-trigger.c
+@@ -549,7 +549,6 @@ static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
+ irq_modify_status(trig->subirq_base + i,
+ IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
+ }
+- get_device(&trig->dev);
+
+ return trig;
+
+diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
+index f3fb79c231be6..e9bc23d0afb49 100644
+--- a/drivers/iio/light/ltr501.c
++++ b/drivers/iio/light/ltr501.c
+@@ -1275,7 +1275,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
+ ret = regmap_bulk_read(data->regmap, LTR501_ALS_DATA1,
+ (u8 *)als_buf, sizeof(als_buf));
+ if (ret < 0)
+- return ret;
++ goto done;
+ if (test_bit(0, indio_dev->active_scan_mask))
+ scan.channels[j++] = le16_to_cpu(als_buf[1]);
+ if (test_bit(1, indio_dev->active_scan_mask))
+diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
+index 6e2a169da9508..4cfa0101b3f84 100644
+--- a/drivers/iio/light/stk3310.c
++++ b/drivers/iio/light/stk3310.c
+@@ -545,9 +545,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
+ mutex_lock(&data->lock);
+ ret = regmap_field_read(data->reg_flag_nf, &dir);
+ if (ret < 0) {
+- dev_err(&data->client->dev, "register read failed\n");
+- mutex_unlock(&data->lock);
+- return ret;
++ dev_err(&data->client->dev, "register read failed: %d\n", ret);
++ goto out;
+ }
+ event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1,
+ IIO_EV_TYPE_THRESH,
+@@ -559,6 +558,7 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
+ ret = regmap_field_write(data->reg_flag_psint, 0);
+ if (ret < 0)
+ dev_err(&data->client->dev, "failed to reset interrupts\n");
++out:
+ mutex_unlock(&data->lock);
+
+ return IRQ_HANDLED;
+diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
+index 7722745e101f5..e7acb9421e53a 100644
+--- a/drivers/iio/trigger/stm32-timer-trigger.c
++++ b/drivers/iio/trigger/stm32-timer-trigger.c
+@@ -884,6 +884,6 @@ static struct platform_driver stm32_timer_trigger_driver = {
+ };
+ module_platform_driver(stm32_timer_trigger_driver);
+
+-MODULE_ALIAS("platform: stm32-timer-trigger");
++MODULE_ALIAS("platform:stm32-timer-trigger");
+ MODULE_DESCRIPTION("STMicroelectronics STM32 Timer Trigger driver");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
+index 368f4f08b6866..d9890ca1d70a9 100644
+--- a/drivers/infiniband/hw/hfi1/init.c
++++ b/drivers/infiniband/hw/hfi1/init.c
+@@ -1146,7 +1146,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
+ rcd->egrbufs.rcvtids = NULL;
+
+ for (e = 0; e < rcd->egrbufs.alloced; e++) {
+- if (rcd->egrbufs.buffers[e].dma)
++ if (rcd->egrbufs.buffers[e].addr)
+ dma_free_coherent(&dd->pcidev->dev,
+ rcd->egrbufs.buffers[e].len,
+ rcd->egrbufs.buffers[e].addr,
+diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
+index c9bdc5221b82f..5849ac5a2ad3b 100644
+--- a/drivers/irqchip/irq-armada-370-xp.c
++++ b/drivers/irqchip/irq-armada-370-xp.c
+@@ -232,16 +232,12 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
+ int hwirq, i;
+
+ mutex_lock(&msi_used_lock);
++ hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR,
++ order_base_2(nr_irqs));
++ mutex_unlock(&msi_used_lock);
+
+- hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
+- 0, nr_irqs, 0);
+- if (hwirq >= PCI_MSI_DOORBELL_NR) {
+- mutex_unlock(&msi_used_lock);
++ if (hwirq < 0)
+ return -ENOSPC;
+- }
+-
+- bitmap_set(msi_used, hwirq, nr_irqs);
+- mutex_unlock(&msi_used_lock);
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+@@ -250,7 +246,7 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
+ NULL, NULL);
+ }
+
+- return hwirq;
++ return 0;
+ }
+
+ static void armada_370_xp_msi_free(struct irq_domain *domain,
+@@ -259,7 +255,7 @@ static void armada_370_xp_msi_free(struct irq_domain *domain,
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+ mutex_lock(&msi_used_lock);
+- bitmap_clear(msi_used, d->hwirq, nr_irqs);
++ bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs));
+ mutex_unlock(&msi_used_lock);
+ }
+
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index b55dff1aa50b3..86334aef4bd05 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -581,7 +581,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its,
+
+ its_fixup_cmd(cmd);
+
+- return NULL;
++ return desc->its_invall_cmd.col;
+ }
+
+ static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
+diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
+index b1777104fd9fe..9694529b709de 100644
+--- a/drivers/irqchip/irq-nvic.c
++++ b/drivers/irqchip/irq-nvic.c
+@@ -29,7 +29,7 @@
+
+ #define NVIC_ISER 0x000
+ #define NVIC_ICER 0x080
+-#define NVIC_IPR 0x300
++#define NVIC_IPR 0x400
+
+ #define NVIC_MAX_BANKS 16
+ /*
+diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
+index a31bb1da44ec9..9692a71f53565 100644
+--- a/drivers/mtd/nand/raw/fsmc_nand.c
++++ b/drivers/mtd/nand/raw/fsmc_nand.c
+@@ -18,6 +18,7 @@
+
+ #include <linux/clk.h>
+ #include <linux/completion.h>
++#include <linux/delay.h>
+ #include <linux/dmaengine.h>
+ #include <linux/dma-direction.h>
+ #include <linux/dma-mapping.h>
+@@ -700,6 +701,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
++
++ if (instr->delay_ns)
++ ndelay(instr->delay_ns);
+ }
+
+ return ret;
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index 2ee046c813893..61ae622125c43 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -1530,14 +1530,14 @@ void bond_alb_monitor(struct work_struct *work)
+ struct slave *slave;
+
+ if (!bond_has_slaves(bond)) {
+- bond_info->tx_rebalance_counter = 0;
++ atomic_set(&bond_info->tx_rebalance_counter, 0);
+ bond_info->lp_counter = 0;
+ goto re_arm;
+ }
+
+ rcu_read_lock();
+
+- bond_info->tx_rebalance_counter++;
++ atomic_inc(&bond_info->tx_rebalance_counter);
+ bond_info->lp_counter++;
+
+ /* send learning packets */
+@@ -1559,7 +1559,7 @@ void bond_alb_monitor(struct work_struct *work)
+ }
+
+ /* rebalance tx traffic */
+- if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
++ if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ tlb_clear_slave(bond, slave, 1);
+ if (slave == rcu_access_pointer(bond->curr_active_slave)) {
+@@ -1569,7 +1569,7 @@ void bond_alb_monitor(struct work_struct *work)
+ bond_info->unbalanced_load = 0;
+ }
+ }
+- bond_info->tx_rebalance_counter = 0;
++ atomic_set(&bond_info->tx_rebalance_counter, 0);
+ }
+
+ if (bond_info->rlb_enabled) {
+@@ -1639,7 +1639,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
+ tlb_init_slave(slave);
+
+ /* order a rebalance ASAP */
+- bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
++ atomic_set(&bond->alb_info.tx_rebalance_counter,
++ BOND_TLB_REBALANCE_TICKS);
+
+ if (bond->alb_info.rlb_enabled)
+ bond->alb_info.rlb_rebalance = 1;
+@@ -1676,7 +1677,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
+ rlb_clear_slave(bond, slave);
+ } else if (link == BOND_LINK_UP) {
+ /* order a rebalance ASAP */
+- bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
++ atomic_set(&bond_info->tx_rebalance_counter,
++ BOND_TLB_REBALANCE_TICKS);
+ if (bond->alb_info.rlb_enabled) {
+ bond->alb_info.rlb_rebalance = 1;
+ /* If the updelay module parameter is smaller than the
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index e87c3bb820817..f5f1367d40d5c 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -233,15 +233,15 @@ enum m_can_mram_cfg {
+
+ /* Interrupts for version 3.0.x */
+ #define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
+-#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \
+- IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
+- IR_RF1L | IR_RF0L)
++#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
++ IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
++ IR_RF0L)
+ #define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X)
+ /* Interrupts for version >= 3.1.x */
+ #define IR_ERR_LEC_31X (IR_PED | IR_PEA)
+-#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
+- IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
+- IR_RF1L | IR_RF0L)
++#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
++ IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
++ IR_RF0L)
+ #define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X)
+
+ /* Interrupt Line Select (ILS) */
+@@ -769,8 +769,6 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
+ {
+ if (irqstatus & IR_WDI)
+ netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
+- if (irqstatus & IR_ELO)
+- netdev_err(dev, "Error Logging Overflow\n");
+ if (irqstatus & IR_BEU)
+ netdev_err(dev, "Bit Error Uncorrected\n");
+ if (irqstatus & IR_BEC)
+diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
+index c1317889d3d8d..ced11ea892698 100644
+--- a/drivers/net/can/pch_can.c
++++ b/drivers/net/can/pch_can.c
+@@ -703,11 +703,11 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
+ cf->data[i + 1] = data_reg >> 8;
+ }
+
+- netif_receive_skb(skb);
+ rcv_pkts++;
+ stats->rx_packets++;
+ quota--;
+ stats->rx_bytes += cf->can_dlc;
++ netif_receive_skb(skb);
+
+ pch_fifo_thresh(priv, obj_num);
+ obj_num++;
+diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c
+index 381de998d2f16..fef5c59c0f4ca 100644
+--- a/drivers/net/can/sja1000/ems_pcmcia.c
++++ b/drivers/net/can/sja1000/ems_pcmcia.c
+@@ -243,7 +243,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
+ free_sja1000dev(dev);
+ }
+
+- err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
++ if (!card->channels) {
++ err = -ENODEV;
++ goto failure_cleanup;
++ }
++
++ err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
+ DRV_NAME, card);
+ if (!err)
+ return 0;
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+index 1b9957f12459a..8b5d1add899a6 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+@@ -28,10 +28,6 @@
+
+ #include "kvaser_usb.h"
+
+-/* Forward declaration */
+-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
+-
+-#define CAN_USB_CLOCK 8000000
+ #define MAX_USBCAN_NET_DEVICES 2
+
+ /* Command header size */
+@@ -80,6 +76,12 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
+
+ #define CMD_LEAF_LOG_MESSAGE 106
+
++/* Leaf frequency options */
++#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60
++#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0
++#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
++#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)
++
+ /* error factors */
+ #define M16C_EF_ACKE BIT(0)
+ #define M16C_EF_CRCE BIT(1)
+@@ -340,6 +342,50 @@ struct kvaser_usb_err_summary {
+ };
+ };
+
++static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
++ .name = "kvaser_usb",
++ .tseg1_min = KVASER_USB_TSEG1_MIN,
++ .tseg1_max = KVASER_USB_TSEG1_MAX,
++ .tseg2_min = KVASER_USB_TSEG2_MIN,
++ .tseg2_max = KVASER_USB_TSEG2_MAX,
++ .sjw_max = KVASER_USB_SJW_MAX,
++ .brp_min = KVASER_USB_BRP_MIN,
++ .brp_max = KVASER_USB_BRP_MAX,
++ .brp_inc = KVASER_USB_BRP_INC,
++};
++
++static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
++ .clock = {
++ .freq = 8000000,
++ },
++ .timestamp_freq = 1,
++ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
++};
++
++static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
++ .clock = {
++ .freq = 16000000,
++ },
++ .timestamp_freq = 1,
++ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
++};
++
++static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
++ .clock = {
++ .freq = 24000000,
++ },
++ .timestamp_freq = 1,
++ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
++};
++
++static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
++ .clock = {
++ .freq = 32000000,
++ },
++ .timestamp_freq = 1,
++ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
++};
++
+ static void *
+ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+@@ -471,6 +517,27 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
+ return rc;
+ }
+
++static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
++ const struct leaf_cmd_softinfo *softinfo)
++{
++ u32 sw_options = le32_to_cpu(softinfo->sw_options);
++
++ dev->fw_version = le32_to_cpu(softinfo->fw_version);
++ dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
++
++ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
++ case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
++ dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz;
++ break;
++ case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
++ dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz;
++ break;
++ case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
++ dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz;
++ break;
++ }
++}
++
+ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
+ {
+ struct kvaser_cmd cmd;
+@@ -486,14 +553,13 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+- dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version);
+- dev->max_tx_urbs =
+- le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx);
++ kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
+ break;
+ case KVASER_USBCAN:
+ dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
+ dev->max_tx_urbs =
+ le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
++ dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz;
+ break;
+ }
+
+@@ -1225,24 +1291,11 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
+ {
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+
+- dev->cfg = &kvaser_usb_leaf_dev_cfg;
+ card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+
+ return 0;
+ }
+
+-static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
+- .name = "kvaser_usb",
+- .tseg1_min = KVASER_USB_TSEG1_MIN,
+- .tseg1_max = KVASER_USB_TSEG1_MAX,
+- .tseg2_min = KVASER_USB_TSEG2_MIN,
+- .tseg2_max = KVASER_USB_TSEG2_MAX,
+- .sjw_max = KVASER_USB_SJW_MAX,
+- .brp_min = KVASER_USB_BRP_MIN,
+- .brp_max = KVASER_USB_BRP_MAX,
+- .brp_inc = KVASER_USB_BRP_INC,
+-};
+-
+ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+ {
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+@@ -1348,11 +1401,3 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
+ .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback,
+ .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd,
+ };
+-
+-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = {
+- .clock = {
+- .freq = CAN_USB_CLOCK,
+- },
+- .timestamp_freq = 1,
+- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+-};
+diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
+index c3c1195021a2b..1b4dfd357383b 100644
+--- a/drivers/net/ethernet/altera/altera_tse_main.c
++++ b/drivers/net/ethernet/altera/altera_tse_main.c
+@@ -1445,16 +1445,19 @@ static int altera_tse_probe(struct platform_device *pdev)
+ priv->rxdescmem_busaddr = dma_res->start;
+
+ } else {
++ ret = -ENODEV;
+ goto err_free_netdev;
+ }
+
+- if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
++ if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
+ dma_set_coherent_mask(priv->device,
+ DMA_BIT_MASK(priv->dmaops->dmamask));
+- else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
++ } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
+ dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
+- else
++ } else {
++ ret = -EIO;
+ goto err_free_netdev;
++ }
+
+ /* MAC address space */
+ ret = request_and_map(pdev, "control_port", &control_port,
+diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
+index d06a89e99872d..fcb4f4cd349a4 100644
+--- a/drivers/net/ethernet/freescale/fec.h
++++ b/drivers/net/ethernet/freescale/fec.h
+@@ -373,6 +373,9 @@ struct bufdesc_ex {
+ #define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */
+ #define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
+ #define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
++#define FEC_ENET_RXF_GET(X) (((X) == 0) ? FEC_ENET_RXF_0 : \
++ (((X) == 1) ? FEC_ENET_RXF_1 : \
++ FEC_ENET_RXF_2))
+ #define FEC_ENET_TS_AVAIL ((uint)0x00010000)
+ #define FEC_ENET_TS_TIMER ((uint)0x00008000)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 3fc823e9cdc9d..fcd5d845e99ae 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1441,7 +1441,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+ break;
+ pkt_received++;
+
+- writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
++ writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
+
+ /* Check for errors. */
+ status ^= BD_ENET_RX_LAST;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 02d245970d7fa..55710028c99f3 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -3580,11 +3580,6 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
+
+ /* set this flag only after making sure all inputs are sane */
+ vf->adq_enabled = true;
+- /* num_req_queues is set when user changes number of queues via ethtool
+- * and this causes issue for default VSI(which depends on this variable)
+- * when ADq is enabled, hence reset it.
+- */
+- vf->num_req_queues = 0;
+
+ /* reset the VF in order to allocate resources */
+ i40e_vc_notify_vf_reset(vf);
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 00c833cd2b3ae..e513c46bd0f64 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -4404,6 +4404,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
+ netif_carrier_on(vsi->netdev);
+ }
+
++ /* clear this now, and the first stats read will be used as baseline */
++ vsi->stat_offsets_loaded = false;
++
+ ice_service_task_schedule(pf);
+
+ return err;
+diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+index 73de57a09800d..4e417f839c3f0 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+@@ -803,8 +803,10 @@ int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
+ return -ENOMEM;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+- if (!cache)
++ if (!cache) {
++ nfp_cpp_area_free(area);
+ return -ENOMEM;
++ }
+
+ cache->id = 0;
+ cache->addr = 0;
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
+index 1976279800cd8..9d5c2e31dfe9b 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
+@@ -1606,6 +1606,13 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ data_split = true;
+ }
+ } else {
++ if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
++ DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
++ qede_free_failed_tx_pkt(txq, first_bd, 0, false);
++ qede_update_tx_producer(txq);
++ return NETDEV_TX_OK;
++ }
++
+ val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
+ }
+diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
+index f98e2f417c2ea..f38dda1d92e2b 100644
+--- a/drivers/net/ethernet/qlogic/qla3xxx.c
++++ b/drivers/net/ethernet/qlogic/qla3xxx.c
+@@ -3496,20 +3496,19 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+- err = ql_wait_for_drvr_lock(qdev);
+- if (err) {
+- err = ql_adapter_initialize(qdev);
+- if (err) {
+- netdev_err(ndev, "Unable to initialize adapter\n");
+- goto err_init;
+- }
+- netdev_err(ndev, "Releasing driver lock\n");
+- ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+- } else {
++ if (!ql_wait_for_drvr_lock(qdev)) {
+ netdev_err(ndev, "Could not acquire driver lock\n");
++ err = -ENODEV;
+ goto err_lock;
+ }
+
++ err = ql_adapter_initialize(qdev);
++ if (err) {
++ netdev_err(ndev, "Unable to initialize adapter\n");
++ goto err_init;
++ }
++ ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
++
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ set_bit(QL_ADAPTER_UP, &qdev->flags);
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index f3f78ccdb274c..0e1306ded31ea 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -177,6 +177,8 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
+ /* clamp new_tx to sane values */
+ min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
+ max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
++ if (max == 0)
++ max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */
+
+ /* some devices set dwNtbOutMaxSize too low for the above default */
+ min = min(min, max);
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 2025261e97a1c..0410f05ccc26a 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -409,7 +409,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
+ * (see the end of section 5.6.3), so don't warn about them.
+ */
+- maxp = usb_endpoint_maxp(&endpoint->desc);
++ maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
+ if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
+ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+@@ -425,9 +425,9 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ maxpacket_maxes = full_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_HIGH:
+- /* Bits 12..11 are allowed only for HS periodic endpoints */
++ /* Multiple-transactions bits are allowed only for HS periodic endpoints */
+ if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
+- i = maxp & (BIT(12) | BIT(11));
++ i = maxp & USB_EP_MAXP_MULT_MASK;
+ maxp &= ~i;
+ }
+ /* fallthrough */
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index a76ed4acb5700..99550c9eb33ed 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -1634,6 +1634,18 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ struct usb_function *f = NULL;
+ u8 endp;
+
++ if (w_length > USB_COMP_EP0_BUFSIZ) {
++ if (ctrl->bRequestType == USB_DIR_OUT) {
++ goto done;
++ } else {
++ /* Cast away the const, we are going to overwrite on purpose. */
++ __le16 *temp = (__le16 *)&ctrl->wLength;
++
++ *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
++ w_length = USB_COMP_EP0_BUFSIZ;
++ }
++ }
++
+ /* partial re-init of the response message; the function or the
+ * gadget might need to intercept e.g. a control-OUT completion
+ * when we delegate to it.
+@@ -2147,7 +2159,7 @@ int composite_dev_prepare(struct usb_composite_driver *composite,
+ if (!cdev->req)
+ return -ENOMEM;
+
+- cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
++ cdev->req->buf = kzalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
+ if (!cdev->req->buf)
+ goto fail;
+
+diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
+index e1d566c9918ae..355bc7dab9d5f 100644
+--- a/drivers/usb/gadget/legacy/dbgp.c
++++ b/drivers/usb/gadget/legacy/dbgp.c
+@@ -137,7 +137,7 @@ static int dbgp_enable_ep_req(struct usb_ep *ep)
+ goto fail_1;
+ }
+
+- req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL);
++ req->buf = kzalloc(DBGP_REQ_LEN, GFP_KERNEL);
+ if (!req->buf) {
+ err = -ENOMEM;
+ stp = 2;
+@@ -345,6 +345,19 @@ static int dbgp_setup(struct usb_gadget *gadget,
+ void *data = NULL;
+ u16 len = 0;
+
++ if (length > DBGP_REQ_LEN) {
++ if (ctrl->bRequestType == USB_DIR_OUT) {
++ return err;
++ } else {
++ /* Cast away the const, we are going to overwrite on purpose. */
++ __le16 *temp = (__le16 *)&ctrl->wLength;
++
++ *temp = cpu_to_le16(DBGP_REQ_LEN);
++ length = DBGP_REQ_LEN;
++ }
++ }
++
++
+ if (request == USB_REQ_GET_DESCRIPTOR) {
+ switch (value>>8) {
+ case USB_DT_DEVICE:
+diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
+index 09ed5f02c24f4..848562222015d 100644
+--- a/drivers/usb/gadget/legacy/inode.c
++++ b/drivers/usb/gadget/legacy/inode.c
+@@ -109,6 +109,8 @@ enum ep0_state {
+ /* enough for the whole queue: most events invalidate others */
+ #define N_EVENT 5
+
++#define RBUF_SIZE 256
++
+ struct dev_data {
+ spinlock_t lock;
+ refcount_t count;
+@@ -143,7 +145,7 @@ struct dev_data {
+ struct dentry *dentry;
+
+ /* except this scratch i/o buffer for ep0 */
+- u8 rbuf [256];
++ u8 rbuf[RBUF_SIZE];
+ };
+
+ static inline void get_dev (struct dev_data *data)
+@@ -1332,6 +1334,18 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
++ if (w_length > RBUF_SIZE) {
++ if (ctrl->bRequestType == USB_DIR_OUT) {
++ return value;
++ } else {
++ /* Cast away the const, we are going to overwrite on purpose. */
++ __le16 *temp = (__le16 *)&ctrl->wLength;
++
++ *temp = cpu_to_le16(RBUF_SIZE);
++ w_length = RBUF_SIZE;
++ }
++ }
++
+ spin_lock (&dev->lock);
+ dev->setup_abort = 0;
+ if (dev->state == STATE_DEV_UNCONNECTED) {
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 224d0bdda82ff..cf5af8592f3d8 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -625,6 +625,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
+ continue;
+
+ retval = xhci_disable_slot(xhci, i);
++ xhci_free_virt_device(xhci, i);
+ if (retval)
+ xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
+ i, retval);
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 15e3bf8c9e830..f5bd91752f2d2 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1236,7 +1236,6 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
+ if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
+ /* Delete default control endpoint resources */
+ xhci_free_device_endpoint_resources(xhci, virt_dev, true);
+- xhci_free_virt_device(xhci, slot_id);
+ }
+
+ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index c6eec712cc47a..0c2b726b7797c 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -3810,7 +3810,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ struct xhci_slot_ctx *slot_ctx;
+ int i, ret;
+
+-#ifndef CONFIG_USB_DEFAULT_PERSIST
+ /*
+ * We called pm_runtime_get_noresume when the device was attached.
+ * Decrement the counter here to allow controller to runtime suspend
+@@ -3818,7 +3817,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ */
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ pm_runtime_put_noidle(hcd->self.controller);
+-#endif
+
+ ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
+ /* If the host is halted due to driver unload, we still need to free the
+@@ -3838,9 +3836,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ }
+ xhci_debugfs_remove_slot(xhci, udev->slot_id);
+ virt_dev->udev = NULL;
+- ret = xhci_disable_slot(xhci, udev->slot_id);
+- if (ret)
+- xhci_free_virt_device(xhci, udev->slot_id);
++ xhci_disable_slot(xhci, udev->slot_id);
++ xhci_free_virt_device(xhci, udev->slot_id);
+ }
+
+ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
+@@ -3850,7 +3847,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
+ u32 state;
+ int ret = 0;
+
+- command = xhci_alloc_command(xhci, false, GFP_KERNEL);
++ command = xhci_alloc_command(xhci, true, GFP_KERNEL);
+ if (!command)
+ return -ENOMEM;
+
+@@ -3873,6 +3870,15 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
++
++ wait_for_completion(command->completion);
++
++ if (command->status != COMP_SUCCESS)
++ xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
++ slot_id, command->status);
++
++ xhci_free_command(xhci, command);
++
+ return ret;
+ }
+
+@@ -3969,23 +3975,20 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+
+ xhci_debugfs_create_slot(xhci, slot_id);
+
+-#ifndef CONFIG_USB_DEFAULT_PERSIST
+ /*
+ * If resetting upon resume, we can't put the controller into runtime
+ * suspend if there is a device attached.
+ */
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ pm_runtime_get_noresume(hcd->self.controller);
+-#endif
+
+ /* Is this a LS or FS device under a HS hub? */
+ /* Hub or peripherial? */
+ return 1;
+
+ disable_slot:
+- ret = xhci_disable_slot(xhci, udev->slot_id);
+- if (ret)
+- xhci_free_virt_device(xhci, udev->slot_id);
++ xhci_disable_slot(xhci, udev->slot_id);
++ xhci_free_virt_device(xhci, udev->slot_id);
+
+ return 0;
+ }
+@@ -4114,6 +4117,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+
+ mutex_unlock(&xhci->mutex);
+ ret = xhci_disable_slot(xhci, udev->slot_id);
++ xhci_free_virt_device(xhci, udev->slot_id);
+ if (!ret)
+ xhci_alloc_dev(hcd, udev);
+ kfree(command->completion);
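
The xhci hunks above make xhci_disable_slot() synchronous: the command is now allocated with a completion (the second argument of xhci_alloc_command()), the caller waits for it, and the virt device is freed by the callers themselves instead of from the command handler. A generic sketch of that issue-and-wait shape, with hypothetical names (not xhci code):

	#include <linux/completion.h>

	struct hw_cmd {
		struct completion done;
		int status;
	};

	/* Queue a command, wait for the IRQ handler to complete it, and only
	 * then let the caller tear down whatever the command was using. */
	static int issue_cmd_sync(struct hw_cmd *cmd, int (*queue)(struct hw_cmd *))
	{
		int ret;

		init_completion(&cmd->done);
		ret = queue(cmd);	/* handler calls complete(&cmd->done) when finished */
		if (ret)
			return ret;

		wait_for_completion(&cmd->done);
		return cmd->status;
	}
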
+diff --git a/fs/aio.c b/fs/aio.c
+index 413ec289bfa14..9635c29b83da1 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -176,8 +176,9 @@ struct poll_iocb {
+ struct file *file;
+ struct wait_queue_head *head;
+ __poll_t events;
+- bool done;
+ bool cancelled;
++ bool work_scheduled;
++ bool work_need_resched;
+ struct wait_queue_entry wait;
+ struct work_struct work;
+ };
+@@ -1616,6 +1617,51 @@ static void aio_poll_put_work(struct work_struct *work)
+ iocb_put(iocb);
+ }
+
++/*
++ * Safely lock the waitqueue which the request is on, synchronizing with the
++ * case where the ->poll() provider decides to free its waitqueue early.
++ *
++ * Returns true on success, meaning that req->head->lock was locked, req->wait
++ * is on req->head, and an RCU read lock was taken. Returns false if the
++ * request was already removed from its waitqueue (which might no longer exist).
++ */
++static bool poll_iocb_lock_wq(struct poll_iocb *req)
++{
++ wait_queue_head_t *head;
++
++ /*
++ * While we hold the waitqueue lock and the waitqueue is nonempty,
++ * wake_up_pollfree() will wait for us. However, taking the waitqueue
++ * lock in the first place can race with the waitqueue being freed.
++ *
++ * We solve this as eventpoll does: by taking advantage of the fact that
++ * all users of wake_up_pollfree() will RCU-delay the actual free. If
++ * we enter rcu_read_lock() and see that the pointer to the queue is
++ * non-NULL, we can then lock it without the memory being freed out from
++ * under us, then check whether the request is still on the queue.
++ *
++ * Keep holding rcu_read_lock() as long as we hold the queue lock, in
++ * case the caller deletes the entry from the queue, leaving it empty.
++ * In that case, only RCU prevents the queue memory from being freed.
++ */
++ rcu_read_lock();
++ head = smp_load_acquire(&req->head);
++ if (head) {
++ spin_lock(&head->lock);
++ if (!list_empty(&req->wait.entry))
++ return true;
++ spin_unlock(&head->lock);
++ }
++ rcu_read_unlock();
++ return false;
++}
++
++static void poll_iocb_unlock_wq(struct poll_iocb *req)
++{
++ spin_unlock(&req->head->lock);
++ rcu_read_unlock();
++}
++
+ static void aio_poll_complete_work(struct work_struct *work)
+ {
+ struct poll_iocb *req = container_of(work, struct poll_iocb, work);
+@@ -1635,14 +1681,27 @@ static void aio_poll_complete_work(struct work_struct *work)
+ * avoid further branches in the fast path.
+ */
+ spin_lock_irq(&ctx->ctx_lock);
+- if (!mask && !READ_ONCE(req->cancelled)) {
+- add_wait_queue(req->head, &req->wait);
+- spin_unlock_irq(&ctx->ctx_lock);
+- return;
+- }
++ if (poll_iocb_lock_wq(req)) {
++ if (!mask && !READ_ONCE(req->cancelled)) {
++ /*
++ * The request isn't actually ready to be completed yet.
++ * Reschedule completion if another wakeup came in.
++ */
++ if (req->work_need_resched) {
++ schedule_work(&req->work);
++ req->work_need_resched = false;
++ } else {
++ req->work_scheduled = false;
++ }
++ poll_iocb_unlock_wq(req);
++ spin_unlock_irq(&ctx->ctx_lock);
++ return;
++ }
++ list_del_init(&req->wait.entry);
++ poll_iocb_unlock_wq(req);
++ } /* else, POLLFREE has freed the waitqueue, so we must complete */
+ list_del_init(&iocb->ki_list);
+ iocb->ki_res.res = mangle_poll(mask);
+- req->done = true;
+ spin_unlock_irq(&ctx->ctx_lock);
+
+ iocb_put(iocb);
+@@ -1654,13 +1713,14 @@ static int aio_poll_cancel(struct kiocb *iocb)
+ struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
+ struct poll_iocb *req = &aiocb->poll;
+
+- spin_lock(&req->head->lock);
+- WRITE_ONCE(req->cancelled, true);
+- if (!list_empty(&req->wait.entry)) {
+- list_del_init(&req->wait.entry);
+- schedule_work(&aiocb->poll.work);
+- }
+- spin_unlock(&req->head->lock);
++ if (poll_iocb_lock_wq(req)) {
++ WRITE_ONCE(req->cancelled, true);
++ if (!req->work_scheduled) {
++ schedule_work(&aiocb->poll.work);
++ req->work_scheduled = true;
++ }
++ poll_iocb_unlock_wq(req);
++ } /* else, the request was force-cancelled by POLLFREE already */
+
+ return 0;
+ }
+@@ -1677,20 +1737,26 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ if (mask && !(mask & req->events))
+ return 0;
+
+- list_del_init(&req->wait.entry);
+-
+- if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
++ /*
++ * Complete the request inline if possible. This requires that three
++ * conditions be met:
++ * 1. An event mask must have been passed. If a plain wakeup was done
++ * instead, then mask == 0 and we have to call vfs_poll() to get
++ * the events, so inline completion isn't possible.
++ * 2. The completion work must not have already been scheduled.
++ * 3. ctx_lock must not be busy. We have to use trylock because we
++ * already hold the waitqueue lock, so this inverts the normal
++ * locking order. Use irqsave/irqrestore because not all
++ * filesystems (e.g. fuse) call this function with IRQs disabled,
++ * yet IRQs have to be disabled before ctx_lock is obtained.
++ */
++ if (mask && !req->work_scheduled &&
++ spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
+ struct kioctx *ctx = iocb->ki_ctx;
+
+- /*
+- * Try to complete the iocb inline if we can. Use
+- * irqsave/irqrestore because not all filesystems (e.g. fuse)
+- * call this function with IRQs disabled and because IRQs
+- * have to be disabled before ctx_lock is obtained.
+- */
++ list_del_init(&req->wait.entry);
+ list_del(&iocb->ki_list);
+ iocb->ki_res.res = mangle_poll(mask);
+- req->done = true;
+ if (iocb->ki_eventfd && eventfd_signal_count()) {
+ iocb = NULL;
+ INIT_WORK(&req->work, aio_poll_put_work);
+@@ -1700,7 +1766,43 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ if (iocb)
+ iocb_put(iocb);
+ } else {
+- schedule_work(&req->work);
++ /*
++ * Schedule the completion work if needed. If it was already
++ * scheduled, record that another wakeup came in.
++ *
++ * Don't remove the request from the waitqueue here, as it might
++ * not actually be complete yet (we won't know until vfs_poll()
++ * is called), and we must not miss any wakeups. POLLFREE is an
++ * exception to this; see below.
++ */
++ if (req->work_scheduled) {
++ req->work_need_resched = true;
++ } else {
++ schedule_work(&req->work);
++ req->work_scheduled = true;
++ }
++
++ /*
++ * If the waitqueue is being freed early but we can't complete
++ * the request inline, we have to tear down the request as best
++ * we can. That means immediately removing the request from its
++ * waitqueue and preventing all further accesses to the
++ * waitqueue via the request. We also need to schedule the
++ * completion work (done above). Also mark the request as
++ * cancelled, to potentially skip an unneeded call to ->poll().
++ */
++ if (mask & POLLFREE) {
++ WRITE_ONCE(req->cancelled, true);
++ list_del_init(&req->wait.entry);
++
++ /*
++ * Careful: this *must* be the last step, since as soon
++ * as req->head is NULL'ed out, the request can be
++ * completed and freed, since aio_poll_complete_work()
++ * will no longer need to take the waitqueue lock.
++ */
++ smp_store_release(&req->head, NULL);
++ }
+ }
+ return 1;
+ }
+@@ -1708,6 +1810,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ struct aio_poll_table {
+ struct poll_table_struct pt;
+ struct aio_kiocb *iocb;
++ bool queued;
+ int error;
+ };
+
+@@ -1718,11 +1821,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
+ struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
+
+ /* multiple wait queues per file are not supported */
+- if (unlikely(pt->iocb->poll.head)) {
++ if (unlikely(pt->queued)) {
+ pt->error = -EINVAL;
+ return;
+ }
+
++ pt->queued = true;
+ pt->error = 0;
+ pt->iocb->poll.head = head;
+ add_wait_queue(head, &pt->iocb->poll.wait);
+@@ -1747,12 +1851,14 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
+ req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
+
+ req->head = NULL;
+- req->done = false;
+ req->cancelled = false;
++ req->work_scheduled = false;
++ req->work_need_resched = false;
+
+ apt.pt._qproc = aio_poll_queue_proc;
+ apt.pt._key = req->events;
+ apt.iocb = aiocb;
++ apt.queued = false;
+ apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
+
+ /* initialized the list so that we can do list_empty checks */
+@@ -1761,23 +1867,35 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
+
+ mask = vfs_poll(req->file, &apt.pt) & req->events;
+ spin_lock_irq(&ctx->ctx_lock);
+- if (likely(req->head)) {
+- spin_lock(&req->head->lock);
+- if (unlikely(list_empty(&req->wait.entry))) {
+- if (apt.error)
++ if (likely(apt.queued)) {
++ bool on_queue = poll_iocb_lock_wq(req);
++
++ if (!on_queue || req->work_scheduled) {
++ /*
++ * aio_poll_wake() already either scheduled the async
++ * completion work, or completed the request inline.
++ */
++ if (apt.error) /* unsupported case: multiple queues */
+ cancel = true;
+ apt.error = 0;
+ mask = 0;
+ }
+ if (mask || apt.error) {
++ /* Steal to complete synchronously. */
+ list_del_init(&req->wait.entry);
+ } else if (cancel) {
++ /* Cancel if possible (may be too late though). */
+ WRITE_ONCE(req->cancelled, true);
+- } else if (!req->done) { /* actually waiting for an event */
++ } else if (on_queue) {
++ /*
++ * Actually waiting for an event, so add the request to
++ * active_reqs so that it can be cancelled if needed.
++ */
+ list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+ aiocb->ki_cancel = aio_poll_cancel;
+ }
+- spin_unlock(&req->head->lock);
++ if (on_queue)
++ poll_iocb_unlock_wq(req);
+ }
+ if (mask) { /* no async, we'd stolen it */
+ aiocb->ki_res.res = mangle_poll(mask);
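
The aio changes above hinge on publishing and retiring req->head with an acquire/release pair: a waker that sees POLLFREE clears the pointer as its very last step, and poll_iocb_lock_wq() only dereferences a head it observed under rcu_read_lock(). A minimal sketch of that pairing, with hypothetical names (not the fs/aio.c code):

	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	struct pollable { wait_queue_head_t *head; };

	/* Reader side: only touch the queue if the pointer is still published. */
	static bool try_lock_queue(struct pollable *p)
	{
		wait_queue_head_t *head;

		rcu_read_lock();
		head = smp_load_acquire(&p->head);	/* pairs with the release below */
		if (head) {
			spin_lock(&head->lock);
			return true;	/* caller drops the lock and rcu_read_unlock() */
		}
		rcu_read_unlock();
		return false;
	}

	/* Teardown side: retire the pointer last, after all other state is final. */
	static void retire_queue(struct pollable *p)
	{
		smp_store_release(&p->head, NULL);
	}
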
+diff --git a/fs/signalfd.c b/fs/signalfd.c
+index 3c40a3bf772ce..94e0ae01db5c8 100644
+--- a/fs/signalfd.c
++++ b/fs/signalfd.c
+@@ -35,17 +35,7 @@
+
+ void signalfd_cleanup(struct sighand_struct *sighand)
+ {
+- wait_queue_head_t *wqh = &sighand->signalfd_wqh;
+- /*
+- * The lockless check can race with remove_wait_queue() in progress,
+- * but in this case its caller should run under rcu_read_lock() and
+- * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return.
+- */
+- if (likely(!waitqueue_active(wqh)))
+- return;
+-
+- /* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
+- wake_up_poll(wqh, EPOLLHUP | POLLFREE);
++ wake_up_pollfree(&sighand->signalfd_wqh);
+ }
+
+ struct signalfd_ctx {
+diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
+index 990f794b1dd0a..8834819c0791b 100644
+--- a/fs/tracefs/inode.c
++++ b/fs/tracefs/inode.c
+@@ -162,6 +162,77 @@ struct tracefs_fs_info {
+ struct tracefs_mount_opts mount_opts;
+ };
+
++static void change_gid(struct dentry *dentry, kgid_t gid)
++{
++ if (!dentry->d_inode)
++ return;
++ dentry->d_inode->i_gid = gid;
++}
++
++/*
++ * Taken from d_walk, but without the need for handling renames.
++ * Nothing can be renamed while walking the list, as tracefs
++ * does not support renames. This is only called when mounting
++ * or remounting the file system, to set all the files to
++ * the given gid.
++ */
++static void set_gid(struct dentry *parent, kgid_t gid)
++{
++ struct dentry *this_parent;
++ struct list_head *next;
++
++ this_parent = parent;
++ spin_lock(&this_parent->d_lock);
++
++ change_gid(this_parent, gid);
++repeat:
++ next = this_parent->d_subdirs.next;
++resume:
++ while (next != &this_parent->d_subdirs) {
++ struct list_head *tmp = next;
++ struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
++ next = tmp->next;
++
++ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
++
++ change_gid(dentry, gid);
++
++ if (!list_empty(&dentry->d_subdirs)) {
++ spin_unlock(&this_parent->d_lock);
++ spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
++ this_parent = dentry;
++ spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
++ goto repeat;
++ }
++ spin_unlock(&dentry->d_lock);
++ }
++ /*
++ * All done at this level ... ascend and resume the search.
++ */
++ rcu_read_lock();
++ascend:
++ if (this_parent != parent) {
++ struct dentry *child = this_parent;
++ this_parent = child->d_parent;
++
++ spin_unlock(&child->d_lock);
++ spin_lock(&this_parent->d_lock);
++
++ /* go into the first sibling still alive */
++ do {
++ next = child->d_child.next;
++ if (next == &this_parent->d_subdirs)
++ goto ascend;
++ child = list_entry(next, struct dentry, d_child);
++ } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
++ rcu_read_unlock();
++ goto resume;
++ }
++ rcu_read_unlock();
++ spin_unlock(&this_parent->d_lock);
++ return;
++}
++
+ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
+ {
+ substring_t args[MAX_OPT_ARGS];
+@@ -194,6 +265,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
+ if (!gid_valid(gid))
+ return -EINVAL;
+ opts->gid = gid;
++ set_gid(tracefs_mount->mnt_root, gid);
+ break;
+ case Opt_mode:
+ if (match_octal(&args[0], &option))
+@@ -409,6 +481,8 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
+ inode->i_mode = mode;
+ inode->i_fop = fops ? fops : &tracefs_file_operations;
+ inode->i_private = data;
++ inode->i_uid = d_inode(dentry->d_parent)->i_uid;
++ inode->i_gid = d_inode(dentry->d_parent)->i_gid;
+ d_instantiate(dentry, inode);
+ fsnotify_create(dentry->d_parent->d_inode, dentry);
+ return end_creating(dentry);
+@@ -431,6 +505,8 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
+ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP;
+ inode->i_op = ops;
+ inode->i_fop = &simple_dir_operations;
++ inode->i_uid = d_inode(dentry->d_parent)->i_uid;
++ inode->i_gid = d_inode(dentry->d_parent)->i_gid;
+
+ /* directory inodes start off with i_nlink == 2 (for "." entry) */
+ inc_nlink(inode);
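
set_gid() above can walk the whole tracefs dentry tree iteratively because tracefs never renames entries; together with the new i_uid/i_gid inheritance in tracefs_create_file() and __create_dir(), this lets a gid= (re)mount option retroactively change ownership of existing files. A generic sketch of the same parent-pointer, recursion-free traversal (hypothetical node type, locking omitted):

	/* Illustrative only: depth-first walk in the style of set_gid() above,
	 * no recursion and no extra stack, using parent pointers to ascend. */
	struct tnode {
		struct tnode *parent;
		struct tnode *first_child;
		struct tnode *next_sibling;
	};

	static void walk(struct tnode *root, void (*visit)(struct tnode *))
	{
		struct tnode *n = root;

		visit(n);
		for (;;) {
			if (n->first_child) {		/* descend */
				n = n->first_child;
				visit(n);
				continue;
			}
			while (n != root && !n->next_sibling)
				n = n->parent;		/* ascend until a sibling exists */
			if (n == root)
				break;
			n = n->next_sibling;		/* resume at the next sibling */
			visit(n);
		}
	}
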
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index c833948aade05..da824ba9fb9a2 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -834,6 +834,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
+ return hdev->ll_driver == driver;
+ }
+
++static inline bool hid_is_usb(struct hid_device *hdev)
++{
++ return hid_is_using_ll_driver(hdev, &usb_hid_driver);
++}
++
+ #define PM_HINT_FULLON 1<<5
+ #define PM_HINT_NORMAL 1<<1
+
+diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
+index 5225832bd6ff1..bb9cb84114c15 100644
+--- a/include/linux/rtnetlink.h
++++ b/include/linux/rtnetlink.h
+@@ -6,6 +6,7 @@
+ #include <linux/mutex.h>
+ #include <linux/netdevice.h>
+ #include <linux/wait.h>
++#include <linux/refcount.h>
+ #include <uapi/linux/rtnetlink.h>
+
+ extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
+@@ -34,6 +35,7 @@ extern void rtnl_unlock(void);
+ extern int rtnl_trylock(void);
+ extern int rtnl_is_locked(void);
+ extern int rtnl_lock_killable(void);
++extern bool refcount_dec_and_rtnl_lock(refcount_t *r);
+
+ extern wait_queue_head_t netdev_unregistering_wq;
+ extern struct rw_semaphore pernet_ops_rwsem;
+@@ -83,6 +85,11 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
+ return rtnl_dereference(dev->ingress_queue);
+ }
+
++static inline struct netdev_queue *dev_ingress_queue_rcu(struct net_device *dev)
++{
++ return rcu_dereference(dev->ingress_queue);
++}
++
+ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
+
+ #ifdef CONFIG_NET_INGRESS
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index ed7c122cb31f4..1d726d79063c5 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -191,6 +191,7 @@ void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
+ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+ void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
+ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
++void __wake_up_pollfree(struct wait_queue_head *wq_head);
+
+ #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
+ #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
+@@ -217,6 +218,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
+ #define wake_up_interruptible_sync_poll(x, m) \
+ __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
+
++/**
++ * wake_up_pollfree - signal that a polled waitqueue is going away
++ * @wq_head: the wait queue head
++ *
++ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
++ * lifetime is tied to a task rather than to the 'struct file' being polled,
++ * this function must be called before the waitqueue is freed so that
++ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
++ *
++ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
++ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
++ */
++static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
++{
++ /*
++ * For performance reasons, we don't always take the queue lock here.
++ * Therefore, we might race with someone removing the last entry from
++ * the queue, and proceed while they still hold the queue lock.
++ * However, rcu_read_lock() is required to be held in such cases, so we
++ * can safely proceed with an RCU-delayed free.
++ */
++ if (waitqueue_active(wq_head))
++ __wake_up_pollfree(wq_head);
++}
++
+ #define ___wait_cond_timeout(condition) \
+ ({ \
+ bool __cond = (condition); \
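
wake_up_pollfree() is the generic replacement for the open-coded wake_up_poll(wq, EPOLLHUP | POLLFREE) pattern removed from signalfd above. Per the kerneldoc, it must be called right before the waitqueue memory goes away, and the free itself must be RCU-delayed. A minimal usage sketch under those assumptions (hypothetical structure):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/wait.h>

	struct foo {
		wait_queue_head_t wqh;	/* lifetime tied to a task, not to a file */
		struct rcu_head rcu;
	};

	static void foo_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct foo, rcu));
	}

	static void foo_release(struct foo *f)
	{
		wake_up_pollfree(&f->wqh);		/* detach any epoll/aio poll waiters */
		call_rcu(&f->rcu, foo_free_rcu);	/* RCU-delay the actual free */
	}
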
+diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h
+index 313a8d3b30696..3a6c932b6dcaf 100644
+--- a/include/net/bond_alb.h
++++ b/include/net/bond_alb.h
+@@ -142,7 +142,7 @@ struct tlb_slave_info {
+ struct alb_bond_info {
+ struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
+ u32 unbalanced_load;
+- int tx_rebalance_counter;
++ atomic_t tx_rebalance_counter;
+ int lp_counter;
+ /* -------- rlb parameters -------- */
+ int rlb_enabled;
+diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
+index edca90ef3bdc4..1a6ac924266db 100644
+--- a/include/net/pkt_sched.h
++++ b/include/net/pkt_sched.h
+@@ -103,6 +103,7 @@ int qdisc_set_default(const char *id);
+ void qdisc_hash_add(struct Qdisc *q, bool invisible);
+ void qdisc_hash_del(struct Qdisc *q);
+ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
++struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
+ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
+ struct nlattr *tab,
+ struct netlink_ext_ack *extack);
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 286bc674a6e79..c0147888b1555 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -108,6 +108,7 @@ struct Qdisc {
+
+ spinlock_t busylock ____cacheline_aligned_in_smp;
+ spinlock_t seqlock;
++ struct rcu_head rcu;
+ };
+
+ static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
+@@ -117,6 +118,19 @@ static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
+ refcount_inc(&qdisc->refcnt);
+ }
+
++/* Intended to be used by unlocked users, when concurrent qdisc release is
++ * possible.
++ */
++
++static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
++{
++ if (qdisc->flags & TCQ_F_BUILTIN)
++ return qdisc;
++ if (refcount_inc_not_zero(&qdisc->refcnt))
++ return qdisc;
++ return NULL;
++}
++
+ static inline bool qdisc_is_running(struct Qdisc *qdisc)
+ {
+ if (qdisc->flags & TCQ_F_NOLOCK)
+@@ -559,7 +573,8 @@ void dev_deactivate_many(struct list_head *head);
+ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+ struct Qdisc *qdisc);
+ void qdisc_reset(struct Qdisc *qdisc);
+-void qdisc_destroy(struct Qdisc *qdisc);
++void qdisc_put(struct Qdisc *qdisc);
++void qdisc_put_unlocked(struct Qdisc *qdisc);
+ void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
+ unsigned int len);
+ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
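
qdisc_refcount_inc_nz() exists so the RCU-protected lookups added in cls_api.c can take a reference only if destruction has not already begun; paired with the RCU-deferred free in sch_generic.c below, this is the usual lookup-under-RCU idiom. A generic sketch with hypothetical names:

	#include <linux/rcupdate.h>
	#include <linux/refcount.h>
	#include <linux/types.h>

	struct obj {
		refcount_t refcnt;
		/* ... */
	};

	struct obj *obj_lookup_rcu(u32 handle);	/* assumed RCU-safe lookup */

	static struct obj *obj_lookup_get(u32 handle)
	{
		struct obj *o;

		rcu_read_lock();
		o = obj_lookup_rcu(handle);
		if (o && !refcount_inc_not_zero(&o->refcnt))
			o = NULL;		/* lost the race against the final put */
		rcu_read_unlock();

		return o;			/* caller must drop the reference later */
	}
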
+diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h
+index 41b509f410bf9..f9c520ce4bf4e 100644
+--- a/include/uapi/asm-generic/poll.h
++++ b/include/uapi/asm-generic/poll.h
+@@ -29,7 +29,7 @@
+ #define POLLRDHUP 0x2000
+ #endif
+
+-#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */
++#define POLLFREE (__force __poll_t)0x4000
+
+ #define POLL_BUSY_LOOP (__force __poll_t)0x8000
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 9a671f604ebfe..30ac8ee8294c3 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -3761,7 +3761,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
+
+ new_range = dst_reg->off;
+ if (range_right_open)
+- new_range--;
++ new_range++;
+
+ /* Examples for register markings:
+ *
+diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
+index 5dd47f1103d18..a44a57fb95402 100644
+--- a/kernel/sched/wait.c
++++ b/kernel/sched/wait.c
+@@ -209,6 +209,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_e
+ }
+ EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
+
++void __wake_up_pollfree(struct wait_queue_head *wq_head)
++{
++ __wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
++ /* POLLFREE must have cleared the queue. */
++ WARN_ON_ONCE(waitqueue_active(wq_head));
++}
++
+ /*
+ * Note: we use "set_current_state()" _after_ the wait-queue add,
+ * because we need a memory barrier there on SMP, so that any
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index 1d37c80d023a2..67431d02ad9d0 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -946,6 +946,13 @@ void bdi_unregister(struct backing_dev_info *bdi)
+ wb_shutdown(&bdi->wb);
+ cgwb_bdi_unregister(bdi);
+
++ /*
++ * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
++ * update the global bdi_min_ratio.
++ */
++ if (bdi->min_ratio)
++ bdi_set_min_ratio(bdi, 0);
++
+ if (bdi->dev) {
+ bdi_debug_unregister(bdi);
+ device_unregister(bdi->dev);
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index e471c32e448f6..6233e9856016e 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -635,7 +635,7 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
+
+ ASSERT_RTNL();
+
+- n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
++ n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
+ if (!n)
+ goto out;
+
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 83de32e34bb55..907dd0c7e8a66 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -130,6 +130,12 @@ int rtnl_is_locked(void)
+ }
+ EXPORT_SYMBOL(rtnl_is_locked);
+
++bool refcount_dec_and_rtnl_lock(refcount_t *r)
++{
++ return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
++}
++EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
++
+ #ifdef CONFIG_PROVE_LOCKING
+ bool lockdep_rtnl_is_held(void)
+ {
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index e62de979ee30c..fce32f3e42b54 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -798,7 +798,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
++ if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
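
The udp.c hunk compares the payload length (datalen) rather than skb->len against cork->gso_size * UDP_MAX_SEGMENTS, since skb->len still includes the UDP header at that point and made the cap trip slightly too early. For context, a hedged sketch of the user-space UDP_SEGMENT setup this path validates (UDP_SEGMENT value assumed from the uapi header):

	#include <netinet/in.h>
	#include <sys/socket.h>

	#ifndef UDP_SEGMENT
	#define UDP_SEGMENT 103		/* from <linux/udp.h> */
	#endif

	/* Ask the kernel to split each send() into gso_size-byte datagrams. */
	static int enable_udp_gso(int fd, int gso_size)
	{
		return setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
				  &gso_size, sizeof(gso_size));
	}
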
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index ee5403cbe655e..26882fd9323a2 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -148,6 +148,14 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+ hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
+
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++
++ /* the control block has been erased, so we have to set the
++ * iif once again.
++ * We read the receiving interface index directly from the
++ * skb->skb_iif as it is done in the IPv4 receiving path (i.e.:
++ * ip_rcv_core(...)).
++ */
++ IP6CB(skb)->iif = skb->skb_iif;
+ }
+
+ hdr->nexthdr = NEXTHDR_ROUTING;
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 4884e1987562a..b6313504faed1 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -1410,8 +1410,10 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
+ {
+ struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
+
+- nfc_device_iter_exit(iter);
+- kfree(iter);
++ if (iter) {
++ nfc_device_iter_exit(iter);
++ kfree(iter);
++ }
+
+ return 0;
+ }
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 4413aa8d4e829..435911dc9f16a 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -539,6 +539,7 @@ static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
+ struct netlink_ext_ack *extack)
+ {
+ struct tcf_block *block;
++ int err = 0;
+
+ if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
+ block = tcf_block_lookup(net, block_index);
+@@ -550,55 +551,95 @@ static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
+ const struct Qdisc_class_ops *cops;
+ struct net_device *dev;
+
++ rcu_read_lock();
++
+ /* Find link */
+- dev = __dev_get_by_index(net, ifindex);
+- if (!dev)
++ dev = dev_get_by_index_rcu(net, ifindex);
++ if (!dev) {
++ rcu_read_unlock();
+ return ERR_PTR(-ENODEV);
++ }
+
+ /* Find qdisc */
+ if (!*parent) {
+ *q = dev->qdisc;
+ *parent = (*q)->handle;
+ } else {
+- *q = qdisc_lookup(dev, TC_H_MAJ(*parent));
++ *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
+ if (!*q) {
+ NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
+- return ERR_PTR(-EINVAL);
++ err = -EINVAL;
++ goto errout_rcu;
+ }
+ }
+
++ *q = qdisc_refcount_inc_nz(*q);
++ if (!*q) {
++ NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
++ err = -EINVAL;
++ goto errout_rcu;
++ }
++
+ /* Is it classful? */
+ cops = (*q)->ops->cl_ops;
+ if (!cops) {
+ NL_SET_ERR_MSG(extack, "Qdisc not classful");
+- return ERR_PTR(-EINVAL);
++ err = -EINVAL;
++ goto errout_rcu;
+ }
+
+ if (!cops->tcf_block) {
+ NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
+- return ERR_PTR(-EOPNOTSUPP);
++ err = -EOPNOTSUPP;
++ goto errout_rcu;
+ }
+
++ /* At this point we know that qdisc is not noop_qdisc,
++ * which means that qdisc holds a reference to net_device
++ * and we hold a reference to qdisc, so it is safe to release
++ * rcu read lock.
++ */
++ rcu_read_unlock();
++
+ /* Do we search for filter, attached to class? */
+ if (TC_H_MIN(*parent)) {
+ *cl = cops->find(*q, *parent);
+ if (*cl == 0) {
+ NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
+- return ERR_PTR(-ENOENT);
++ err = -ENOENT;
++ goto errout_qdisc;
+ }
+ }
+
+ /* And the last stroke */
+ block = cops->tcf_block(*q, *cl, extack);
+- if (!block)
+- return ERR_PTR(-EINVAL);
++ if (!block) {
++ err = -EINVAL;
++ goto errout_qdisc;
++ }
+ if (tcf_block_shared(block)) {
+ NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
+- return ERR_PTR(-EOPNOTSUPP);
++ err = -EOPNOTSUPP;
++ goto errout_qdisc;
+ }
+ }
+
+ return block;
++
++errout_rcu:
++ rcu_read_unlock();
++errout_qdisc:
++ if (*q) {
++ qdisc_put(*q);
++ *q = NULL;
++ }
++ return ERR_PTR(err);
++}
++
++static void tcf_block_release(struct Qdisc *q, struct tcf_block *block)
++{
++ if (q)
++ qdisc_put(q);
+ }
+
+ struct tcf_block_owner_item {
+@@ -1336,6 +1377,7 @@ replay:
+ errout:
+ if (chain)
+ tcf_chain_put(chain);
++ tcf_block_release(q, block);
+ if (err == -EAGAIN)
+ /* Replay the request. */
+ goto replay;
+@@ -1457,6 +1499,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ errout:
+ if (chain)
+ tcf_chain_put(chain);
++ tcf_block_release(q, block);
+ return err;
+ }
+
+@@ -1542,6 +1585,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ errout:
+ if (chain)
+ tcf_chain_put(chain);
++ tcf_block_release(q, block);
+ return err;
+ }
+
+@@ -1858,7 +1902,8 @@ replay:
+ chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ if (chain_index > TC_ACT_EXT_VAL_MASK) {
+ NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
+- return -EINVAL;
++ err = -EINVAL;
++ goto errout_block;
+ }
+ chain = tcf_chain_lookup(block, chain_index);
+ if (n->nlmsg_type == RTM_NEWCHAIN) {
+@@ -1870,23 +1915,27 @@ replay:
+ tcf_chain_hold(chain);
+ } else {
+ NL_SET_ERR_MSG(extack, "Filter chain already exists");
+- return -EEXIST;
++ err = -EEXIST;
++ goto errout_block;
+ }
+ } else {
+ if (!(n->nlmsg_flags & NLM_F_CREATE)) {
+ NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
+- return -ENOENT;
++ err = -ENOENT;
++ goto errout_block;
+ }
+ chain = tcf_chain_create(block, chain_index);
+ if (!chain) {
+ NL_SET_ERR_MSG(extack, "Failed to create filter chain");
+- return -ENOMEM;
++ err = -ENOMEM;
++ goto errout_block;
+ }
+ }
+ } else {
+ if (!chain || tcf_chain_held_by_acts_only(chain)) {
+ NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
+- return -EINVAL;
++ err = -EINVAL;
++ goto errout_block;
+ }
+ tcf_chain_hold(chain);
+ }
+@@ -1930,6 +1979,8 @@ replay:
+
+ errout:
+ tcf_chain_put(chain);
++errout_block:
++ tcf_block_release(q, block);
+ if (err == -EAGAIN)
+ /* Replay the request. */
+ goto replay;
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 0bb4f7a94a3c8..af035431bec60 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -315,6 +315,24 @@ out:
+ return q;
+ }
+
++struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
++{
++ struct netdev_queue *nq;
++ struct Qdisc *q;
++
++ if (!handle)
++ return NULL;
++ q = qdisc_match_from_root(dev->qdisc, handle);
++ if (q)
++ goto out;
++
++ nq = dev_ingress_queue_rcu(dev);
++ if (nq)
++ q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
++out:
++ return q;
++}
++
+ static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
+ {
+ unsigned long cl;
+@@ -928,7 +946,7 @@ static void notify_and_destroy(struct net *net, struct sk_buff *skb,
+ qdisc_notify(net, skb, n, clid, old, new);
+
+ if (old)
+- qdisc_destroy(old);
++ qdisc_put(old);
+ }
+
+ /* Graft qdisc "new" to class "classid" of qdisc "parent" or
+@@ -981,7 +999,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
+ qdisc_refcount_inc(new);
+
+ if (!ingress)
+- qdisc_destroy(old);
++ qdisc_put(old);
+ }
+
+ skip:
+@@ -1589,7 +1607,7 @@ graft:
+ err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
+ if (err) {
+ if (q)
+- qdisc_destroy(q);
++ qdisc_put(q);
+ return err;
+ }
+
+diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
+index 91bd5c8393036..9a1bfa13a6cd6 100644
+--- a/net/sched/sch_atm.c
++++ b/net/sched/sch_atm.c
+@@ -150,7 +150,7 @@ static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
+ pr_debug("atm_tc_put: destroying\n");
+ list_del_init(&flow->list);
+ pr_debug("atm_tc_put: qdisc %p\n", flow->q);
+- qdisc_destroy(flow->q);
++ qdisc_put(flow->q);
+ tcf_block_put(flow->block);
+ if (flow->sock) {
+ pr_debug("atm_tc_put: f_count %ld\n",
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index bc62e1b246539..0a76ad05e5ae5 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -1439,7 +1439,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
+ WARN_ON(cl->filters);
+
+ tcf_block_put(cl->block);
+- qdisc_destroy(cl->q);
++ qdisc_put(cl->q);
+ qdisc_put_rtab(cl->R_tab);
+ gen_kill_estimator(&cl->rate_est);
+ if (cl != &q->link)
+diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
+index d5e22452d5976..f95dc899e989a 100644
+--- a/net/sched/sch_cbs.c
++++ b/net/sched/sch_cbs.c
+@@ -452,7 +452,7 @@ static void cbs_destroy(struct Qdisc *sch)
+ cbs_disable_offload(dev, q);
+
+ if (q->qdisc)
+- qdisc_destroy(q->qdisc);
++ qdisc_put(q->qdisc);
+ }
+
+ static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb)
+diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
+index e0b0cf8a99393..cdebaed0f8cfd 100644
+--- a/net/sched/sch_drr.c
++++ b/net/sched/sch_drr.c
+@@ -134,7 +134,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ tca[TCA_RATE]);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to replace estimator");
+- qdisc_destroy(cl->qdisc);
++ qdisc_put(cl->qdisc);
+ kfree(cl);
+ return err;
+ }
+@@ -153,7 +153,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
+ {
+ gen_kill_estimator(&cl->rate_est);
+- qdisc_destroy(cl->qdisc);
++ qdisc_put(cl->qdisc);
+ kfree(cl);
+ }
+
+diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
+index fe030af9272c4..47a61689dadbc 100644
+--- a/net/sched/sch_dsmark.c
++++ b/net/sched/sch_dsmark.c
+@@ -415,7 +415,7 @@ static void dsmark_destroy(struct Qdisc *sch)
+ pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
+
+ tcf_block_put(p->block);
+- qdisc_destroy(p->q);
++ qdisc_put(p->q);
+ if (p->mv != p->embedded)
+ kfree(p->mv);
+ }
+diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
+index bcd3ca97caea1..3697cd7997678 100644
+--- a/net/sched/sch_fifo.c
++++ b/net/sched/sch_fifo.c
+@@ -180,7 +180,7 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
+ if (q) {
+ err = fifo_set_limit(q, limit);
+ if (err < 0) {
+- qdisc_destroy(q);
++ qdisc_put(q);
+ q = NULL;
+ }
+ }
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 2128b77d5cb33..b3ff610d35045 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -918,7 +918,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
+ if (!ops->init || ops->init(sch, NULL, extack) == 0)
+ return sch;
+
+- qdisc_destroy(sch);
++ qdisc_put(sch);
+ return NULL;
+ }
+ EXPORT_SYMBOL(qdisc_create_dflt);
+@@ -958,7 +958,14 @@ void qdisc_free(struct Qdisc *qdisc)
+ kfree((char *) qdisc - qdisc->padded);
+ }
+
+-void qdisc_destroy(struct Qdisc *qdisc)
++static void qdisc_free_cb(struct rcu_head *head)
++{
++ struct Qdisc *q = container_of(head, struct Qdisc, rcu);
++
++ qdisc_free(q);
++}
++
++static void qdisc_destroy(struct Qdisc *qdisc)
+ {
+ const struct Qdisc_ops *ops;
+ struct sk_buff *skb, *tmp;
+@@ -967,10 +974,6 @@ void qdisc_destroy(struct Qdisc *qdisc)
+ return;
+ ops = qdisc->ops;
+
+- if (qdisc->flags & TCQ_F_BUILTIN ||
+- !refcount_dec_and_test(&qdisc->refcnt))
+- return;
+-
+ #ifdef CONFIG_NET_SCHED
+ qdisc_hash_del(qdisc);
+
+@@ -995,9 +998,34 @@ void qdisc_destroy(struct Qdisc *qdisc)
+ kfree_skb_list(skb);
+ }
+
+- qdisc_free(qdisc);
++ call_rcu(&qdisc->rcu, qdisc_free_cb);
++}
++
++void qdisc_put(struct Qdisc *qdisc)
++{
++ if (qdisc->flags & TCQ_F_BUILTIN ||
++ !refcount_dec_and_test(&qdisc->refcnt))
++ return;
++
++ qdisc_destroy(qdisc);
++}
++EXPORT_SYMBOL(qdisc_put);
++
++/* Version of qdisc_put() that is called with rtnl mutex unlocked.
++ * Intended to be used as optimization, this function only takes rtnl lock if
++ * qdisc reference counter reached zero.
++ */
++
++void qdisc_put_unlocked(struct Qdisc *qdisc)
++{
++ if (qdisc->flags & TCQ_F_BUILTIN ||
++ !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
++ return;
++
++ qdisc_destroy(qdisc);
++ rtnl_unlock();
+ }
+-EXPORT_SYMBOL(qdisc_destroy);
++EXPORT_SYMBOL(qdisc_put_unlocked);
+
+ /* Attach toplevel qdisc to device queue. */
+ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+@@ -1318,7 +1346,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
+ rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+ dev_queue->qdisc_sleeping = qdisc_default;
+
+- qdisc_destroy(qdisc);
++ qdisc_put(qdisc);
+ }
+ }
+
+@@ -1327,7 +1355,7 @@ void dev_shutdown(struct net_device *dev)
+ netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
+ if (dev_ingress_queue(dev))
+ shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
+- qdisc_destroy(dev->qdisc);
++ qdisc_put(dev->qdisc);
+ dev->qdisc = &noop_qdisc;
+
+ WARN_ON(timer_pending(&dev->watchdog_timer));
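
In the sch_generic.c hunks above, qdisc_put() becomes the refcounted teardown entry point and qdisc_put_unlocked() uses refcount_dec_and_rtnl_lock() so rtnl is only taken when the last reference drops; the qdisc itself is then freed via call_rcu(), matching the RCU lookups above. A generic sketch of the lock-only-on-final-put idiom (hypothetical names, plain mutex instead of rtnl):

	#include <linux/kernel.h>
	#include <linux/mutex.h>
	#include <linux/rcupdate.h>
	#include <linux/refcount.h>
	#include <linux/slab.h>

	static DEFINE_MUTEX(big_lock);

	struct obj {
		refcount_t refcnt;
		struct rcu_head rcu;
	};

	static void obj_free_cb(struct rcu_head *head)
	{
		kfree(container_of(head, struct obj, rcu));
	}

	static void obj_put_unlocked(struct obj *o)
	{
		if (!refcount_dec_and_mutex_lock(&o->refcnt, &big_lock))
			return;		/* not the last reference: lock never taken */

		/* Last reference: tear down under the lock (unlinking omitted),
		 * then free only after an RCU grace period for lockless readers. */
		mutex_unlock(&big_lock);
		call_rcu(&o->rcu, obj_free_cb);
	}
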
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index 3278a76f68615..b18ec1f6de60c 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -1092,7 +1092,7 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
+ struct hfsc_sched *q = qdisc_priv(sch);
+
+ tcf_block_put(cl->block);
+- qdisc_destroy(cl->qdisc);
++ qdisc_put(cl->qdisc);
+ gen_kill_estimator(&cl->rate_est);
+ if (cl != &q->root)
+ kfree(cl);
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 43c4bfe625a91..862a33b9e2e0f 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -1224,7 +1224,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
+ {
+ if (!cl->level) {
+ WARN_ON(!cl->un.leaf.q);
+- qdisc_destroy(cl->un.leaf.q);
++ qdisc_put(cl->un.leaf.q);
+ }
+ gen_kill_estimator(&cl->rate_est);
+ tcf_block_put(cl->block);
+@@ -1425,7 +1425,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
+ /* turn parent into inner node */
+ qdisc_reset(parent->un.leaf.q);
+ qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
+- qdisc_destroy(parent->un.leaf.q);
++ qdisc_put(parent->un.leaf.q);
+ if (parent->prio_activity)
+ htb_deactivate(q, parent);
+
+diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
+index 699b6bb444cea..0ab13a495af95 100644
+--- a/net/sched/sch_mq.c
++++ b/net/sched/sch_mq.c
+@@ -65,7 +65,7 @@ static void mq_destroy(struct Qdisc *sch)
+ if (!priv->qdiscs)
+ return;
+ for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
+- qdisc_destroy(priv->qdiscs[ntx]);
++ qdisc_put(priv->qdiscs[ntx]);
+ kfree(priv->qdiscs);
+ }
+
+@@ -119,7 +119,7 @@ static void mq_attach(struct Qdisc *sch)
+ qdisc = priv->qdiscs[ntx];
+ old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+ if (old)
+- qdisc_destroy(old);
++ qdisc_put(old);
+ #ifdef CONFIG_NET_SCHED
+ if (ntx < dev->real_num_tx_queues)
+ qdisc_hash_add(qdisc, false);
+diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
+index 3fd0e5dd7ae3e..64d7f876d7de2 100644
+--- a/net/sched/sch_mqprio.c
++++ b/net/sched/sch_mqprio.c
+@@ -40,7 +40,7 @@ static void mqprio_destroy(struct Qdisc *sch)
+ for (ntx = 0;
+ ntx < dev->num_tx_queues && priv->qdiscs[ntx];
+ ntx++)
+- qdisc_destroy(priv->qdiscs[ntx]);
++ qdisc_put(priv->qdiscs[ntx]);
+ kfree(priv->qdiscs);
+ }
+
+@@ -300,7 +300,7 @@ static void mqprio_attach(struct Qdisc *sch)
+ qdisc = priv->qdiscs[ntx];
+ old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+ if (old)
+- qdisc_destroy(old);
++ qdisc_put(old);
+ if (ntx < dev->real_num_tx_queues)
+ qdisc_hash_add(qdisc, false);
+ }
+diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
+index 1df78e361ef94..1c2f9a3ab1ca7 100644
+--- a/net/sched/sch_multiq.c
++++ b/net/sched/sch_multiq.c
+@@ -175,7 +175,7 @@ multiq_destroy(struct Qdisc *sch)
+
+ tcf_block_put(q->block);
+ for (band = 0; band < q->bands; band++)
+- qdisc_destroy(q->queues[band]);
++ qdisc_put(q->queues[band]);
+
+ kfree(q->queues);
+ }
+@@ -204,7 +204,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
+ q->queues[i] = &noop_qdisc;
+ qdisc_tree_reduce_backlog(child, child->q.qlen,
+ child->qstats.backlog);
+- qdisc_destroy(child);
++ qdisc_put(child);
+ }
+ }
+
+@@ -228,7 +228,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
+ qdisc_tree_reduce_backlog(old,
+ old->q.qlen,
+ old->qstats.backlog);
+- qdisc_destroy(old);
++ qdisc_put(old);
+ }
+ sch_tree_unlock(sch);
+ }
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 02d8d3fd84a5c..ad400f4f9a2d6 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -1054,7 +1054,7 @@ static void netem_destroy(struct Qdisc *sch)
+
+ qdisc_watchdog_cancel(&q->watchdog);
+ if (q->qdisc)
+- qdisc_destroy(q->qdisc);
++ qdisc_put(q->qdisc);
+ dist_free(q->delay_dist);
+ dist_free(q->slot_dist);
+ }
+diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
+index 1cbbd8c31405b..0e6f34bd9a683 100644
+--- a/net/sched/sch_prio.c
++++ b/net/sched/sch_prio.c
+@@ -175,7 +175,7 @@ prio_destroy(struct Qdisc *sch)
+ tcf_block_put(q->block);
+ prio_offload(sch, NULL);
+ for (prio = 0; prio < q->bands; prio++)
+- qdisc_destroy(q->queues[prio]);
++ qdisc_put(q->queues[prio]);
+ }
+
+ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
+@@ -205,7 +205,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
+ extack);
+ if (!queues[i]) {
+ while (i > oldbands)
+- qdisc_destroy(queues[--i]);
++ qdisc_put(queues[--i]);
+ return -ENOMEM;
+ }
+ }
+@@ -220,7 +220,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
+
+ qdisc_tree_reduce_backlog(child, child->q.qlen,
+ child->qstats.backlog);
+- qdisc_destroy(child);
++ qdisc_put(child);
+ }
+
+ for (i = oldbands; i < q->bands; i++) {
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index a93402fe1a9f8..fa6ad95fb6fb4 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -524,7 +524,7 @@ set_change_agg:
+ return 0;
+
+ destroy_class:
+- qdisc_destroy(cl->qdisc);
++ qdisc_put(cl->qdisc);
+ kfree(cl);
+ return err;
+ }
+@@ -535,7 +535,7 @@ static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
+
+ qfq_rm_from_agg(q, cl);
+ gen_kill_estimator(&cl->rate_est);
+- qdisc_destroy(cl->qdisc);
++ qdisc_put(cl->qdisc);
+ kfree(cl);
+ }
+
+diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
+index e42f890fa1478..0424aa747c341 100644
+--- a/net/sched/sch_red.c
++++ b/net/sched/sch_red.c
+@@ -181,7 +181,7 @@ static void red_destroy(struct Qdisc *sch)
+
+ del_timer_sync(&q->adapt_timer);
+ red_offload(sch, false);
+- qdisc_destroy(q->qdisc);
++ qdisc_put(q->qdisc);
+ }
+
+ static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
+@@ -236,7 +236,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
+ if (child) {
+ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+ q->qdisc->qstats.backlog);
+- qdisc_destroy(q->qdisc);
++ qdisc_put(q->qdisc);
+ q->qdisc = child;
+ }
+
+diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
+index 1aa95e761671e..81d205acb1b6a 100644
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -470,7 +470,7 @@ static void sfb_destroy(struct Qdisc *sch)
+ struct sfb_sched_data *q = qdisc_priv(sch);
+
+ tcf_block_put(q->block);
+- qdisc_destroy(q->qdisc);
++ qdisc_put(q->qdisc);
+ }
+
+ static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
+@@ -524,7 +524,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
+
+ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+ q->qdisc->qstats.backlog);
+- qdisc_destroy(q->qdisc);
++ qdisc_put(q->qdisc);
+ q->qdisc = child;
+
+ q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
+diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
+index 6f74a426f159e..dd29de1418b76 100644
+--- a/net/sched/sch_tbf.c
++++ b/net/sched/sch_tbf.c
+@@ -392,7 +392,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
+ if (child) {
+ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+ q->qdisc->qstats.backlog);
+- qdisc_destroy(q->qdisc);
++ qdisc_put(q->qdisc);
+ q->qdisc = child;
+ }
+ q->limit = qopt->limit;
+@@ -438,7 +438,7 @@ static void tbf_destroy(struct Qdisc *sch)
+ struct tbf_sched_data *q = qdisc_priv(sch);
+
+ qdisc_watchdog_cancel(&q->watchdog);
+- qdisc_destroy(q->qdisc);
++ qdisc_put(q->qdisc);
+ }
+
+ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
+diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
+index 507fd5210c1cd..3fc216644e0e8 100644
+--- a/sound/core/control_compat.c
++++ b/sound/core/control_compat.c
+@@ -279,6 +279,7 @@ static int copy_ctl_value_to_user(void __user *userdata,
+ struct snd_ctl_elem_value *data,
+ int type, int count)
+ {
++ struct snd_ctl_elem_value32 __user *data32 = userdata;
+ int i, size;
+
+ if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
+@@ -295,6 +296,8 @@ static int copy_ctl_value_to_user(void __user *userdata,
+ if (copy_to_user(valuep, data->value.bytes.data, size))
+ return -EFAULT;
+ }
++ if (copy_to_user(&data32->id, &data->id, sizeof(data32->id)))
++ return -EFAULT;
+ return 0;
+ }
+
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index 2a286167460f6..2b3bd6f31e4c1 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -162,7 +162,7 @@ snd_pcm_hw_param_value_min(const struct snd_pcm_hw_params *params,
+ *
+ * Return the maximum value for field PAR.
+ */
+-static unsigned int
++static int
+ snd_pcm_hw_param_value_max(const struct snd_pcm_hw_params *params,
+ snd_pcm_hw_param_t var, int *dir)
+ {
+@@ -697,18 +697,24 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *oss_params,
+ struct snd_pcm_hw_params *slave_params)
+ {
+- size_t s;
+- size_t oss_buffer_size, oss_period_size, oss_periods;
+- size_t min_period_size, max_period_size;
++ ssize_t s;
++ ssize_t oss_buffer_size;
++ ssize_t oss_period_size, oss_periods;
++ ssize_t min_period_size, max_period_size;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ size_t oss_frame_size;
+
+ oss_frame_size = snd_pcm_format_physical_width(params_format(oss_params)) *
+ params_channels(oss_params) / 8;
+
++ oss_buffer_size = snd_pcm_hw_param_value_max(slave_params,
++ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
++ NULL);
++ if (oss_buffer_size <= 0)
++ return -EINVAL;
+ oss_buffer_size = snd_pcm_plug_client_size(substream,
+- snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size;
+- if (!oss_buffer_size)
++ oss_buffer_size * oss_frame_size);
++ if (oss_buffer_size <= 0)
+ return -EINVAL;
+ oss_buffer_size = rounddown_pow_of_two(oss_buffer_size);
+ if (atomic_read(&substream->mmap_count)) {
+@@ -745,7 +751,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
+
+ min_period_size = snd_pcm_plug_client_size(substream,
+ snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
+- if (min_period_size) {
++ if (min_period_size > 0) {
+ min_period_size *= oss_frame_size;
+ min_period_size = roundup_pow_of_two(min_period_size);
+ if (oss_period_size < min_period_size)
+@@ -754,7 +760,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
+
+ max_period_size = snd_pcm_plug_client_size(substream,
+ snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
+- if (max_period_size) {
++ if (max_period_size > 0) {
+ max_period_size *= oss_frame_size;
+ max_period_size = rounddown_pow_of_two(max_period_size);
+ if (oss_period_size > max_period_size)
+@@ -767,7 +773,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
+ oss_periods = substream->oss.setup.periods;
+
+ s = snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL);
+- if (runtime->oss.maxfrags && s > runtime->oss.maxfrags)
++ if (s > 0 && runtime->oss.maxfrags && s > runtime->oss.maxfrags)
+ s = runtime->oss.maxfrags;
+ if (oss_periods > s)
+ oss_periods = s;
+@@ -893,8 +899,15 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
+ err = -EINVAL;
+ goto failure;
+ }
+- choose_rate(substream, sparams, runtime->oss.rate);
+- snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_CHANNELS, runtime->oss.channels, NULL);
++
++ err = choose_rate(substream, sparams, runtime->oss.rate);
++ if (err < 0)
++ goto failure;
++ err = snd_pcm_hw_param_near(substream, sparams,
++ SNDRV_PCM_HW_PARAM_CHANNELS,
++ runtime->oss.channels, NULL);
++ if (err < 0)
++ goto failure;
+
+ format = snd_pcm_oss_format_from(runtime->oss.format);
+
+@@ -1961,7 +1974,7 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
+ if (runtime->oss.subdivision || runtime->oss.fragshift)
+ return -EINVAL;
+ fragshift = val & 0xffff;
+- if (fragshift >= 31)
++ if (fragshift >= 25) /* should be large enough */
+ return -EINVAL;
+ runtime->oss.fragshift = fragshift;
+ runtime->oss.maxfrags = (val >> 16) & 0xffff;
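
The pcm_oss changes above reject fragshift values of 25 and up because the low 16 bits of the SNDCTL_DSP_SETFRAGMENT argument encode log2 of the fragment size in bytes, and values near 31 overflow the later size arithmetic. A hedged user-space sketch of how that ioctl is normally encoded (standard OSS usage, not from the patch):

	#include <sys/ioctl.h>
	#include <sys/soundcard.h>

	/* Request 4 fragments of 2^12 = 4096 bytes each.  With this patch a
	 * log2 size of 25 or more (>= 32 MiB per fragment) is rejected. */
	static int set_fragments(int fd)
	{
		int frag = (4 << 16) | 12;	/* high 16 bits: count, low 16: log2 size */

		return ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &frag);
	}
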
+diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
+index 7d2c5de380317..33ec1a744ab9b 100644
+--- a/sound/soc/qcom/qdsp6/q6routing.c
++++ b/sound/soc/qcom/qdsp6/q6routing.c
+@@ -440,14 +440,16 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
+ struct session_data *session = &data->sessions[session_id];
+
+ if (ucontrol->value.integer.value[0]) {
++ if (session->port_id == be_id)
++ return 0;
++
+ session->port_id = be_id;
+ snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
+ } else {
+- if (session->port_id == be_id) {
+- session->port_id = -1;
++ if (session->port_id == -1 || session->port_id != be_id)
+ return 0;
+- }
+
++ session->port_id = -1;
+ snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
+ }
+
+diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
+index 7f91b6013ddc7..bbc9efa20dfaa 100644
+--- a/tools/build/Makefile.feature
++++ b/tools/build/Makefile.feature
+@@ -50,7 +50,6 @@ FEATURE_TESTS_BASIC := \
+ numa_num_possible_cpus \
+ libperl \
+ libpython \
+- libpython-version \
+ libslang \
+ libcrypto \
+ libunwind \
+diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
+index 6df574750bc9c..6696a4b79614a 100644
+--- a/tools/build/feature/Makefile
++++ b/tools/build/feature/Makefile
+@@ -29,7 +29,6 @@ FILES= \
+ test-numa_num_possible_cpus.bin \
+ test-libperl.bin \
+ test-libpython.bin \
+- test-libpython-version.bin \
+ test-libslang.bin \
+ test-libcrypto.bin \
+ test-libunwind.bin \
+@@ -203,9 +202,6 @@ $(OUTPUT)test-libperl.bin:
+ $(OUTPUT)test-libpython.bin:
+ $(BUILD) $(FLAGS_PYTHON_EMBED)
+
+-$(OUTPUT)test-libpython-version.bin:
+- $(BUILD)
+-
+ $(OUTPUT)test-libbfd.bin:
+ $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
+
+diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
+index 8282bbe547c4e..1004a8cd7ac3c 100644
+--- a/tools/build/feature/test-all.c
++++ b/tools/build/feature/test-all.c
+@@ -14,10 +14,6 @@
+ # include "test-libpython.c"
+ #undef main
+
+-#define main main_test_libpython_version
+-# include "test-libpython-version.c"
+-#undef main
+-
+ #define main main_test_libperl
+ # include "test-libperl.c"
+ #undef main
+@@ -181,7 +177,6 @@
+ int main(int argc, char *argv[])
+ {
+ main_test_libpython();
+- main_test_libpython_version();
+ main_test_libperl();
+ main_test_hello();
+ main_test_libelf();
+diff --git a/tools/build/feature/test-libpython-version.c b/tools/build/feature/test-libpython-version.c
+deleted file mode 100644
+index 47714b942d4d3..0000000000000
+--- a/tools/build/feature/test-libpython-version.c
++++ /dev/null
+@@ -1,11 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-#include <Python.h>
+-
+-#if PY_VERSION_HEX >= 0x03000000
+- #error
+-#endif
+-
+-int main(void)
+-{
+- return 0;
+-}
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index a328beb9f505c..8e59b42119176 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -224,8 +224,6 @@ endif
+
+ FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
+ FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
+-FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
+-FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
+
+ CFLAGS += -fno-omit-frame-pointer
+ CFLAGS += -ggdb3