author    | 2020-10-29 07:18:33 -0400
committer | 2020-10-29 07:18:33 -0400
commit    | bb34eaadc95f27973636b3035add7cb29eacddfc (patch)
tree      | 464e8b466a0e23447e4e33bc6c54c76e3cffb335
parent    | Linux patch 4.19.152 (diff)
Linux patch 4.19.153
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README               |    4
-rw-r--r-- | 1152_linux-4.19.153.patch | 4259
2 files changed, 4263 insertions, 0 deletions
diff --git a/0000_README b/0000_README index ddccc76f..f3a1010c 100644 --- a/0000_README +++ b/0000_README @@ -647,6 +647,10 @@ Patch: 1151_linux-4.19.152.patch From: https://www.kernel.org Desc: Linux 4.19.152 +Patch: 1152_linux-4.19.153.patch +From: https://www.kernel.org +Desc: Linux 4.19.153 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1152_linux-4.19.153.patch b/1152_linux-4.19.153.patch new file mode 100644 index 00000000..f5fa7160 --- /dev/null +++ b/1152_linux-4.19.153.patch @@ -0,0 +1,4259 @@ +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 30752db575870..fb129272240c9 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -558,7 +558,7 @@ + loops can be debugged more effectively on production + systems. + +- clearcpuid=BITNUM [X86] ++ clearcpuid=BITNUM[,BITNUM...] [X86] + Disable CPUID feature X for the kernel. See + arch/x86/include/asm/cpufeatures.h for the valid bit + numbers. Note the Linux specific bits are not necessarily +diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt +index 7eb9366422f54..3c617d620b6f8 100644 +--- a/Documentation/networking/ip-sysctl.txt ++++ b/Documentation/networking/ip-sysctl.txt +@@ -934,12 +934,14 @@ icmp_ratelimit - INTEGER + icmp_msgs_per_sec - INTEGER + Limit maximal number of ICMP packets sent per second from this host. + Only messages whose type matches icmp_ratemask (see below) are +- controlled by this limit. ++ controlled by this limit. For security reasons, the precise count ++ of messages per second is randomized. + Default: 1000 + + icmp_msgs_burst - INTEGER + icmp_msgs_per_sec controls number of ICMP packets sent per second, + while icmp_msgs_burst controls the burst size of these packets. ++ For security reasons, the precise burst size is randomized. 
+ Default: 50 + + icmp_ratemask - INTEGER +diff --git a/Makefile b/Makefile +index aa79ce7bfdc73..d5e93bf207998 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 19 +-SUBLEVEL = 152 ++SUBLEVEL = 153 + EXTRAVERSION = + NAME = "People's Front" + +diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig +index c285a83cbf08f..df35ea1912e8b 100644 +--- a/arch/arc/plat-hsdk/Kconfig ++++ b/arch/arc/plat-hsdk/Kconfig +@@ -11,5 +11,6 @@ menuconfig ARC_SOC_HSDK + select ARC_HAS_ACCL_REGS + select ARC_IRQ_NO_AUTOSAVE + select CLK_HSDK ++ select RESET_CONTROLLER + select RESET_HSDK + select MIGHT_HAVE_PCI +diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c +index 808efbb89b88c..02f613def40dc 100644 +--- a/arch/arm/mm/cache-l2x0.c ++++ b/arch/arm/mm/cache-l2x0.c +@@ -1261,20 +1261,28 @@ static void __init l2c310_of_parse(const struct device_node *np, + + ret = of_property_read_u32(np, "prefetch-data", &val); + if (ret == 0) { +- if (val) ++ if (val) { + prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH; +- else ++ *aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH; ++ } else { + prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; ++ *aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; ++ } ++ *aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; + } else if (ret != -EINVAL) { + pr_err("L2C-310 OF prefetch-data property value is missing\n"); + } + + ret = of_property_read_u32(np, "prefetch-instr", &val); + if (ret == 0) { +- if (val) ++ if (val) { + prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH; +- else ++ *aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH; ++ } else { + prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; ++ *aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; ++ } ++ *aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; + } else if (ret != -EINVAL) { + pr_err("L2C-310 OF prefetch-instr property value is missing\n"); + } +diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h +index 9e516fe3daaba..668d8a121f1a0 100644 +--- a/arch/powerpc/include/asm/drmem.h ++++ b/arch/powerpc/include/asm/drmem.h +@@ -12,6 +12,8 @@ + #ifndef _ASM_POWERPC_LMB_H + #define _ASM_POWERPC_LMB_H + ++#include <linux/sched.h> ++ + struct drmem_lmb { + u64 base_addr; + u32 drc_index; +@@ -27,8 +29,22 @@ struct drmem_lmb_info { + + extern struct drmem_lmb_info *drmem_info; + ++static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb, ++ const struct drmem_lmb *start) ++{ ++ /* ++ * DLPAR code paths can take several milliseconds per element ++ * when interacting with firmware. Ensure that we don't ++ * unfairly monopolize the CPU. 
++ */ ++ if (((++lmb - start) % 16) == 0) ++ cond_resched(); ++ ++ return lmb; ++} ++ + #define for_each_drmem_lmb_in_range(lmb, start, end) \ +- for ((lmb) = (start); (lmb) < (end); (lmb)++) ++ for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start)) + + #define for_each_drmem_lmb(lmb) \ + for_each_drmem_lmb_in_range((lmb), \ +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h +index af99716615122..494b0283f2129 100644 +--- a/arch/powerpc/include/asm/reg.h ++++ b/arch/powerpc/include/asm/reg.h +@@ -788,7 +788,7 @@ + #define THRM1_TIN (1 << 31) + #define THRM1_TIV (1 << 30) + #define THRM1_THRES(x) ((x&0x7f)<<23) +-#define THRM3_SITV(x) ((x&0x3fff)<<1) ++#define THRM3_SITV(x) ((x & 0x1fff) << 1) + #define THRM1_TID (1<<2) + #define THRM1_TIE (1<<1) + #define THRM1_V (1<<0) +diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c +index e2ab8a111b693..a130473f16e5b 100644 +--- a/arch/powerpc/kernel/tau_6xx.c ++++ b/arch/powerpc/kernel/tau_6xx.c +@@ -13,13 +13,14 @@ + */ + + #include <linux/errno.h> +-#include <linux/jiffies.h> + #include <linux/kernel.h> + #include <linux/param.h> + #include <linux/string.h> + #include <linux/mm.h> + #include <linux/interrupt.h> + #include <linux/init.h> ++#include <linux/delay.h> ++#include <linux/workqueue.h> + + #include <asm/io.h> + #include <asm/reg.h> +@@ -39,8 +40,6 @@ static struct tau_temp + unsigned char grew; + } tau[NR_CPUS]; + +-struct timer_list tau_timer; +- + #undef DEBUG + + /* TODO: put these in a /proc interface, with some sanity checks, and maybe +@@ -50,7 +49,7 @@ struct timer_list tau_timer; + #define step_size 2 /* step size when temp goes out of range */ + #define window_expand 1 /* expand the window by this much */ + /* configurable values for shrinking the window */ +-#define shrink_timer 2*HZ /* period between shrinking the window */ ++#define shrink_timer 2000 /* period between shrinking the window */ + #define min_window 2 /* minimum window size, degrees C */ + + static void set_thresholds(unsigned long cpu) +@@ -111,11 +110,6 @@ static void TAUupdate(int cpu) + #ifdef DEBUG + printk("grew = %d\n", tau[cpu].grew); + #endif +- +-#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */ +- set_thresholds(cpu); +-#endif +- + } + + #ifdef CONFIG_TAU_INT +@@ -178,27 +172,27 @@ static void tau_timeout(void * info) + * complex sleep code needs to be added. One mtspr every time + * tau_timeout is called is probably not a big deal. + * +- * Enable thermal sensor and set up sample interval timer +- * need 20 us to do the compare.. until a nice 'cpu_speed' function +- * call is implemented, just assume a 500 mhz clock. It doesn't really +- * matter if we take too long for a compare since it's all interrupt +- * driven anyway. +- * +- * use a extra long time.. (60 us @ 500 mhz) ++ * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet" ++ * recommends that "the maximum value be set in THRM3 under all ++ * conditions." 
+ */ +- mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E); ++ mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E); + + local_irq_restore(flags); + } + +-static void tau_timeout_smp(struct timer_list *unused) +-{ ++static struct workqueue_struct *tau_workq; + +- /* schedule ourselves to be run again */ +- mod_timer(&tau_timer, jiffies + shrink_timer) ; ++static void tau_work_func(struct work_struct *work) ++{ ++ msleep(shrink_timer); + on_each_cpu(tau_timeout, NULL, 0); ++ /* schedule ourselves to be run again */ ++ queue_work(tau_workq, work); + } + ++DECLARE_WORK(tau_work, tau_work_func); ++ + /* + * setup the TAU + * +@@ -231,21 +225,16 @@ static int __init TAU_init(void) + return 1; + } + +- +- /* first, set up the window shrinking timer */ +- timer_setup(&tau_timer, tau_timeout_smp, 0); +- tau_timer.expires = jiffies + shrink_timer; +- add_timer(&tau_timer); ++ tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1); ++ if (!tau_workq) ++ return -ENOMEM; + + on_each_cpu(TAU_init_smp, NULL, 0); + +- printk("Thermal assist unit "); +-#ifdef CONFIG_TAU_INT +- printk("using interrupts, "); +-#else +- printk("using timers, "); +-#endif +- printk("shrink_timer: %d jiffies\n", shrink_timer); ++ queue_work(tau_workq, &tau_work); ++ ++ pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n", ++ IS_ENABLED(CONFIG_TAU_INT) ? "interrupts" : "workqueue", shrink_timer); + tau_initialized = 1; + + return 0; +diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c +index 31ca557af60bc..262b8c5e1b9d0 100644 +--- a/arch/powerpc/platforms/pseries/rng.c ++++ b/arch/powerpc/platforms/pseries/rng.c +@@ -40,6 +40,7 @@ static __init int rng_init(void) + + ppc_md.get_random_seed = pseries_get_random_long; + ++ of_node_put(dn); + return 0; + } + machine_subsys_initcall(pseries, rng_init); +diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c +index bbc839a98c414..003deaabb5680 100644 +--- a/arch/powerpc/sysdev/xics/icp-hv.c ++++ b/arch/powerpc/sysdev/xics/icp-hv.c +@@ -179,6 +179,7 @@ int icp_hv_init(void) + + icp_ops = &icp_hv_ops; + ++ of_node_put(np); + return 0; + } + +diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c +index 3210fee27e7f9..0014d26391fa6 100644 +--- a/arch/x86/events/amd/iommu.c ++++ b/arch/x86/events/amd/iommu.c +@@ -387,7 +387,7 @@ static __init int _init_events_attrs(void) + while (amd_iommu_v2_event_descs[i].attr.attr.name) + i++; + +- attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL); ++ attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL); + if (!attrs) + return -ENOMEM; + +diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c +index 6abd83572b016..9692ccc583bb3 100644 +--- a/arch/x86/kernel/fpu/init.c ++++ b/arch/x86/kernel/fpu/init.c +@@ -249,9 +249,9 @@ static void __init fpu__init_system_ctx_switch(void) + */ + static void __init fpu__init_parse_early_param(void) + { +- char arg[32]; ++ char arg[128]; + char *argptr = arg; +- int bit; ++ int arglen, res, bit; + + if (cmdline_find_option_bool(boot_command_line, "no387")) + setup_clear_cpu_cap(X86_FEATURE_FPU); +@@ -271,12 +271,26 @@ static void __init fpu__init_parse_early_param(void) + if (cmdline_find_option_bool(boot_command_line, "noxsaves")) + setup_clear_cpu_cap(X86_FEATURE_XSAVES); + +- if (cmdline_find_option(boot_command_line, "clearcpuid", arg, +- sizeof(arg)) && +- get_option(&argptr, &bit) && +- bit >= 0 && +- bit < NCAPINTS * 32) +- setup_clear_cpu_cap(bit); ++ arglen = cmdline_find_option(boot_command_line, 
"clearcpuid", arg, sizeof(arg)); ++ if (arglen <= 0) ++ return; ++ ++ pr_info("Clearing CPUID bits:"); ++ do { ++ res = get_option(&argptr, &bit); ++ if (res == 0 || res == 3) ++ break; ++ ++ /* If the argument was too long, the last bit may be cut off */ ++ if (res == 1 && arglen >= sizeof(arg)) ++ break; ++ ++ if (bit >= 0 && bit < NCAPINTS * 32) { ++ pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit)); ++ setup_clear_cpu_cap(bit); ++ } ++ } while (res == 2); ++ pr_cont("\n"); + } + + /* +diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c +index 0f8b9b900b0e7..996eb53f8eb75 100644 +--- a/arch/x86/kernel/nmi.c ++++ b/arch/x86/kernel/nmi.c +@@ -104,7 +104,6 @@ fs_initcall(nmi_warning_debugfs); + + static void nmi_check_duration(struct nmiaction *action, u64 duration) + { +- u64 whole_msecs = READ_ONCE(action->max_duration); + int remainder_ns, decimal_msecs; + + if (duration < nmi_longest_ns || duration < action->max_duration) +@@ -112,12 +111,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration) + + action->max_duration = duration; + +- remainder_ns = do_div(whole_msecs, (1000 * 1000)); ++ remainder_ns = do_div(duration, (1000 * 1000)); + decimal_msecs = remainder_ns / 1000; + + printk_ratelimited(KERN_INFO + "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n", +- action->handler, whole_msecs, decimal_msecs); ++ action->handler, duration, decimal_msecs); + } + + static int nmi_handle(unsigned int type, struct pt_regs *regs) +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c +index a2ff5c214738a..5faa49a95ac97 100644 +--- a/arch/x86/kvm/mmu.c ++++ b/arch/x86/kvm/mmu.c +@@ -6225,6 +6225,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) + cond_resched_lock(&kvm->mmu_lock); + } + } ++ kvm_mmu_commit_zap_page(kvm, &invalid_list); + + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, rcu_idx); +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index cb09a0ec87500..a0c3d1b4b295b 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -5380,6 +5380,7 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, + * - Tell IOMMU to use legacy mode for this interrupt. + * - Retrieve ga_tag of prior interrupt remapping data. + */ ++ pi.prev_ga_tag = 0; + pi.is_guest_mode = false; + ret = irq_set_vcpu_affinity(host_irq, &pi); + +diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c +index b4f16202ab7af..182783801ffa6 100644 +--- a/crypto/algif_aead.c ++++ b/crypto/algif_aead.c +@@ -82,7 +82,7 @@ static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm, + SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm); + + skcipher_request_set_tfm(skreq, null_tfm); +- skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG, ++ skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP, + NULL, NULL); + skcipher_request_set_crypt(skreq, src, dst, len, NULL); + +@@ -295,19 +295,20 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, + areq->outlen = outlen; + + aead_request_set_callback(&areq->cra_u.aead_req, +- CRYPTO_TFM_REQ_MAY_BACKLOG, ++ CRYPTO_TFM_REQ_MAY_SLEEP, + af_alg_async_cb, areq); + err = ctx->enc ? 
crypto_aead_encrypt(&areq->cra_u.aead_req) : + crypto_aead_decrypt(&areq->cra_u.aead_req); + + /* AIO operation in progress */ +- if (err == -EINPROGRESS || err == -EBUSY) ++ if (err == -EINPROGRESS) + return -EIOCBQUEUED; + + sock_put(sk); + } else { + /* Synchronous operation */ + aead_request_set_callback(&areq->cra_u.aead_req, ++ CRYPTO_TFM_REQ_MAY_SLEEP | + CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &ctx->wait); + err = crypto_wait_req(ctx->enc ? +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c +index 1cb106c46043d..9d2e9783c0d4e 100644 +--- a/crypto/algif_skcipher.c ++++ b/crypto/algif_skcipher.c +@@ -127,7 +127,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, + crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); + + /* AIO operation in progress */ +- if (err == -EINPROGRESS || err == -EBUSY) ++ if (err == -EINPROGRESS) + return -EIOCBQUEUED; + + sock_put(sk); +diff --git a/drivers/android/binder.c b/drivers/android/binder.c +index 112b5b50ad3c4..cda4f7eb58ead 100644 +--- a/drivers/android/binder.c ++++ b/drivers/android/binder.c +@@ -285,7 +285,7 @@ struct binder_device { + struct binder_work { + struct list_head entry; + +- enum { ++ enum binder_work_type { + BINDER_WORK_TRANSACTION = 1, + BINDER_WORK_TRANSACTION_COMPLETE, + BINDER_WORK_RETURN_ERROR, +@@ -895,27 +895,6 @@ static struct binder_work *binder_dequeue_work_head_ilocked( + return w; + } + +-/** +- * binder_dequeue_work_head() - Dequeues the item at head of list +- * @proc: binder_proc associated with list +- * @list: list to dequeue head +- * +- * Removes the head of the list if there are items on the list +- * +- * Return: pointer dequeued binder_work, NULL if list was empty +- */ +-static struct binder_work *binder_dequeue_work_head( +- struct binder_proc *proc, +- struct list_head *list) +-{ +- struct binder_work *w; +- +- binder_inner_proc_lock(proc); +- w = binder_dequeue_work_head_ilocked(list); +- binder_inner_proc_unlock(proc); +- return w; +-} +- + static void + binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); + static void binder_free_thread(struct binder_thread *thread); +@@ -4242,13 +4221,17 @@ static void binder_release_work(struct binder_proc *proc, + struct list_head *list) + { + struct binder_work *w; ++ enum binder_work_type wtype; + + while (1) { +- w = binder_dequeue_work_head(proc, list); ++ binder_inner_proc_lock(proc); ++ w = binder_dequeue_work_head_ilocked(list); ++ wtype = w ? 
w->type : 0; ++ binder_inner_proc_unlock(proc); + if (!w) + return; + +- switch (w->type) { ++ switch (wtype) { + case BINDER_WORK_TRANSACTION: { + struct binder_transaction *t; + +@@ -4282,9 +4265,11 @@ static void binder_release_work(struct binder_proc *proc, + kfree(death); + binder_stats_deleted(BINDER_STAT_DEATH); + } break; ++ case BINDER_WORK_NODE: ++ break; + default: + pr_err("unexpected work type, %d, not freed\n", +- w->type); ++ wtype); + break; + } + } +diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c +index efeb8137ec67f..48560e646e53e 100644 +--- a/drivers/bluetooth/hci_ldisc.c ++++ b/drivers/bluetooth/hci_ldisc.c +@@ -545,6 +545,7 @@ static void hci_uart_tty_close(struct tty_struct *tty) + clear_bit(HCI_UART_PROTO_READY, &hu->flags); + percpu_up_write(&hu->proto_lock); + ++ cancel_work_sync(&hu->init_ready); + cancel_work_sync(&hu->write_work); + + if (hdev) { +diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c +index d3fb0d657fa52..7b3aade431e5e 100644 +--- a/drivers/bluetooth/hci_serdev.c ++++ b/drivers/bluetooth/hci_serdev.c +@@ -369,6 +369,8 @@ void hci_uart_unregister_device(struct hci_uart *hu) + struct hci_dev *hdev = hu->hdev; + + clear_bit(HCI_UART_PROTO_READY, &hu->flags); ++ ++ cancel_work_sync(&hu->init_ready); + if (test_bit(HCI_UART_REGISTERED, &hu->flags)) + hci_unregister_dev(hdev); + hci_free_dev(hdev); +diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c +index c5f98cafc25c9..9b0b490d70ff4 100644 +--- a/drivers/cpufreq/armada-37xx-cpufreq.c ++++ b/drivers/cpufreq/armada-37xx-cpufreq.c +@@ -486,6 +486,12 @@ remove_opp: + /* late_initcall, to guarantee the driver is loaded after A37xx clock driver */ + late_initcall(armada37xx_cpufreq_driver_init); + ++static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = { ++ { .compatible = "marvell,armada-3700-nb-pm" }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match); ++ + MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>"); + MODULE_DESCRIPTION("Armada 37xx cpufreq driver"); + MODULE_LICENSE("GPL"); +diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c +index 28d24118c6450..f1ca66147c287 100644 +--- a/drivers/crypto/chelsio/chtls/chtls_cm.c ++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c +@@ -1057,6 +1057,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk, + ndev = n->dev; + if (!ndev) + goto free_dst; ++ if (is_vlan_dev(ndev)) ++ ndev = vlan_dev_real_dev(ndev); ++ + port_id = cxgb4_port_idx(ndev); + + csk = chtls_sock_create(cdev); +diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c +index 2c1f3ddb0cc79..e9573e7f9e803 100644 +--- a/drivers/crypto/chelsio/chtls/chtls_io.c ++++ b/drivers/crypto/chelsio/chtls/chtls_io.c +@@ -914,9 +914,9 @@ static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from) + return (__force int)cpu_to_be16(thdr->length); + } + +-static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk) ++static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk) + { +- return (cdev->max_host_sndbuf - sk->sk_wmem_queued); ++ return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0); + } + + static int csk_wait_memory(struct chtls_dev *cdev, +@@ -1217,6 +1217,7 @@ int chtls_sendpage(struct sock *sk, struct page *page, + copied = 0; + csk = rcu_dereference_sk_user_data(sk); + cdev = csk->cdev; ++ lock_sock(sk); + timeo = sock_sndtimeo(sk, flags & 
MSG_DONTWAIT); + + err = sk_stream_wait_connect(sk, &timeo); +diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c +index 27f7dad2d45d9..9b7b8558db31d 100644 +--- a/drivers/crypto/ixp4xx_crypto.c ++++ b/drivers/crypto/ixp4xx_crypto.c +@@ -530,7 +530,7 @@ static void release_ixp_crypto(struct device *dev) + + if (crypt_virt) { + dma_free_coherent(dev, +- NPE_QLEN_TOTAL * sizeof( struct crypt_ctl), ++ NPE_QLEN * sizeof(struct crypt_ctl), + crypt_virt, crypt_phys); + } + } +diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c +index ee0404e27a0f2..e4d7ef3bfb61d 100644 +--- a/drivers/crypto/mediatek/mtk-platform.c ++++ b/drivers/crypto/mediatek/mtk-platform.c +@@ -446,7 +446,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp) + static int mtk_desc_ring_alloc(struct mtk_cryp *cryp) + { + struct mtk_ring **ring = cryp->ring; +- int i, err = ENOMEM; ++ int i; + + for (i = 0; i < MTK_RING_MAX; i++) { + ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL); +@@ -473,14 +473,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp) + return 0; + + err_cleanup: +- for (; i--; ) { ++ do { + dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, + ring[i]->res_base, ring[i]->res_dma); + dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, + ring[i]->cmd_base, ring[i]->cmd_dma); + kfree(ring[i]); +- } +- return err; ++ } while (i--); ++ return -ENOMEM; + } + + static int mtk_crypto_probe(struct platform_device *pdev) +diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c +index 2faaa4069cdd8..4d31ef4724366 100644 +--- a/drivers/crypto/omap-sham.c ++++ b/drivers/crypto/omap-sham.c +@@ -456,6 +456,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length, + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); + u32 val, mask; + ++ if (likely(ctx->digcnt)) ++ omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt); ++ + /* + * Setting ALGO_CONST only for the first iteration and + * CLOSE_HASH only for the last one. Note that flags mode bits +diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c +index e2491754c468f..1ef47f7208b92 100644 +--- a/drivers/crypto/picoxcell_crypto.c ++++ b/drivers/crypto/picoxcell_crypto.c +@@ -1701,11 +1701,6 @@ static int spacc_probe(struct platform_device *pdev) + goto err_clk_put; + } + +- ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); +- if (ret) +- goto err_clk_disable; +- +- + /* + * Use an IRQ threshold of 50% as a default. This seems to be a + * reasonable trade off of latency against throughput but can be +@@ -1713,6 +1708,10 @@ static int spacc_probe(struct platform_device *pdev) + */ + engine->stat_irq_thresh = (engine->fifo_sz / 2); + ++ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); ++ if (ret) ++ goto err_clk_disable; ++ + /* + * Configure the interrupts. 
We only use the STAT_CNT interrupt as we + * only submit a new packet for processing when we complete another in +diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c +index b506eef6b146d..858ef4e15180b 100644 +--- a/drivers/edac/i5100_edac.c ++++ b/drivers/edac/i5100_edac.c +@@ -1072,16 +1072,15 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id) + PCI_DEVICE_ID_INTEL_5100_19, 0); + if (!einj) { + ret = -ENODEV; +- goto bail_einj; ++ goto bail_mc_free; + } + + rc = pci_enable_device(einj); + if (rc < 0) { + ret = rc; +- goto bail_disable_einj; ++ goto bail_einj; + } + +- + mci->pdev = &pdev->dev; + + priv = mci->pvt_info; +@@ -1147,14 +1146,14 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id) + bail_scrub: + priv->scrub_enable = 0; + cancel_delayed_work_sync(&(priv->i5100_scrubbing)); +- edac_mc_free(mci); +- +-bail_disable_einj: + pci_disable_device(einj); + + bail_einj: + pci_dev_put(einj); + ++bail_mc_free: ++ edac_mc_free(mci); ++ + bail_disable_ch1: + pci_disable_device(ch1mm); + +diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c +index 6ac26d1b929f0..3247689467435 100644 +--- a/drivers/edac/ti_edac.c ++++ b/drivers/edac/ti_edac.c +@@ -278,7 +278,8 @@ static int ti_edac_probe(struct platform_device *pdev) + + /* add EMIF ECC error handler */ + error_irq = platform_get_irq(pdev, 0); +- if (!error_irq) { ++ if (error_irq < 0) { ++ ret = error_irq; + edac_printk(KERN_ERR, EDAC_MOD_NAME, + "EMIF irq number not defined.\n"); + goto err; +diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c +index 90ed20083009f..05eba6dec5ebf 100644 +--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c ++++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c +@@ -2119,7 +2119,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev + intel_dp->dpcd, + sizeof(intel_dp->dpcd)); + cdv_intel_edp_panel_vdd_off(gma_encoder); +- if (ret == 0) { ++ if (ret <= 0) { + /* if this fails, presume the device is a ghost */ + DRM_INFO("failed to retrieve link info, disabling eDP\n"); + cdv_intel_dp_encoder_destroy(encoder); +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c +index a9da1526c40ae..11bd2ca22a2e6 100644 +--- a/drivers/hid/hid-input.c ++++ b/drivers/hid/hid-input.c +@@ -796,7 +796,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel + case 0x3b: /* Battery Strength */ + hidinput_setup_battery(device, HID_INPUT_REPORT, field); + usage->type = EV_PWR; +- goto ignore; ++ return; + + case 0x3c: /* Invert */ + map_key_clear(BTN_TOOL_RUBBER); +@@ -1052,7 +1052,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel + case HID_DC_BATTERYSTRENGTH: + hidinput_setup_battery(device, HID_INPUT_REPORT, field); + usage->type = EV_PWR; +- goto ignore; ++ return; + } + goto unknown; + +diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c +index bf4675a273965..9be8c31f613fd 100644 +--- a/drivers/hid/hid-roccat-kone.c ++++ b/drivers/hid/hid-roccat-kone.c +@@ -297,31 +297,40 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj, + struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev)); + struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); + int retval = 0, difference, old_profile; ++ struct kone_settings *settings = (struct kone_settings *)buf; + + /* I need to get my data in one piece */ + if (off != 0 || count != sizeof(struct 
kone_settings)) + return -EINVAL; + + mutex_lock(&kone->kone_lock); +- difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings)); ++ difference = memcmp(settings, &kone->settings, ++ sizeof(struct kone_settings)); + if (difference) { +- retval = kone_set_settings(usb_dev, +- (struct kone_settings const *)buf); +- if (retval) { +- mutex_unlock(&kone->kone_lock); +- return retval; ++ if (settings->startup_profile < 1 || ++ settings->startup_profile > 5) { ++ retval = -EINVAL; ++ goto unlock; + } + ++ retval = kone_set_settings(usb_dev, settings); ++ if (retval) ++ goto unlock; ++ + old_profile = kone->settings.startup_profile; +- memcpy(&kone->settings, buf, sizeof(struct kone_settings)); ++ memcpy(&kone->settings, settings, sizeof(struct kone_settings)); + + kone_profile_activated(kone, kone->settings.startup_profile); + + if (kone->settings.startup_profile != old_profile) + kone_profile_report(kone, kone->settings.startup_profile); + } ++unlock: + mutex_unlock(&kone->kone_lock); + ++ if (retval) ++ return retval; ++ + return sizeof(struct kone_settings); + } + static BIN_ATTR(settings, 0660, kone_sysfs_read_settings, +diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c +index 47576c4600105..9af5ab52ca31c 100644 +--- a/drivers/hwmon/pmbus/max34440.c ++++ b/drivers/hwmon/pmbus/max34440.c +@@ -400,7 +400,6 @@ static struct pmbus_driver_info max34440_info[] = { + .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, +- .read_byte_data = max34440_read_byte_data, + .read_word_data = max34440_read_word_data, + .write_word_data = max34440_write_word_data, + }, +@@ -431,7 +430,6 @@ static struct pmbus_driver_info max34440_info[] = { + .func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, +- .read_byte_data = max34440_read_byte_data, + .read_word_data = max34440_read_word_data, + .write_word_data = max34440_write_word_data, + }, +@@ -467,7 +465,6 @@ static struct pmbus_driver_info max34440_info[] = { + .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, +- .read_byte_data = max34440_read_byte_data, + .read_word_data = max34440_read_word_data, + .write_word_data = max34440_write_word_data, + }, +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c +index 2acc30c3d5b2d..01052de6bedbf 100644 +--- a/drivers/infiniband/core/ucma.c ++++ b/drivers/infiniband/core/ucma.c +@@ -588,6 +588,7 @@ static int ucma_free_ctx(struct ucma_context *ctx) + list_move_tail(&uevent->list, &list); + } + list_del(&ctx->list); ++ events_reported = ctx->events_reported; + mutex_unlock(&ctx->file->mut); + + list_for_each_entry_safe(uevent, tmp, &list, list) { +@@ -597,7 +598,6 @@ static int ucma_free_ctx(struct ucma_context *ctx) + kfree(uevent); + } + +- events_reported = ctx->events_reported; + mutex_destroy(&ctx->mutex); + kfree(ctx); + return events_reported; +@@ -1476,7 +1476,9 @@ static ssize_t ucma_process_join(struct ucma_file *file, + return 0; + + err3: ++ mutex_lock(&ctx->mutex); + rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr); ++ mutex_unlock(&ctx->mutex); + ucma_cleanup_mc_events(mc); + err2: + mutex_lock(&mut); +@@ -1644,7 +1646,9 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file, + + 
cur_file = ctx->file; + if (cur_file == new_file) { ++ mutex_lock(&cur_file->mut); + resp.events_reported = ctx->events_reported; ++ mutex_unlock(&cur_file->mut); + goto response; + } + +diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c +index 8c79a480f2b76..d3e11503e67ca 100644 +--- a/drivers/infiniband/hw/mlx4/cm.c ++++ b/drivers/infiniband/hw/mlx4/cm.c +@@ -307,6 +307,9 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id) + if (!sriov->is_going_down) { + id->scheduled_delete = 1; + schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT); ++ } else if (id->scheduled_delete) { ++ /* Adjust timeout if already scheduled */ ++ mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT); + } + spin_unlock_irqrestore(&sriov->going_down_lock, flags); + spin_unlock(&sriov->id_map_lock); +diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c +index 5aaa2a6c431b6..418b9312fb2d7 100644 +--- a/drivers/infiniband/hw/mlx4/mad.c ++++ b/drivers/infiniband/hw/mlx4/mad.c +@@ -1305,6 +1305,18 @@ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg) + spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); + } + ++static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg) ++{ ++ unsigned long flags; ++ struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context; ++ struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); ++ ++ spin_lock_irqsave(&dev->sriov.going_down_lock, flags); ++ if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE) ++ queue_work(ctx->wi_wq, &ctx->work); ++ spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); ++} ++ + static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx, + struct mlx4_ib_demux_pv_qp *tun_qp, + int index) +@@ -2000,7 +2012,8 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, + cq_size *= 2; + + cq_attr.cqe = cq_size; +- ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, ++ ctx->cq = ib_create_cq(ctx->ib_dev, ++ create_tun ? 
mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler, + NULL, ctx, &cq_attr); + if (IS_ERR(ctx->cq)) { + ret = PTR_ERR(ctx->cq); +@@ -2037,6 +2050,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, + INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker); + + ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; ++ ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq; + + ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); + if (ret) { +@@ -2180,7 +2194,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, + goto err_mcg; + } + +- snprintf(name, sizeof name, "mlx4_ibt%d", port); ++ snprintf(name, sizeof(name), "mlx4_ibt%d", port); + ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); + if (!ctx->wq) { + pr_err("Failed to create tunnelling WQ for port %d\n", port); +@@ -2188,7 +2202,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, + goto err_wq; + } + +- snprintf(name, sizeof name, "mlx4_ibud%d", port); ++ snprintf(name, sizeof(name), "mlx4_ibwi%d", port); ++ ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); ++ if (!ctx->wi_wq) { ++ pr_err("Failed to create wire WQ for port %d\n", port); ++ ret = -ENOMEM; ++ goto err_wiwq; ++ } ++ ++ snprintf(name, sizeof(name), "mlx4_ibud%d", port); + ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); + if (!ctx->ud_wq) { + pr_err("Failed to create up/down WQ for port %d\n", port); +@@ -2199,6 +2221,10 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, + return 0; + + err_udwq: ++ destroy_workqueue(ctx->wi_wq); ++ ctx->wi_wq = NULL; ++ ++err_wiwq: + destroy_workqueue(ctx->wq); + ctx->wq = NULL; + +@@ -2246,12 +2272,14 @@ static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx) + ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING; + } + flush_workqueue(ctx->wq); ++ flush_workqueue(ctx->wi_wq); + for (i = 0; i < dev->dev->caps.sqp_demux; i++) { + destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0); + free_pv_object(dev, i, ctx->port); + } + kfree(ctx->tun); + destroy_workqueue(ctx->ud_wq); ++ destroy_workqueue(ctx->wi_wq); + destroy_workqueue(ctx->wq); + } + } +diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h +index e10dccc7958f1..76ca67aa40158 100644 +--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h ++++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h +@@ -464,6 +464,7 @@ struct mlx4_ib_demux_pv_ctx { + struct ib_pd *pd; + struct work_struct work; + struct workqueue_struct *wq; ++ struct workqueue_struct *wi_wq; + struct mlx4_ib_demux_pv_qp qp[2]; + }; + +@@ -471,6 +472,7 @@ struct mlx4_ib_demux_ctx { + struct ib_device *ib_dev; + int port; + struct workqueue_struct *wq; ++ struct workqueue_struct *wi_wq; + struct workqueue_struct *ud_wq; + spinlock_t ud_lock; + atomic64_t subnet_prefix; +diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c +index d1680d3b58250..2a82661620fe7 100644 +--- a/drivers/infiniband/hw/qedr/main.c ++++ b/drivers/infiniband/hw/qedr/main.c +@@ -604,7 +604,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev) + qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx); + + /* Part 2 - check capabilities */ +- page_size = ~dev->attr.page_size_caps + 1; ++ page_size = ~qed_attr->page_size_caps + 1; + if (page_size > PAGE_SIZE) { + DP_ERR(dev, + "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n", +diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c +index 7b26afc7fef35..f847f0a9f204d 100644 +--- 
a/drivers/infiniband/hw/qedr/verbs.c ++++ b/drivers/infiniband/hw/qedr/verbs.c +@@ -2522,7 +2522,7 @@ int qedr_query_qp(struct ib_qp *ibqp, + qp_attr->cap.max_recv_wr = qp->rq.max_wr; + qp_attr->cap.max_send_sge = qp->sq.max_sges; + qp_attr->cap.max_recv_sge = qp->rq.max_sges; +- qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE; ++ qp_attr->cap.max_inline_data = dev->attr.max_inline; + qp_init_attr->cap = qp_attr->cap; + + qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; +diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c +index 12e79f9e32d53..d9a9644306096 100644 +--- a/drivers/media/i2c/m5mols/m5mols_core.c ++++ b/drivers/media/i2c/m5mols/m5mols_core.c +@@ -768,7 +768,8 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable) + + ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies); + if (ret) { +- info->set_power(&client->dev, 0); ++ if (info->set_power) ++ info->set_power(&client->dev, 0); + return ret; + } + +diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c +index e4c0a27b636aa..d9bc3851bf63b 100644 +--- a/drivers/media/i2c/tc358743.c ++++ b/drivers/media/i2c/tc358743.c +@@ -919,8 +919,8 @@ static const struct cec_adap_ops tc358743_cec_adap_ops = { + .adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable, + }; + +-static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus, +- bool *handled) ++static void tc358743_cec_handler(struct v4l2_subdev *sd, u16 intstatus, ++ bool *handled) + { + struct tc358743_state *state = to_state(sd); + unsigned int cec_rxint, cec_txint; +@@ -953,7 +953,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus, + cec_transmit_attempt_done(state->cec_adap, + CEC_TX_STATUS_ERROR); + } +- *handled = true; ++ if (handled) ++ *handled = true; + } + if ((intstatus & MASK_CEC_RINT) && + (cec_rxint & MASK_CECRIEND)) { +@@ -968,7 +969,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus, + msg.msg[i] = v & 0xff; + } + cec_received_msg(state->cec_adap, &msg); +- *handled = true; ++ if (handled) ++ *handled = true; + } + i2c_wr16(sd, INTSTATUS, + intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)); +@@ -1432,7 +1434,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled) + + #ifdef CONFIG_VIDEO_TC358743_CEC + if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) { +- tc358743_cec_isr(sd, intstatus, handled); ++ tc358743_cec_handler(sd, intstatus, handled); + i2c_wr16(sd, INTSTATUS, + intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)); + intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT); +@@ -1461,7 +1463,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled) + static irqreturn_t tc358743_irq_handler(int irq, void *dev_id) + { + struct tc358743_state *state = dev_id; +- bool handled; ++ bool handled = false; + + tc358743_isr(&state->sd, 0, &handled); + +diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c +index 2d25a197dc657..f5fca01f3248e 100644 +--- a/drivers/media/platform/exynos4-is/media-dev.c ++++ b/drivers/media/platform/exynos4-is/media-dev.c +@@ -1257,11 +1257,9 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd) + if (IS_ERR(pctl->state_default)) + return PTR_ERR(pctl->state_default); + ++ /* PINCTRL_STATE_IDLE is optional */ + pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl, + PINCTRL_STATE_IDLE); +- if (IS_ERR(pctl->state_idle)) +- return PTR_ERR(pctl->state_idle); +- + return 0; + } + +diff --git 
a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c +index 419e1cb10dc66..f4be4c672d40e 100644 +--- a/drivers/media/platform/mx2_emmaprp.c ++++ b/drivers/media/platform/mx2_emmaprp.c +@@ -929,8 +929,11 @@ static int emmaprp_probe(struct platform_device *pdev) + platform_set_drvdata(pdev, pcdev); + + irq = platform_get_irq(pdev, 0); +- if (irq < 0) +- return irq; ++ if (irq < 0) { ++ ret = irq; ++ goto rel_vdev; ++ } ++ + ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0, + dev_name(&pdev->dev), pcdev); + if (ret) +diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c +index addd03b517481..00e52f0b8251b 100644 +--- a/drivers/media/platform/omap3isp/isp.c ++++ b/drivers/media/platform/omap3isp/isp.c +@@ -2265,8 +2265,10 @@ static int isp_probe(struct platform_device *pdev) + mem = platform_get_resource(pdev, IORESOURCE_MEM, i); + isp->mmio_base[map_idx] = + devm_ioremap_resource(isp->dev, mem); +- if (IS_ERR(isp->mmio_base[map_idx])) +- return PTR_ERR(isp->mmio_base[map_idx]); ++ if (IS_ERR(isp->mmio_base[map_idx])) { ++ ret = PTR_ERR(isp->mmio_base[map_idx]); ++ goto error; ++ } + } + + ret = isp_get_clocks(isp); +diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c +index 008afb85023be..3c5b9082ad723 100644 +--- a/drivers/media/platform/qcom/camss/camss-csiphy.c ++++ b/drivers/media/platform/qcom/camss/camss-csiphy.c +@@ -176,8 +176,10 @@ static int csiphy_set_power(struct v4l2_subdev *sd, int on) + int ret; + + ret = pm_runtime_get_sync(dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_sync(dev); + return ret; ++ } + + ret = csiphy_set_clock_rates(csiphy); + if (ret < 0) { +diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c +index 5c6b00737fe75..05c712e00a2a7 100644 +--- a/drivers/media/platform/rcar-fcp.c ++++ b/drivers/media/platform/rcar-fcp.c +@@ -103,8 +103,10 @@ int rcar_fcp_enable(struct rcar_fcp_device *fcp) + return 0; + + ret = pm_runtime_get_sync(fcp->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(fcp->dev); + return ret; ++ } + + return 0; + } +diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c +index 92323310f7352..70a8cc433a03f 100644 +--- a/drivers/media/platform/rcar-vin/rcar-dma.c ++++ b/drivers/media/platform/rcar-vin/rcar-dma.c +@@ -1323,8 +1323,10 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel) + int ret; + + ret = pm_runtime_get_sync(vin->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(vin->dev); + return ret; ++ } + + /* Make register writes take effect immediately. 
*/ + vnmc = rvin_read(vin, VNMC_REG); +diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c +index 356821c2dacf0..0932f1445deab 100644 +--- a/drivers/media/platform/rockchip/rga/rga-buf.c ++++ b/drivers/media/platform/rockchip/rga/rga-buf.c +@@ -89,6 +89,7 @@ static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count) + + ret = pm_runtime_get_sync(rga->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(rga->dev); + rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED); + return ret; + } +diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c +index 5e080f32b0e82..95abf2bd7ebae 100644 +--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c ++++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c +@@ -83,8 +83,10 @@ int s5p_mfc_power_on(void) + int i, ret = 0; + + ret = pm_runtime_get_sync(pm->device); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(pm->device); + return ret; ++ } + + /* clock control */ + for (i = 0; i < pm->num_clocks; i++) { +diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c +index 18d0b56417894..ee1a211797673 100644 +--- a/drivers/media/platform/stm32/stm32-dcmi.c ++++ b/drivers/media/platform/stm32/stm32-dcmi.c +@@ -587,7 +587,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count) + if (ret < 0) { + dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n", + __func__, ret); +- goto err_release_buffers; ++ goto err_pm_put; + } + + /* Enable stream on the sub device */ +@@ -682,8 +682,6 @@ err_subdev_streamoff: + + err_pm_put: + pm_runtime_put(dcmi->dev); +- +-err_release_buffers: + spin_lock_irq(&dcmi->irqlock); + /* + * Return all buffers to vb2 in QUEUED state. +diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c +index a285b9db7ee86..70a8371b7e9a1 100644 +--- a/drivers/media/platform/ti-vpe/vpe.c ++++ b/drivers/media/platform/ti-vpe/vpe.c +@@ -2451,6 +2451,8 @@ static int vpe_runtime_get(struct platform_device *pdev) + + r = pm_runtime_get_sync(&pdev->dev); + WARN_ON(r < 0); ++ if (r) ++ pm_runtime_put_noidle(&pdev->dev); + return r < 0 ? r : 0; + } + +diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c +index 29c1473f2e9f6..81e24cf0c8b80 100644 +--- a/drivers/media/tuners/tuner-simple.c ++++ b/drivers/media/tuners/tuner-simple.c +@@ -499,7 +499,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer) + case TUNER_TENA_9533_DI: + case TUNER_YMEC_TVF_5533MF: + tuner_dbg("This tuner doesn't have FM. 
Most cards have a TEA5767 for FM\n"); +- return 0; ++ return -EINVAL; + case TUNER_PHILIPS_FM1216ME_MK3: + case TUNER_PHILIPS_FM1236_MK3: + case TUNER_PHILIPS_FMD1216ME_MK3: +@@ -701,7 +701,8 @@ static int simple_set_radio_freq(struct dvb_frontend *fe, + TUNER_RATIO_SELECT_50; /* 50 kHz step */ + + /* Bandswitch byte */ +- simple_radio_bandswitch(fe, &buffer[0]); ++ if (simple_radio_bandswitch(fe, &buffer[0])) ++ return 0; + + /* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps + freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) = +diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c +index f2854337cdcac..abfc49901222e 100644 +--- a/drivers/media/usb/uvc/uvc_ctrl.c ++++ b/drivers/media/usb/uvc/uvc_ctrl.c +@@ -778,12 +778,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping, + offset &= 7; + mask = ((1LL << bits) - 1) << offset; + +- for (; bits > 0; data++) { ++ while (1) { + u8 byte = *data & mask; + value |= offset > 0 ? (byte >> offset) : (byte << (-offset)); + bits -= 8 - (offset > 0 ? offset : 0); ++ if (bits <= 0) ++ break; ++ + offset -= 8; + mask = (1 << bits) - 1; ++ data++; + } + + /* Sign-extend the value if needed. */ +diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c +index 554063c07d7a2..f2457953f27c6 100644 +--- a/drivers/media/usb/uvc/uvc_entity.c ++++ b/drivers/media/usb/uvc/uvc_entity.c +@@ -78,10 +78,45 @@ static int uvc_mc_init_entity(struct uvc_video_chain *chain, + int ret; + + if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) { ++ u32 function; ++ + v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops); + strlcpy(entity->subdev.name, entity->name, + sizeof(entity->subdev.name)); + ++ switch (UVC_ENTITY_TYPE(entity)) { ++ case UVC_VC_SELECTOR_UNIT: ++ function = MEDIA_ENT_F_VID_MUX; ++ break; ++ case UVC_VC_PROCESSING_UNIT: ++ case UVC_VC_EXTENSION_UNIT: ++ /* For lack of a better option. 
*/ ++ function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER; ++ break; ++ case UVC_COMPOSITE_CONNECTOR: ++ case UVC_COMPONENT_CONNECTOR: ++ function = MEDIA_ENT_F_CONN_COMPOSITE; ++ break; ++ case UVC_SVIDEO_CONNECTOR: ++ function = MEDIA_ENT_F_CONN_SVIDEO; ++ break; ++ case UVC_ITT_CAMERA: ++ function = MEDIA_ENT_F_CAM_SENSOR; ++ break; ++ case UVC_TT_VENDOR_SPECIFIC: ++ case UVC_ITT_VENDOR_SPECIFIC: ++ case UVC_ITT_MEDIA_TRANSPORT_INPUT: ++ case UVC_OTT_VENDOR_SPECIFIC: ++ case UVC_OTT_DISPLAY: ++ case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: ++ case UVC_EXTERNAL_VENDOR_SPECIFIC: ++ default: ++ function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; ++ break; ++ } ++ ++ entity->subdev.entity.function = function; ++ + ret = media_entity_pads_init(&entity->subdev.entity, + entity->num_pads, entity->pads); + +diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c +index e0173bf4b0dc7..ec1ac61a21ed1 100644 +--- a/drivers/mfd/sm501.c ++++ b/drivers/mfd/sm501.c +@@ -1429,8 +1429,14 @@ static int sm501_plat_probe(struct platform_device *dev) + goto err_claim; + } + +- return sm501_init_dev(sm); ++ ret = sm501_init_dev(sm); ++ if (ret) ++ goto err_unmap; ++ ++ return 0; + ++ err_unmap: ++ iounmap(sm->regs); + err_claim: + release_resource(sm->regs_claim); + kfree(sm->regs_claim); +diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c +index 0e4193cb08cf1..e1f59b17715d5 100644 +--- a/drivers/misc/mic/scif/scif_rma.c ++++ b/drivers/misc/mic/scif/scif_rma.c +@@ -1403,6 +1403,8 @@ retry: + NULL); + up_write(&mm->mmap_sem); + if (nr_pages != pinned_pages->nr_pages) { ++ if (pinned_pages->nr_pages < 0) ++ pinned_pages->nr_pages = 0; + if (try_upgrade) { + if (ulimit) + __scif_dec_pinned_vm_lock(mm, +@@ -1423,7 +1425,6 @@ retry: + + if (pinned_pages->nr_pages < nr_pages) { + err = -EFAULT; +- pinned_pages->nr_pages = nr_pages; + goto dec_pinned; + } + +@@ -1436,7 +1437,6 @@ dec_pinned: + __scif_dec_pinned_vm_lock(mm, nr_pages, 0); + /* Something went wrong! 
Rollback */ + error_unmap: +- pinned_pages->nr_pages = nr_pages; + scif_destroy_pinned_pages(pinned_pages); + *pages = NULL; + dev_dbg(scif_info.mdev.this_device, +diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c +index bd52f29b4a4e2..5e0d1ac67f73f 100644 +--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c ++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c +@@ -671,8 +671,9 @@ static int qp_host_get_user_memory(u64 produce_uva, + if (retval < (int)produce_q->kernel_if->num_pages) { + pr_debug("get_user_pages_fast(produce) failed (retval=%d)", + retval); +- qp_release_pages(produce_q->kernel_if->u.h.header_page, +- retval, false); ++ if (retval > 0) ++ qp_release_pages(produce_q->kernel_if->u.h.header_page, ++ retval, false); + err = VMCI_ERROR_NO_MEM; + goto out; + } +@@ -683,8 +684,9 @@ static int qp_host_get_user_memory(u64 produce_uva, + if (retval < (int)consume_q->kernel_if->num_pages) { + pr_debug("get_user_pages_fast(consume) failed (retval=%d)", + retval); +- qp_release_pages(consume_q->kernel_if->u.h.header_page, +- retval, false); ++ if (retval > 0) ++ qp_release_pages(consume_q->kernel_if->u.h.header_page, ++ retval, false); + qp_release_pages(produce_q->kernel_if->u.h.header_page, + produce_q->kernel_if->num_pages, false); + err = VMCI_ERROR_NO_MEM; +diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c +index c950c880ad590..90e6cb64db69c 100644 +--- a/drivers/mtd/lpddr/lpddr2_nvm.c ++++ b/drivers/mtd/lpddr/lpddr2_nvm.c +@@ -402,6 +402,17 @@ static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add, + return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK); + } + ++static const struct mtd_info lpddr2_nvm_mtd_info = { ++ .type = MTD_RAM, ++ .writesize = 1, ++ .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK), ++ ._read = lpddr2_nvm_read, ++ ._write = lpddr2_nvm_write, ++ ._erase = lpddr2_nvm_erase, ++ ._unlock = lpddr2_nvm_unlock, ++ ._lock = lpddr2_nvm_lock, ++}; ++ + /* + * lpddr2_nvm driver probe method + */ +@@ -442,6 +453,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev) + .pfow_base = OW_BASE_ADDRESS, + .fldrv_priv = pcm_data, + }; ++ + if (IS_ERR(map->virt)) + return PTR_ERR(map->virt); + +@@ -453,22 +465,13 @@ static int lpddr2_nvm_probe(struct platform_device *pdev) + return PTR_ERR(pcm_data->ctl_regs); + + /* Populate mtd_info data structure */ +- *mtd = (struct mtd_info) { +- .dev = { .parent = &pdev->dev }, +- .name = pdev->dev.init_name, +- .type = MTD_RAM, +- .priv = map, +- .size = resource_size(add_range), +- .erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width, +- .writesize = 1, +- .writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width, +- .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK), +- ._read = lpddr2_nvm_read, +- ._write = lpddr2_nvm_write, +- ._erase = lpddr2_nvm_erase, +- ._unlock = lpddr2_nvm_unlock, +- ._lock = lpddr2_nvm_lock, +- }; ++ *mtd = lpddr2_nvm_mtd_info; ++ mtd->dev.parent = &pdev->dev; ++ mtd->name = pdev->dev.init_name; ++ mtd->priv = map; ++ mtd->size = resource_size(add_range); ++ mtd->erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width; ++ mtd->writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width; + + /* Verify the presence of the device looking for PFOW string */ + if (!lpddr2_nvm_pfow_present(map)) { +diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c +index e078fc41aa612..feeffde2d4fa9 100644 +--- a/drivers/mtd/mtdoops.c ++++ b/drivers/mtd/mtdoops.c +@@ -293,12 +293,13 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper, + 
kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE, + record_size - MTDOOPS_HEADER_SIZE, NULL); + +- /* Panics must be written immediately */ +- if (reason != KMSG_DUMP_OOPS) ++ if (reason != KMSG_DUMP_OOPS) { ++ /* Panics must be written immediately */ + mtdoops_write(cxt, 1); +- +- /* For other cases, schedule work to write it "nicely" */ +- schedule_work(&cxt->work_write); ++ } else { ++ /* For other cases, schedule work to write it "nicely" */ ++ schedule_work(&cxt->work_write); ++ } + } + + static void mtdoops_notify_add(struct mtd_info *mtd) +diff --git a/drivers/net/dsa/realtek-smi.h b/drivers/net/dsa/realtek-smi.h +index 9a63b51e1d82f..6f2dab7e33d65 100644 +--- a/drivers/net/dsa/realtek-smi.h ++++ b/drivers/net/dsa/realtek-smi.h +@@ -25,6 +25,9 @@ struct rtl8366_mib_counter { + const char *name; + }; + ++/** ++ * struct rtl8366_vlan_mc - Virtual LAN member configuration ++ */ + struct rtl8366_vlan_mc { + u16 vid; + u16 untag; +@@ -119,7 +122,6 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi); + int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used); + int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, + u32 untag, u32 fid); +-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val); + int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, + unsigned int vid); + int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable); +diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c +index 430988f797225..dddbc86429bd9 100644 +--- a/drivers/net/dsa/rtl8366.c ++++ b/drivers/net/dsa/rtl8366.c +@@ -36,12 +36,113 @@ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used) + } + EXPORT_SYMBOL_GPL(rtl8366_mc_is_used); + ++/** ++ * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration ++ * @smi: the Realtek SMI device instance ++ * @vid: the VLAN ID to look up or allocate ++ * @vlanmc: the pointer will be assigned to a pointer to a valid member config ++ * if successful ++ * @return: index of a new member config or negative error number ++ */ ++static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid, ++ struct rtl8366_vlan_mc *vlanmc) ++{ ++ struct rtl8366_vlan_4k vlan4k; ++ int ret; ++ int i; ++ ++ /* Try to find an existing member config entry for this VID */ ++ for (i = 0; i < smi->num_vlan_mc; i++) { ++ ret = smi->ops->get_vlan_mc(smi, i, vlanmc); ++ if (ret) { ++ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n", ++ i, vid); ++ return ret; ++ } ++ ++ if (vid == vlanmc->vid) ++ return i; ++ } ++ ++ /* We have no MC entry for this VID, try to find an empty one */ ++ for (i = 0; i < smi->num_vlan_mc; i++) { ++ ret = smi->ops->get_vlan_mc(smi, i, vlanmc); ++ if (ret) { ++ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n", ++ i, vid); ++ return ret; ++ } ++ ++ if (vlanmc->vid == 0 && vlanmc->member == 0) { ++ /* Update the entry from the 4K table */ ++ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); ++ if (ret) { ++ dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n", ++ i, vid); ++ return ret; ++ } ++ ++ vlanmc->vid = vid; ++ vlanmc->member = vlan4k.member; ++ vlanmc->untag = vlan4k.untag; ++ vlanmc->fid = vlan4k.fid; ++ ret = smi->ops->set_vlan_mc(smi, i, vlanmc); ++ if (ret) { ++ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n", ++ i, vid); ++ return ret; ++ } ++ ++ dev_dbg(smi->dev, "created new MC at index %d for VID %d\n", ++ i, vid); ++ return i; ++ } ++ } ++ ++ /* MC table is full, try to find an 
unused entry and replace it */ ++ for (i = 0; i < smi->num_vlan_mc; i++) { ++ int used; ++ ++ ret = rtl8366_mc_is_used(smi, i, &used); ++ if (ret) ++ return ret; ++ ++ if (!used) { ++ /* Update the entry from the 4K table */ ++ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); ++ if (ret) ++ return ret; ++ ++ vlanmc->vid = vid; ++ vlanmc->member = vlan4k.member; ++ vlanmc->untag = vlan4k.untag; ++ vlanmc->fid = vlan4k.fid; ++ ret = smi->ops->set_vlan_mc(smi, i, vlanmc); ++ if (ret) { ++ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n", ++ i, vid); ++ return ret; ++ } ++ dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n", ++ i, vid); ++ return i; ++ } ++ } ++ ++ dev_err(smi->dev, "all VLAN member configurations are in use\n"); ++ return -ENOSPC; ++} ++ + int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, + u32 untag, u32 fid) + { ++ struct rtl8366_vlan_mc vlanmc; + struct rtl8366_vlan_4k vlan4k; ++ int mc; + int ret; +- int i; ++ ++ if (!smi->ops->is_vlan_valid(smi, vid)) ++ return -EINVAL; + + dev_dbg(smi->dev, + "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", +@@ -63,133 +164,58 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, + "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", + vid, vlan4k.member, vlan4k.untag); + +- /* Try to find an existing MC entry for this VID */ +- for (i = 0; i < smi->num_vlan_mc; i++) { +- struct rtl8366_vlan_mc vlanmc; +- +- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); +- if (ret) +- return ret; +- +- if (vid == vlanmc.vid) { +- /* update the MC entry */ +- vlanmc.member |= member; +- vlanmc.untag |= untag; +- vlanmc.fid = fid; +- +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); ++ /* Find or allocate a member config for this VID */ ++ ret = rtl8366_obtain_mc(smi, vid, &vlanmc); ++ if (ret < 0) ++ return ret; ++ mc = ret; + +- dev_dbg(smi->dev, +- "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n", +- vid, vlanmc.member, vlanmc.untag); ++ /* Update the MC entry */ ++ vlanmc.member |= member; ++ vlanmc.untag |= untag; ++ vlanmc.fid = fid; + +- break; +- } +- } ++ /* Commit updates to the MC entry */ ++ ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc); ++ if (ret) ++ dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n", ++ mc, vid); ++ else ++ dev_dbg(smi->dev, ++ "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n", ++ vid, vlanmc.member, vlanmc.untag); + + return ret; + } + EXPORT_SYMBOL_GPL(rtl8366_set_vlan); + +-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val) +-{ +- struct rtl8366_vlan_mc vlanmc; +- int ret; +- int index; +- +- ret = smi->ops->get_mc_index(smi, port, &index); +- if (ret) +- return ret; +- +- ret = smi->ops->get_vlan_mc(smi, index, &vlanmc); +- if (ret) +- return ret; +- +- *val = vlanmc.vid; +- return 0; +-} +-EXPORT_SYMBOL_GPL(rtl8366_get_pvid); +- + int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, + unsigned int vid) + { + struct rtl8366_vlan_mc vlanmc; +- struct rtl8366_vlan_4k vlan4k; ++ int mc; + int ret; +- int i; +- +- /* Try to find an existing MC entry for this VID */ +- for (i = 0; i < smi->num_vlan_mc; i++) { +- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); +- if (ret) +- return ret; +- +- if (vid == vlanmc.vid) { +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); +- if (ret) +- return ret; +- +- ret = smi->ops->set_mc_index(smi, port, i); +- return ret; +- } +- } +- +- /* We have no MC entry for this VID, try to find an empty one */ +- for (i = 0; i < smi->num_vlan_mc; i++) { +- ret = 
smi->ops->get_vlan_mc(smi, i, &vlanmc); +- if (ret) +- return ret; +- +- if (vlanmc.vid == 0 && vlanmc.member == 0) { +- /* Update the entry from the 4K table */ +- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); +- if (ret) +- return ret; + +- vlanmc.vid = vid; +- vlanmc.member = vlan4k.member; +- vlanmc.untag = vlan4k.untag; +- vlanmc.fid = vlan4k.fid; +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); +- if (ret) +- return ret; +- +- ret = smi->ops->set_mc_index(smi, port, i); +- return ret; +- } +- } +- +- /* MC table is full, try to find an unused entry and replace it */ +- for (i = 0; i < smi->num_vlan_mc; i++) { +- int used; +- +- ret = rtl8366_mc_is_used(smi, i, &used); +- if (ret) +- return ret; +- +- if (!used) { +- /* Update the entry from the 4K table */ +- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); +- if (ret) +- return ret; ++ if (!smi->ops->is_vlan_valid(smi, vid)) ++ return -EINVAL; + +- vlanmc.vid = vid; +- vlanmc.member = vlan4k.member; +- vlanmc.untag = vlan4k.untag; +- vlanmc.fid = vlan4k.fid; +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); +- if (ret) +- return ret; ++ /* Find or allocate a member config for this VID */ ++ ret = rtl8366_obtain_mc(smi, vid, &vlanmc); ++ if (ret < 0) ++ return ret; ++ mc = ret; + +- ret = smi->ops->set_mc_index(smi, port, i); +- return ret; +- } ++ ret = smi->ops->set_mc_index(smi, port, mc); ++ if (ret) { ++ dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n", ++ mc, port); ++ return ret; + } + +- dev_err(smi->dev, +- "all VLAN member configurations are in use\n"); ++ dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n", ++ port, vid, mc); + +- return -ENOSPC; ++ return 0; + } + EXPORT_SYMBOL_GPL(rtl8366_set_pvid); + +@@ -389,7 +415,8 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, + if (!smi->ops->is_vlan_valid(smi, vid)) + return; + +- dev_info(smi->dev, "add VLAN on port %d, %s, %s\n", ++ dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n", ++ vlan->vid_begin, + port, + untagged ? "untagged" : "tagged", + pvid ? " PVID" : "no PVID"); +@@ -398,34 +425,29 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, + dev_err(smi->dev, "port is DSA or CPU port\n"); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { +- int pvid_val = 0; +- +- dev_info(smi->dev, "add VLAN %04x\n", vid); + member |= BIT(port); + + if (untagged) + untag |= BIT(port); + +- /* To ensure that we have a valid MC entry for this VLAN, +- * initialize the port VLAN ID here. 
+- */ +- ret = rtl8366_get_pvid(smi, port, &pvid_val); +- if (ret < 0) { +- dev_err(smi->dev, "could not lookup PVID for port %d\n", +- port); +- return; +- } +- if (pvid_val == 0) { +- ret = rtl8366_set_pvid(smi, port, vid); +- if (ret < 0) +- return; +- } +- + ret = rtl8366_set_vlan(smi, vid, member, untag, 0); + if (ret) + dev_err(smi->dev, + "failed to set up VLAN %04x", + vid); ++ ++ if (!pvid) ++ continue; ++ ++ ret = rtl8366_set_pvid(smi, port, vid); ++ if (ret) ++ dev_err(smi->dev, ++ "failed to set PVID on port %d to VLAN %04x", ++ port, vid); ++ ++ if (!ret) ++ dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n", ++ vid, port); + } + } + EXPORT_SYMBOL_GPL(rtl8366_vlan_add); +diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c +index f4b14b6acd22d..5aefd7a4696a5 100644 +--- a/drivers/net/dsa/rtl8366rb.c ++++ b/drivers/net/dsa/rtl8366rb.c +@@ -1270,7 +1270,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan) + if (smi->vlan4k_enabled) + max = RTL8366RB_NUM_VIDS - 1; + +- if (vlan == 0 || vlan >= max) ++ if (vlan == 0 || vlan > max) + return false; + + return true; +diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h +index 0dd64acd2a3fb..08cac1bfacafb 100644 +--- a/drivers/net/ethernet/cisco/enic/enic.h ++++ b/drivers/net/ethernet/cisco/enic/enic.h +@@ -171,6 +171,7 @@ struct enic { + u16 num_vfs; + #endif + spinlock_t enic_api_lock; ++ bool enic_api_busy; + struct enic_port_profile *pp; + + /* work queue cache line section */ +diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c +index b161f24522b87..b028ea2dec2b9 100644 +--- a/drivers/net/ethernet/cisco/enic/enic_api.c ++++ b/drivers/net/ethernet/cisco/enic/enic_api.c +@@ -34,6 +34,12 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf, + struct vnic_dev *vdev = enic->vdev; + + spin_lock(&enic->enic_api_lock); ++ while (enic->enic_api_busy) { ++ spin_unlock(&enic->enic_api_lock); ++ cpu_relax(); ++ spin_lock(&enic->enic_api_lock); ++ } ++ + spin_lock_bh(&enic->devcmd_lock); + + vnic_dev_cmd_proxy_by_index_start(vdev, vf); +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c +index 026a3bd71204f..810cbe2210463 100644 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c +@@ -2142,8 +2142,6 @@ static int enic_dev_wait(struct vnic_dev *vdev, + int done; + int err; + +- BUG_ON(in_interrupt()); +- + err = start(vdev, arg); + if (err) + return err; +@@ -2331,6 +2329,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic) + rss_hash_bits, rss_base_cpu, rss_enable); + } + ++static void enic_set_api_busy(struct enic *enic, bool busy) ++{ ++ spin_lock(&enic->enic_api_lock); ++ enic->enic_api_busy = busy; ++ spin_unlock(&enic->enic_api_lock); ++} ++ + static void enic_reset(struct work_struct *work) + { + struct enic *enic = container_of(work, struct enic, reset); +@@ -2340,7 +2345,9 @@ static void enic_reset(struct work_struct *work) + + rtnl_lock(); + +- spin_lock(&enic->enic_api_lock); ++ /* Stop any activity from infiniband */ ++ enic_set_api_busy(enic, true); ++ + enic_stop(enic->netdev); + enic_dev_soft_reset(enic); + enic_reset_addr_lists(enic); +@@ -2348,7 +2355,10 @@ static void enic_reset(struct work_struct *work) + enic_set_rss_nic_cfg(enic); + enic_dev_set_ig_vlan_rewrite_mode(enic); + enic_open(enic->netdev); +- spin_unlock(&enic->enic_api_lock); ++ ++ /* 
Allow infiniband to fiddle with the device again */ ++ enic_set_api_busy(enic, false); ++ + call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); + + rtnl_unlock(); +@@ -2360,7 +2370,9 @@ static void enic_tx_hang_reset(struct work_struct *work) + + rtnl_lock(); + +- spin_lock(&enic->enic_api_lock); ++ /* Stop any activity from infiniband */ ++ enic_set_api_busy(enic, true); ++ + enic_dev_hang_notify(enic); + enic_stop(enic->netdev); + enic_dev_hang_reset(enic); +@@ -2369,7 +2381,10 @@ static void enic_tx_hang_reset(struct work_struct *work) + enic_set_rss_nic_cfg(enic); + enic_dev_set_ig_vlan_rewrite_mode(enic); + enic_open(enic->netdev); +- spin_unlock(&enic->enic_api_lock); ++ ++ /* Allow infiniband to fiddle with the device again */ ++ enic_set_api_busy(enic, false); ++ + call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); + + rtnl_unlock(); +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c +index 3b6da228140e3..7d1a669416f20 100644 +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -1897,6 +1897,27 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + return ret; + } + ++static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev) ++{ ++ struct fec_enet_private *fep = netdev_priv(ndev); ++ struct phy_device *phy_dev = ndev->phydev; ++ ++ if (phy_dev) { ++ phy_reset_after_clk_enable(phy_dev); ++ } else if (fep->phy_node) { ++ /* ++ * If the PHY still is not bound to the MAC, but there is ++ * OF PHY node and a matching PHY device instance already, ++ * use the OF PHY node to obtain the PHY device instance, ++ * and then use that PHY device instance when triggering ++ * the PHY reset. ++ */ ++ phy_dev = of_phy_find_device(fep->phy_node); ++ phy_reset_after_clk_enable(phy_dev); ++ put_device(&phy_dev->mdio.dev); ++ } ++} ++ + static int fec_enet_clk_enable(struct net_device *ndev, bool enable) + { + struct fec_enet_private *fep = netdev_priv(ndev); +@@ -1923,7 +1944,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) + if (ret) + goto failed_clk_ref; + +- phy_reset_after_clk_enable(ndev->phydev); ++ fec_enet_phy_reset_after_clk_enable(ndev); + } else { + clk_disable_unprepare(fep->clk_enet_out); + if (fep->clk_ptp) { +@@ -2929,16 +2950,16 @@ fec_enet_open(struct net_device *ndev) + /* Init MAC prior to mii bus probe */ + fec_restart(ndev); + +- /* Probe and connect to PHY when open the interface */ +- ret = fec_enet_mii_probe(ndev); +- if (ret) +- goto err_enet_mii_probe; +- + /* Call phy_reset_after_clk_enable() again if it failed during + * phy_reset_after_clk_enable() before because the PHY wasn't probed. 
+ */ + if (reset_again) +- phy_reset_after_clk_enable(ndev->phydev); ++ fec_enet_phy_reset_after_clk_enable(ndev); ++ ++ /* Probe and connect to PHY when open the interface */ ++ ret = fec_enet_mii_probe(ndev); ++ if (ret) ++ goto err_enet_mii_probe; + + if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_used(); +diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c +index e2f6670d6eaf0..75a1915d95aa8 100644 +--- a/drivers/net/ethernet/ibm/ibmveth.c ++++ b/drivers/net/ethernet/ibm/ibmveth.c +@@ -1330,6 +1330,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) + int offset = ibmveth_rxq_frame_offset(adapter); + int csum_good = ibmveth_rxq_csum_good(adapter); + int lrg_pkt = ibmveth_rxq_large_packet(adapter); ++ __sum16 iph_check = 0; + + skb = ibmveth_rxq_get_buffer(adapter); + +@@ -1366,16 +1367,26 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, netdev); + +- if (csum_good) { +- skb->ip_summed = CHECKSUM_UNNECESSARY; +- ibmveth_rx_csum_helper(skb, adapter); ++ /* PHYP without PLSO support places a -1 in the ip ++ * checksum for large send frames. ++ */ ++ if (skb->protocol == cpu_to_be16(ETH_P_IP)) { ++ struct iphdr *iph = (struct iphdr *)skb->data; ++ ++ iph_check = iph->check; + } + +- if (length > netdev->mtu + ETH_HLEN) { ++ if ((length > netdev->mtu + ETH_HLEN) || ++ lrg_pkt || iph_check == 0xffff) { + ibmveth_rx_mss_helper(skb, mss, lrg_pkt); + adapter->rx_large_packets++; + } + ++ if (csum_good) { ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++ ibmveth_rx_csum_helper(skb, adapter); ++ } ++ + napi_gro_receive(napi, skb); /* send it up */ + + netdev->stats.rx_packets++; +diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c +index ae195f8adff58..5bdff77c0ad10 100644 +--- a/drivers/net/ethernet/korina.c ++++ b/drivers/net/ethernet/korina.c +@@ -1113,7 +1113,7 @@ out: + return rc; + + probe_err_register: +- kfree(lp->td_ring); ++ kfree(KSEG0ADDR(lp->td_ring)); + probe_err_td_ring: + iounmap(lp->tx_dma_regs); + probe_err_dma_tx: +@@ -1133,6 +1133,7 @@ static int korina_remove(struct platform_device *pdev) + iounmap(lp->eth_regs); + iounmap(lp->rx_dma_regs); + iounmap(lp->tx_dma_regs); ++ kfree(KSEG0ADDR(lp->td_ring)); + + unregister_netdev(bif->dev); + free_netdev(bif->dev); +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c +index 45d9a5f8fa1bc..f509a6ce31db7 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c +@@ -945,6 +945,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) + bool clean_complete = true; + int done; + ++ if (!budget) ++ return 0; ++ + if (priv->tx_ring_num[TX_XDP]) { + xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring]; + if (xdp_tx_cq->xdp_busy) { +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c +index 1857ee0f0871d..e58052d07e399 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c +@@ -343,7 +343,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv, + .dma = tx_info->map0_dma, + }; + +- if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) { ++ if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) { + dma_unmap_page(priv->ddev, tx_info->map0_dma, + PAGE_SIZE, priv->dma_dir); + put_page(tx_info->page); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 
b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +index d359e850dbf07..0fd62510fb277 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +@@ -475,8 +475,9 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev, + switch (clock->ptp_info.pin_config[pin].func) { + case PTP_PF_EXTTS: + ptp_event.index = pin; +- ptp_event.timestamp = timecounter_cyc2time(&clock->tc, +- be64_to_cpu(eqe->data.pps.time_stamp)); ++ ptp_event.timestamp = ++ mlx5_timecounter_cyc2time(clock, ++ be64_to_cpu(eqe->data.pps.time_stamp)); + if (clock->pps_info.enabled) { + ptp_event.type = PTP_CLOCK_PPSUSR; + ptp_event.pps_times.ts_real = +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c +index 6df404e3dd271..58dc4fe139fbe 100644 +--- a/drivers/net/ethernet/realtek/r8169.c ++++ b/drivers/net/ethernet/realtek/r8169.c +@@ -4111,6 +4111,27 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) + rtl_unlock_work(tp); + } + ++static void rtl_init_rxcfg(struct rtl8169_private *tp) ++{ ++ switch (tp->mac_version) { ++ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: ++ case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: ++ RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); ++ break; ++ case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: ++ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: ++ case RTL_GIGA_MAC_VER_38: ++ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); ++ break; ++ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: ++ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); ++ break; ++ default: ++ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); ++ break; ++ } ++} ++ + static int rtl_set_mac_address(struct net_device *dev, void *p) + { + struct rtl8169_private *tp = netdev_priv(dev); +@@ -4128,6 +4149,10 @@ static int rtl_set_mac_address(struct net_device *dev, void *p) + + pm_runtime_put_noidle(d); + ++ /* Reportedly at least Asus X453MA truncates packets otherwise */ ++ if (tp->mac_version == RTL_GIGA_MAC_VER_37) ++ rtl_init_rxcfg(tp); ++ + return 0; + } + +@@ -4289,27 +4314,6 @@ static void rtl_pll_power_up(struct rtl8169_private *tp) + } + } + +-static void rtl_init_rxcfg(struct rtl8169_private *tp) +-{ +- switch (tp->mac_version) { +- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: +- case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: +- RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); +- break; +- case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: +- case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: +- case RTL_GIGA_MAC_VER_38: +- RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); +- break; +- case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_51: +- RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); +- break; +- default: +- RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); +- break; +- } +-} +- + static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) + { + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; +@@ -6826,7 +6830,7 @@ static int rtl8169_close(struct net_device *dev) + + phy_disconnect(dev->phydev); + +- pci_free_irq(pdev, 0, tp); ++ free_irq(pci_irq_vector(pdev, 0), tp); + + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); +@@ -6881,8 +6885,8 @@ static int rtl_open(struct net_device *dev) + + rtl_request_firmware(tp); + +- retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp, +- dev->name); ++ retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt, ++ IRQF_NO_THREAD | IRQF_SHARED, dev->name, tp); + if (retval < 0) + goto err_release_fw_2; + +@@ -6915,7 +6919,7 @@ out: + return retval; + + err_free_irq: +- pci_free_irq(pdev, 0, tp); ++ free_irq(pci_irq_vector(pdev, 0), tp); + err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index c41879a955b57..2872684906e14 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -177,32 +177,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv) + } + } + +-/** +- * stmmac_stop_all_queues - Stop all queues +- * @priv: driver private structure +- */ +-static void stmmac_stop_all_queues(struct stmmac_priv *priv) +-{ +- u32 tx_queues_cnt = priv->plat->tx_queues_to_use; +- u32 queue; +- +- for (queue = 0; queue < tx_queues_cnt; queue++) +- netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); +-} +- +-/** +- * stmmac_start_all_queues - Start all queues +- * @priv: driver private structure +- */ +-static void stmmac_start_all_queues(struct stmmac_priv *priv) +-{ +- u32 tx_queues_cnt = priv->plat->tx_queues_to_use; +- u32 queue; +- +- for (queue = 0; queue < tx_queues_cnt; queue++) +- netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue)); +-} +- + static void stmmac_service_event_schedule(struct stmmac_priv *priv) + { + if (!test_bit(STMMAC_DOWN, &priv->state) && +@@ -2678,7 +2652,7 @@ static int stmmac_open(struct net_device *dev) + } + + stmmac_enable_all_queues(priv); +- stmmac_start_all_queues(priv); ++ netif_tx_start_all_queues(priv->dev); + + return 0; + +@@ -2724,8 +2698,6 @@ static int stmmac_release(struct net_device *dev) + phy_disconnect(dev->phydev); + } + +- stmmac_stop_all_queues(priv); +- + stmmac_disable_all_queues(priv); + + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) +@@ -4519,7 +4491,6 @@ int stmmac_suspend(struct device *dev) + mutex_lock(&priv->lock); + + netif_device_detach(ndev); +- stmmac_stop_all_queues(priv); + + stmmac_disable_all_queues(priv); + +@@ -4628,8 +4599,6 @@ int stmmac_resume(struct device *dev) + + stmmac_enable_all_queues(priv); + +- stmmac_start_all_queues(priv); +- + mutex_unlock(&priv->lock); + + if (ndev->phydev) +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index af58bf54aa9b6..d2612b69257ea 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -1312,6 +1312,7 @@ static const struct usb_device_id products[] = { + {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */ + {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ + 
{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/ ++ {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */ + + /* 4. Gobi 1000 devices */ + {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ +diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c +index 7221a53b8b144..500463044b1ab 100644 +--- a/drivers/net/wan/hdlc.c ++++ b/drivers/net/wan/hdlc.c +@@ -49,7 +49,15 @@ static struct hdlc_proto *first_proto; + static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *p, struct net_device *orig_dev) + { +- struct hdlc_device *hdlc = dev_to_hdlc(dev); ++ struct hdlc_device *hdlc; ++ ++ /* First make sure "dev" is an HDLC device */ ++ if (!(dev->priv_flags & IFF_WAN_HDLC)) { ++ kfree_skb(skb); ++ return NET_RX_SUCCESS; ++ } ++ ++ hdlc = dev_to_hdlc(dev); + + if (!net_eq(dev_net(dev), &init_net)) { + kfree_skb(skb); +diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c +index 8bd3ed9058132..676dea2918bf3 100644 +--- a/drivers/net/wan/hdlc_raw_eth.c ++++ b/drivers/net/wan/hdlc_raw_eth.c +@@ -102,6 +102,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) + old_qlen = dev->tx_queue_len; + ether_setup(dev); + dev->tx_queue_len = old_qlen; ++ dev->priv_flags &= ~IFF_TX_SKB_SHARING; + eth_hw_addr_random(dev); + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); + netif_dormant_off(dev); +diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c +index f761d651c16e7..2276d608bca35 100644 +--- a/drivers/net/wireless/ath/ath10k/ce.c ++++ b/drivers/net/wireless/ath/ath10k/ce.c +@@ -1453,7 +1453,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, + ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries); + if (ret) { + dma_free_coherent(ar->dev, +- (nentries * sizeof(struct ce_desc_64) + ++ (nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + src_ring->base_addr_owner_space_unaligned, + base_addr); +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c +index 81af403c19c2a..faaca7fe9ad1e 100644 +--- a/drivers/net/wireless/ath/ath10k/mac.c ++++ b/drivers/net/wireless/ath/ath10k/mac.c +@@ -6862,7 +6862,7 @@ ath10k_mac_update_bss_chan_survey(struct ath10k *ar, + struct ieee80211_channel *channel) + { + int ret; +- enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR; ++ enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ; + + lockdep_assert_held(&ar->conf_mutex); + +diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c +index 0c61dbaa62a41..702c4761006ca 100644 +--- a/drivers/net/wireless/ath/ath6kl/main.c ++++ b/drivers/net/wireless/ath/ath6kl/main.c +@@ -429,6 +429,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, + + ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid); + ++ if (aid < 1 || aid > AP_MAX_NUM_STA) ++ return; ++ + if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) { + struct ieee80211_mgmt *mgmt = + (struct ieee80211_mgmt *) assoc_info; +diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c +index bc7916f2add09..987ebae8ea0e1 100644 +--- a/drivers/net/wireless/ath/ath6kl/wmi.c ++++ b/drivers/net/wireless/ath/ath6kl/wmi.c +@@ -2648,6 +2648,11 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class, + return -EINVAL; + } + ++ if (tsid >= 16) { ++ 
ath6kl_err("invalid tsid: %d\n", tsid); ++ return -EINVAL; ++ } ++ + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; +diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c +index f705f0e1cb5be..05fca38b38ed4 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_hst.c ++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c +@@ -342,6 +342,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle, + + if (skb) { + htc_hdr = (struct htc_frame_hdr *) skb->data; ++ if (htc_hdr->endpoint_id >= ARRAY_SIZE(htc_handle->endpoint)) ++ goto ret; + endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id]; + skb_pull(skb, sizeof(struct htc_frame_hdr)); + +diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c +index ad051f34e65b2..46ae4ec4ad47d 100644 +--- a/drivers/net/wireless/ath/wcn36xx/main.c ++++ b/drivers/net/wireless/ath/wcn36xx/main.c +@@ -163,7 +163,7 @@ static struct ieee80211_supported_band wcn_band_5ghz = { + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, + .mcs = { + .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, +- .rx_highest = cpu_to_le16(72), ++ .rx_highest = cpu_to_le16(150), + .tx_params = IEEE80211_HT_MCS_TX_DEFINED, + } + } +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +index 9d7b8834b8545..db4c541f58ae0 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +@@ -438,7 +438,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb, + ret = brcmf_proto_hdrpull(drvr, true, skb, ifp); + + if (ret || !(*ifp) || !(*ifp)->ndev) { +- if (ret != -ENODATA && *ifp) ++ if (ret != -ENODATA && *ifp && (*ifp)->ndev) + (*ifp)->ndev->stats.rx_errors++; + brcmu_pkt_buf_free_skb(skb); + return -ENODATA; +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +index 58653598db146..525b26e0f65ee 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +@@ -3424,9 +3424,12 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, + aux_roc_req.apply_time_max_delay = cpu_to_le32(delay); + + IWL_DEBUG_TE(mvm, +- "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", +- channel->hw_value, req_dur, duration, delay, +- dtim_interval); ++ "ROC: Requesting to remain on channel %u for %ums\n", ++ channel->hw_value, req_dur); ++ IWL_DEBUG_TE(mvm, ++ "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", ++ duration, delay, dtim_interval); ++ + /* Set the node address */ + memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN); + +diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c +index 85d6d5f3dce5b..c9f6cd2919699 100644 +--- a/drivers/net/wireless/marvell/mwifiex/scan.c ++++ b/drivers/net/wireless/marvell/mwifiex/scan.c +@@ -1895,7 +1895,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, + chan, CFG80211_BSS_FTYPE_UNKNOWN, + bssid, timestamp, + cap_info_bitmap, beacon_period, +- ie_buf, ie_len, rssi, GFP_KERNEL); ++ ie_buf, ie_len, rssi, GFP_ATOMIC); + if (bss) { + bss_priv = (struct mwifiex_bss_priv *)bss->priv; + bss_priv->band = band; +diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c +index 
bfbe3aa058d93..0773d81072aa1 100644 +--- a/drivers/net/wireless/marvell/mwifiex/sdio.c ++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c +@@ -1985,6 +1985,8 @@ error: + kfree(card->mpa_rx.buf); + card->mpa_tx.buf_size = 0; + card->mpa_rx.buf_size = 0; ++ card->mpa_tx.buf = NULL; ++ card->mpa_rx.buf = NULL; + } + + return ret; +diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c +index 734844b34c266..dd473b206f123 100644 +--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c ++++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c +@@ -894,6 +894,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif) + default: + pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid, + vif->vifid, vif->wdev.iftype); ++ dev_kfree_skb(cmd_skb); + ret = -EINVAL; + goto out; + } +@@ -2212,6 +2213,7 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac, + break; + default: + pr_err("unsupported iftype %d\n", vif->wdev.iftype); ++ dev_kfree_skb(cmd_skb); + ret = -EINVAL; + goto out; + } +diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c +index 0e31f1392a53c..949b07e29c06b 100644 +--- a/drivers/perf/xgene_pmu.c ++++ b/drivers/perf/xgene_pmu.c +@@ -1474,17 +1474,6 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id) + } + + #if defined(CONFIG_ACPI) +-static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data) +-{ +- struct resource *res = data; +- +- if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) +- acpi_dev_resource_memory(ares, res); +- +- /* Always tell the ACPI core to skip this resource */ +- return 1; +-} +- + static struct + xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu, + struct acpi_device *adev, u32 type) +@@ -1496,6 +1485,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu, + struct hw_pmu_info *inf; + void __iomem *dev_csr; + struct resource res; ++ struct resource_entry *rentry; + int enable_bit; + int rc; + +@@ -1504,11 +1494,23 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu, + return NULL; + + INIT_LIST_HEAD(&resource_list); +- rc = acpi_dev_get_resources(adev, &resource_list, +- acpi_pmu_dev_add_resource, &res); ++ rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); ++ if (rc <= 0) { ++ dev_err(dev, "PMU type %d: No resources found\n", type); ++ return NULL; ++ } ++ ++ list_for_each_entry(rentry, &resource_list, node) { ++ if (resource_type(rentry->res) == IORESOURCE_MEM) { ++ res = *rentry->res; ++ rentry = NULL; ++ break; ++ } ++ } + acpi_dev_free_resource_list(&resource_list); +- if (rc < 0) { +- dev_err(dev, "PMU type %d: No resource address found\n", type); ++ ++ if (rentry) { ++ dev_err(dev, "PMU type %d: No memory resource found\n", type); + return NULL; + } + +diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig +index 0f38d51f47c64..e6cd314919de1 100644 +--- a/drivers/pinctrl/bcm/Kconfig ++++ b/drivers/pinctrl/bcm/Kconfig +@@ -21,6 +21,7 @@ config PINCTRL_BCM2835 + select PINMUX + select PINCONF + select GENERIC_PINCONF ++ select GPIOLIB + select GPIOLIB_IRQCHIP + + config PINCTRL_IPROC_GPIO +diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c +index 33c3eca0ece97..5b5a4323ae63d 100644 +--- a/drivers/pinctrl/pinctrl-mcp23s08.c ++++ b/drivers/pinctrl/pinctrl-mcp23s08.c +@@ -120,7 +120,7 @@ static const struct regmap_config mcp23x08_regmap = { + .max_register = MCP_OLAT, + }; + +-static const struct reg_default 
mcp23x16_defaults[] = { ++static const struct reg_default mcp23x17_defaults[] = { + {.reg = MCP_IODIR << 1, .def = 0xffff}, + {.reg = MCP_IPOL << 1, .def = 0x0000}, + {.reg = MCP_GPINTEN << 1, .def = 0x0000}, +@@ -131,23 +131,23 @@ static const struct reg_default mcp23x16_defaults[] = { + {.reg = MCP_OLAT << 1, .def = 0x0000}, + }; + +-static const struct regmap_range mcp23x16_volatile_range = { ++static const struct regmap_range mcp23x17_volatile_range = { + .range_min = MCP_INTF << 1, + .range_max = MCP_GPIO << 1, + }; + +-static const struct regmap_access_table mcp23x16_volatile_table = { +- .yes_ranges = &mcp23x16_volatile_range, ++static const struct regmap_access_table mcp23x17_volatile_table = { ++ .yes_ranges = &mcp23x17_volatile_range, + .n_yes_ranges = 1, + }; + +-static const struct regmap_range mcp23x16_precious_range = { +- .range_min = MCP_GPIO << 1, ++static const struct regmap_range mcp23x17_precious_range = { ++ .range_min = MCP_INTCAP << 1, + .range_max = MCP_GPIO << 1, + }; + +-static const struct regmap_access_table mcp23x16_precious_table = { +- .yes_ranges = &mcp23x16_precious_range, ++static const struct regmap_access_table mcp23x17_precious_table = { ++ .yes_ranges = &mcp23x17_precious_range, + .n_yes_ranges = 1, + }; + +@@ -157,10 +157,10 @@ static const struct regmap_config mcp23x17_regmap = { + + .reg_stride = 2, + .max_register = MCP_OLAT << 1, +- .volatile_table = &mcp23x16_volatile_table, +- .precious_table = &mcp23x16_precious_table, +- .reg_defaults = mcp23x16_defaults, +- .num_reg_defaults = ARRAY_SIZE(mcp23x16_defaults), ++ .volatile_table = &mcp23x17_volatile_table, ++ .precious_table = &mcp23x17_precious_table, ++ .reg_defaults = mcp23x17_defaults, ++ .num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults), + .cache_type = REGCACHE_FLAT, + .val_format_endian = REGMAP_ENDIAN_LITTLE, + }; +diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c +index 69e28c12d5915..0c72de95b5ccd 100644 +--- a/drivers/platform/x86/mlx-platform.c ++++ b/drivers/platform/x86/mlx-platform.c +@@ -221,15 +221,6 @@ static struct i2c_board_info mlxplat_mlxcpld_psu[] = { + }, + }; + +-static struct i2c_board_info mlxplat_mlxcpld_ng_psu[] = { +- { +- I2C_BOARD_INFO("24c32", 0x51), +- }, +- { +- I2C_BOARD_INFO("24c32", 0x50), +- }, +-}; +- + static struct i2c_board_info mlxplat_mlxcpld_pwr[] = { + { + I2C_BOARD_INFO("dps460", 0x59), +@@ -589,15 +580,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = { + .label = "psu1", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(0), +- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[0], +- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu2", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(1), +- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[1], +- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + }; + +diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c +index 7a4a6406cf69a..69f8be065919e 100644 +--- a/drivers/pwm/pwm-lpss.c ++++ b/drivers/pwm/pwm-lpss.c +@@ -105,10 +105,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, + * The equation is: + * base_unit = round(base_unit_range * freq / c) + */ +- base_unit_range = BIT(lpwm->info->base_unit_bits) - 1; ++ base_unit_range = BIT(lpwm->info->base_unit_bits); + freq *= base_unit_range; + + base_unit = DIV_ROUND_CLOSEST_ULL(freq, c); ++ /* base_unit must not be 0 and we also want to avoid overflowing it */ ++ 
base_unit = clamp_val(base_unit, 1, base_unit_range - 1); + + on_time_div = 255ULL * duty_ns; + do_div(on_time_div, period_ns); +@@ -116,8 +118,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, + + orig_ctrl = ctrl = pwm_lpss_read(pwm); + ctrl &= ~PWM_ON_TIME_DIV_MASK; +- ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT); +- base_unit &= base_unit_range; ++ ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT); + ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT; + ctrl |= on_time_div; + +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index 37e6270749eef..c290c89421314 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -4363,15 +4363,20 @@ regulator_register(const struct regulator_desc *regulator_desc, + else if (regulator_desc->supply_name) + rdev->supply_name = regulator_desc->supply_name; + +- /* +- * Attempt to resolve the regulator supply, if specified, +- * but don't return an error if we fail because we will try +- * to resolve it again later as more regulators are added. +- */ +- if (regulator_resolve_supply(rdev)) +- rdev_dbg(rdev, "unable to resolve supply\n"); +- + ret = set_machine_constraints(rdev, constraints); ++ if (ret == -EPROBE_DEFER) { ++ /* Regulator might be in bypass mode and so needs its supply ++ * to set the constraints */ ++ /* FIXME: this currently triggers a chicken-and-egg problem ++ * when creating -SUPPLY symlink in sysfs to a regulator ++ * that is just being created */ ++ ret = regulator_resolve_supply(rdev); ++ if (!ret) ++ ret = set_machine_constraints(rdev, constraints); ++ else ++ rdev_dbg(rdev, "unable to resolve supply early: %pe\n", ++ ERR_PTR(ret)); ++ } + if (ret < 0) + goto wash; + +diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c +index 3660059784f74..6221a8372cee2 100644 +--- a/drivers/scsi/be2iscsi/be_main.c ++++ b/drivers/scsi/be2iscsi/be_main.c +@@ -3039,6 +3039,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba, + goto create_eq_error; + } + ++ mem->dma = paddr; + mem->va = eq_vaddress; + ret = be_fill_queue(eq, phba->params.num_eq_entries, + sizeof(struct be_eq_entry), eq_vaddress); +@@ -3048,7 +3049,6 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba, + goto create_eq_error; + } + +- mem->dma = paddr; + ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, + BEISCSI_EQ_DELAY_DEF); + if (ret) { +@@ -3105,6 +3105,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba, + goto create_cq_error; + } + ++ mem->dma = paddr; + ret = be_fill_queue(cq, phba->params.num_cq_entries, + sizeof(struct sol_cqe), cq_vaddress); + if (ret) { +@@ -3114,7 +3115,6 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba, + goto create_cq_error; + } + +- mem->dma = paddr; + ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, + false, 0); + if (ret) { +diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c +index e519238864758..1b6f9351b43f9 100644 +--- a/drivers/scsi/csiostor/csio_hw.c ++++ b/drivers/scsi/csiostor/csio_hw.c +@@ -2384,7 +2384,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info, + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); +- ret = EINVAL; ++ ret = -EINVAL; + goto bye; + } + +diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c +index 3e2f8ce1d9a97..7821c1695e824 100644 +--- a/drivers/scsi/qla2xxx/qla_nvme.c ++++ 
b/drivers/scsi/qla2xxx/qla_nvme.c +@@ -676,7 +676,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha) + struct nvme_fc_port_template *tmpl; + struct qla_hw_data *ha; + struct nvme_fc_port_info pinfo; +- int ret = EINVAL; ++ int ret = -EINVAL; + + if (!IS_ENABLED(CONFIG_NVME_FC)) + return ret; +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c +index f59b8982b2883..4ba9f46fcf748 100644 +--- a/drivers/scsi/qla4xxx/ql4_os.c ++++ b/drivers/scsi/qla4xxx/ql4_os.c +@@ -1221,7 +1221,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) + le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error); + exit_host_stats: + if (ql_iscsi_stats) +- dma_free_coherent(&ha->pdev->dev, host_stats_size, ++ dma_free_coherent(&ha->pdev->dev, stats_size, + ql_iscsi_stats, iscsi_stats_dma); + + ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n", +diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c +index 943172806a8a7..3e63e4ce45b04 100644 +--- a/drivers/slimbus/core.c ++++ b/drivers/slimbus/core.c +@@ -255,8 +255,6 @@ int slim_unregister_controller(struct slim_controller *ctrl) + { + /* Remove all clients */ + device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device); +- /* Enter Clock Pause */ +- slim_ctrl_clk_pause(ctrl, false, 0); + ida_simple_remove(&ctrl_ida, ctrl->id); + + return 0; +@@ -297,8 +295,8 @@ void slim_report_absent(struct slim_device *sbdev) + mutex_lock(&ctrl->lock); + sbdev->is_laddr_valid = false; + mutex_unlock(&ctrl->lock); +- +- ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr); ++ if (!ctrl->get_laddr) ++ ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr); + slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN); + } + EXPORT_SYMBOL_GPL(slim_report_absent); +diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c +index f40ac8dcb0817..522a87fc573a6 100644 +--- a/drivers/slimbus/qcom-ngd-ctrl.c ++++ b/drivers/slimbus/qcom-ngd-ctrl.c +@@ -1272,9 +1272,13 @@ static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl, + { + struct qcom_slim_ngd_qmi *qmi = + container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl); ++ struct qcom_slim_ngd_ctrl *ctrl = ++ container_of(qmi, struct qcom_slim_ngd_ctrl, qmi); + + qmi->svc_info.sq_node = 0; + qmi->svc_info.sq_port = 0; ++ ++ qcom_slim_ngd_enable(ctrl, false); + } + + static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = { +diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c +index 7b7151ec14c8a..1d948fee1a039 100644 +--- a/drivers/spi/spi-s3c64xx.c ++++ b/drivers/spi/spi-s3c64xx.c +@@ -122,6 +122,7 @@ + + struct s3c64xx_spi_dma_data { + struct dma_chan *ch; ++ dma_cookie_t cookie; + enum dma_transfer_direction direction; + }; + +@@ -264,12 +265,13 @@ static void s3c64xx_spi_dmacb(void *data) + spin_unlock_irqrestore(&sdd->lock, flags); + } + +-static void prepare_dma(struct s3c64xx_spi_dma_data *dma, ++static int prepare_dma(struct s3c64xx_spi_dma_data *dma, + struct sg_table *sgt) + { + struct s3c64xx_spi_driver_data *sdd; + struct dma_slave_config config; + struct dma_async_tx_descriptor *desc; ++ int ret; + + memset(&config, 0, sizeof(config)); + +@@ -293,12 +295,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma, + + desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents, + dma->direction, DMA_PREP_INTERRUPT); ++ if (!desc) { ++ dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist", ++ dma->direction == DMA_DEV_TO_MEM ? 
"rx" : "tx"); ++ return -ENOMEM; ++ } + + desc->callback = s3c64xx_spi_dmacb; + desc->callback_param = dma; + +- dmaengine_submit(desc); ++ dma->cookie = dmaengine_submit(desc); ++ ret = dma_submit_error(dma->cookie); ++ if (ret) { ++ dev_err(&sdd->pdev->dev, "DMA submission failed"); ++ return -EIO; ++ } ++ + dma_async_issue_pending(dma->ch); ++ return 0; + } + + static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable) +@@ -348,11 +362,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master, + return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1; + } + +-static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, ++static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, + struct spi_transfer *xfer, int dma_mode) + { + void __iomem *regs = sdd->regs; + u32 modecfg, chcfg; ++ int ret = 0; + + modecfg = readl(regs + S3C64XX_SPI_MODE_CFG); + modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); +@@ -378,7 +393,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, + chcfg |= S3C64XX_SPI_CH_TXCH_ON; + if (dma_mode) { + modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; +- prepare_dma(&sdd->tx_dma, &xfer->tx_sg); ++ ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg); + } else { + switch (sdd->cur_bpw) { + case 32: +@@ -410,12 +425,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, + writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) + | S3C64XX_SPI_PACKET_CNT_EN, + regs + S3C64XX_SPI_PACKET_CNT); +- prepare_dma(&sdd->rx_dma, &xfer->rx_sg); ++ ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg); + } + } + ++ if (ret) ++ return ret; ++ + writel(modecfg, regs + S3C64XX_SPI_MODE_CFG); + writel(chcfg, regs + S3C64XX_SPI_CH_CFG); ++ ++ return 0; + } + + static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd, +@@ -548,9 +568,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd, + return 0; + } + +-static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) ++static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) + { + void __iomem *regs = sdd->regs; ++ int ret; + u32 val; + + /* Disable Clock */ +@@ -598,7 +619,9 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) + + if (sdd->port_conf->clk_from_cmu) { + /* The src_clk clock is divided internally by 2 */ +- clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); ++ ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); ++ if (ret) ++ return ret; + } else { + /* Configure Clock */ + val = readl(regs + S3C64XX_SPI_CLK_CFG); +@@ -612,6 +635,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) + val |= S3C64XX_SPI_ENCLK_ENABLE; + writel(val, regs + S3C64XX_SPI_CLK_CFG); + } ++ ++ return 0; + } + + #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32) +@@ -654,7 +679,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master, + sdd->cur_bpw = bpw; + sdd->cur_speed = speed; + sdd->cur_mode = spi->mode; +- s3c64xx_spi_config(sdd); ++ status = s3c64xx_spi_config(sdd); ++ if (status) ++ return status; + } + + if (!is_polling(sdd) && (xfer->len > fifo_len) && +@@ -678,13 +705,18 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master, + sdd->state &= ~RXBUSY; + sdd->state &= ~TXBUSY; + +- s3c64xx_enable_datapath(sdd, xfer, use_dma); +- + /* Start the signals */ + s3c64xx_spi_set_cs(spi, true); + ++ status = s3c64xx_enable_datapath(sdd, xfer, use_dma); ++ + spin_unlock_irqrestore(&sdd->lock, flags); + ++ if (status) { ++ dev_err(&spi->dev, "failed to enable data path for 
transfer: %d\n", status); ++ break; ++ } ++ + if (use_dma) + status = s3c64xx_wait_for_dma(sdd, xfer); + else +diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c +index 28cae82d795c7..fb824c5174497 100644 +--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c ++++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c +@@ -599,7 +599,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee, + + prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE, + sizeof(struct ieee80211_rxb *), +- GFP_KERNEL); ++ GFP_ATOMIC); + if (!prxbIndicateArray) + return; + +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c +index 99314e5162447..0219b5a865bee 100644 +--- a/drivers/target/target_core_user.c ++++ b/drivers/target/target_core_user.c +@@ -680,7 +680,7 @@ static void scatter_data_area(struct tcmu_dev *udev, + void *from, *to = NULL; + size_t copy_bytes, to_offset, offset; + struct scatterlist *sg; +- struct page *page; ++ struct page *page = NULL; + + for_each_sg(data_sg, sg, data_nents, i) { + int sg_remaining = sg->length; +diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c +index cb4db1b3ca3c0..7853c6375325d 100644 +--- a/drivers/tty/hvc/hvcs.c ++++ b/drivers/tty/hvc/hvcs.c +@@ -1218,13 +1218,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) + + tty_wait_until_sent(tty, HVCS_CLOSE_WAIT); + +- /* +- * This line is important because it tells hvcs_open that this +- * device needs to be re-configured the next time hvcs_open is +- * called. +- */ +- tty->driver_data = NULL; +- + free_irq(irq, hvcsd); + return; + } else if (hvcsd->port.count < 0) { +@@ -1239,6 +1232,13 @@ static void hvcs_cleanup(struct tty_struct * tty) + { + struct hvcs_struct *hvcsd = tty->driver_data; + ++ /* ++ * This line is important because it tells hvcs_open that this ++ * device needs to be re-configured the next time hvcs_open is ++ * called. ++ */ ++ tty->driver_data = NULL; ++ + tty_port_put(&hvcsd->port); + } + +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c +index 00099a8439d21..c6a1d8c4e6894 100644 +--- a/drivers/tty/pty.c ++++ b/drivers/tty/pty.c +@@ -120,10 +120,10 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) + spin_lock_irqsave(&to->port->lock, flags); + /* Stuff the data into the input queue of the other end */ + c = tty_insert_flip_string(to->port, buf, c); ++ spin_unlock_irqrestore(&to->port->lock, flags); + /* And shovel */ + if (c) + tty_flip_buffer_push(to->port); +- spin_unlock_irqrestore(&to->port->lock, flags); + } + return c; + } +diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig +index df8bd0c7b97db..cd13065095bc3 100644 +--- a/drivers/tty/serial/Kconfig ++++ b/drivers/tty/serial/Kconfig +@@ -9,6 +9,7 @@ menu "Serial drivers" + + config SERIAL_EARLYCON + bool ++ depends on SERIAL_CORE + help + Support for early consoles with the earlycon parameter. This enables + the console before standard serial driver is probed. 
The console is +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c +index f18aa3f59e519..8e98b4df9b109 100644 +--- a/drivers/usb/dwc2/gadget.c ++++ b/drivers/usb/dwc2/gadget.c +@@ -671,8 +671,11 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg) + */ + static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) + { ++ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc; + int is_isoc = hs_ep->isochronous; + unsigned int maxsize; ++ u32 mps = hs_ep->ep.maxpacket; ++ int dir_in = hs_ep->dir_in; + + if (is_isoc) + maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT : +@@ -681,6 +684,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) + else + maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC; + ++ /* Interrupt OUT EP with mps not multiple of 4 */ ++ if (hs_ep->index) ++ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) ++ maxsize = mps * MAX_DMA_DESC_NUM_GENERIC; ++ + return maxsize; + } + +@@ -696,11 +704,14 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) + * Isochronous - descriptor rx/tx bytes bitfield limit, + * Control In/Bulk/Interrupt - multiple of mps. This will allow to not + * have concatenations from various descriptors within one packet. ++ * Interrupt OUT - if mps not multiple of 4 then a single packet corresponds ++ * to a single descriptor. + * + * Selects corresponding mask for RX/TX bytes as well. + */ + static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask) + { ++ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc; + u32 mps = hs_ep->ep.maxpacket; + int dir_in = hs_ep->dir_in; + u32 desc_size = 0; +@@ -724,6 +735,13 @@ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask) + desc_size -= desc_size % mps; + } + ++ /* Interrupt OUT EP with mps not multiple of 4 */ ++ if (hs_ep->index) ++ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) { ++ desc_size = mps; ++ *mask = DEV_DMA_NBYTES_MASK; ++ } ++ + return desc_size; + } + +@@ -1044,13 +1062,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, + length += (mps - (length % mps)); + } + +- /* +- * If more data to send, adjust DMA for EP0 out data stage. +- * ureq->dma stays unchanged, hence increment it by already +- * passed passed data count before starting new transaction. 
+- */ +- if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT && +- continuing) ++ if (continuing) + offset = ureq->actual; + + /* Fill DDMA chain entries */ +@@ -2220,22 +2232,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg, + */ + static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep) + { ++ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc; + struct dwc2_hsotg *hsotg = hs_ep->parent; + unsigned int bytes_rem = 0; ++ unsigned int bytes_rem_correction = 0; + struct dwc2_dma_desc *desc = hs_ep->desc_list; + int i; + u32 status; ++ u32 mps = hs_ep->ep.maxpacket; ++ int dir_in = hs_ep->dir_in; + + if (!desc) + return -EINVAL; + ++ /* Interrupt OUT EP with mps not multiple of 4 */ ++ if (hs_ep->index) ++ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) ++ bytes_rem_correction = 4 - (mps % 4); ++ + for (i = 0; i < hs_ep->desc_count; ++i) { + status = desc->status; + bytes_rem += status & DEV_DMA_NBYTES_MASK; ++ bytes_rem -= bytes_rem_correction; + + if (status & DEV_DMA_STS_MASK) + dev_err(hsotg->dev, "descriptor %d closed with %x\n", + i, status & DEV_DMA_STS_MASK); ++ ++ if (status & DEV_DMA_L) ++ break; ++ + desc++; + } + +diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c +index a93415f33bf36..6d7861cba3f56 100644 +--- a/drivers/usb/dwc2/params.c ++++ b/drivers/usb/dwc2/params.c +@@ -808,7 +808,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) + int dwc2_init_params(struct dwc2_hsotg *hsotg) + { + const struct of_device_id *match; +- void (*set_params)(void *data); ++ void (*set_params)(struct dwc2_hsotg *data); + + dwc2_set_default_params(hsotg); + dwc2_get_device_properties(hsotg); +diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c +index e2eefdd8bf786..09bc917d407d4 100644 +--- a/drivers/usb/gadget/function/f_ncm.c ++++ b/drivers/usb/gadget/function/f_ncm.c +@@ -86,8 +86,10 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f) + /* peak (theoretical) bulk transfer rate in bits-per-second */ + static inline unsigned ncm_bitrate(struct usb_gadget *g) + { +- if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) +- return 13 * 1024 * 8 * 1000 * 8; ++ if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS) ++ return 4250000000U; ++ else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) ++ return 3750000000U; + else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) + return 13 * 512 * 8 * 1000 * 8; + else +diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c +index 0ef00315ec737..39ebc1b03698b 100644 +--- a/drivers/usb/gadget/function/u_ether.c ++++ b/drivers/usb/gadget/function/u_ether.c +@@ -93,7 +93,7 @@ struct eth_dev { + static inline int qlen(struct usb_gadget *gadget, unsigned qmult) + { + if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH || +- gadget->speed == USB_SPEED_SUPER)) ++ gadget->speed >= USB_SPEED_SUPER)) + return qmult * DEFAULT_QLEN; + else + return DEFAULT_QLEN; +diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c +index d414c7a3acf5a..a2f77625b7170 100644 +--- a/drivers/video/backlight/sky81452-backlight.c ++++ b/drivers/video/backlight/sky81452-backlight.c +@@ -207,6 +207,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt( + num_entry); + if (ret < 0) { + dev_err(dev, "led-sources node is invalid.\n"); ++ of_node_put(np); + return ERR_PTR(-EINVAL); + } + +diff --git 
a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c +index e8594bbaea609..c6109a385cac9 100644 +--- a/drivers/video/fbdev/aty/radeon_base.c ++++ b/drivers/video/fbdev/aty/radeon_base.c +@@ -2327,7 +2327,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev, + + ret = radeon_kick_out_firmware_fb(pdev); + if (ret) +- return ret; ++ goto err_release_fb; + + /* request the mem regions */ + ret = pci_request_region(pdev, 0, "radeonfb framebuffer"); +diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c +index dfe3eb769638b..fde27feae5d0c 100644 +--- a/drivers/video/fbdev/sis/init.c ++++ b/drivers/video/fbdev/sis/init.c +@@ -2428,6 +2428,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, + + i = 0; + ++ if (SiS_Pr->ChipType == SIS_730) ++ queuedata = &FQBQData730[0]; ++ else ++ queuedata = &FQBQData[0]; ++ + if(ModeNo > 0x13) { + + /* Get VCLK */ +@@ -2445,12 +2450,6 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, + /* Get half colordepth */ + colorth = colortharray[(SiS_Pr->SiS_ModeType - ModeEGA)]; + +- if(SiS_Pr->ChipType == SIS_730) { +- queuedata = &FQBQData730[0]; +- } else { +- queuedata = &FQBQData[0]; +- } +- + do { + templ = SiS_CalcDelay2(SiS_Pr, queuedata[i]) * VCLK * colorth; + +diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c +index 4b83109202b1c..3c4d20618de4c 100644 +--- a/drivers/video/fbdev/vga16fb.c ++++ b/drivers/video/fbdev/vga16fb.c +@@ -243,7 +243,7 @@ static void vga16fb_update_fix(struct fb_info *info) + } + + static void vga16fb_clock_chip(struct vga16fb_par *par, +- unsigned int pixclock, ++ unsigned int *pixclock, + const struct fb_info *info, + int mul, int div) + { +@@ -259,14 +259,14 @@ static void vga16fb_clock_chip(struct vga16fb_par *par, + { 0 /* bad */, 0x00, 0x00}}; + int err; + +- pixclock = (pixclock * mul) / div; ++ *pixclock = (*pixclock * mul) / div; + best = vgaclocks; +- err = pixclock - best->pixclock; ++ err = *pixclock - best->pixclock; + if (err < 0) err = -err; + for (ptr = vgaclocks + 1; ptr->pixclock; ptr++) { + int tmp; + +- tmp = pixclock - ptr->pixclock; ++ tmp = *pixclock - ptr->pixclock; + if (tmp < 0) tmp = -tmp; + if (tmp < err) { + err = tmp; +@@ -275,7 +275,7 @@ static void vga16fb_clock_chip(struct vga16fb_par *par, + } + par->misc |= best->misc; + par->clkdiv = best->seq_clock_mode; +- pixclock = (best->pixclock * div) / mul; ++ *pixclock = (best->pixclock * div) / mul; + } + + #define FAIL(X) return -EINVAL +@@ -497,10 +497,10 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var, + + if (mode & MODE_8BPP) + /* pixel clock == vga clock / 2 */ +- vga16fb_clock_chip(par, var->pixclock, info, 1, 2); ++ vga16fb_clock_chip(par, &var->pixclock, info, 1, 2); + else + /* pixel clock == vga clock */ +- vga16fb_clock_chip(par, var->pixclock, info, 1, 1); ++ vga16fb_clock_chip(par, &var->pixclock, info, 1, 1); + + var->red.offset = var->green.offset = var->blue.offset = + var->transp.offset = 0; +diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c +index 1bbd910d4ddb8..2a7f7f47fe893 100644 +--- a/drivers/virt/fsl_hypervisor.c ++++ b/drivers/virt/fsl_hypervisor.c +@@ -157,7 +157,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + + unsigned int i; + long ret = 0; +- int num_pinned; /* return value from get_user_pages() */ ++ int num_pinned = 0; /* return value from get_user_pages_fast() */ + phys_addr_t remote_paddr; /* The next address in the remote buffer */ + 
uint32_t count; /* The number of bytes left to copy */ + +@@ -174,7 +174,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + return -EINVAL; + + /* +- * The array of pages returned by get_user_pages() covers only ++ * The array of pages returned by get_user_pages_fast() covers only + * page-aligned memory. Since the user buffer is probably not + * page-aligned, we need to handle the discrepancy. + * +@@ -224,7 +224,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + + /* + * 'pages' is an array of struct page pointers that's initialized by +- * get_user_pages(). ++ * get_user_pages_fast(). + */ + pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); + if (!pages) { +@@ -241,7 +241,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + if (!sg_list_unaligned) { + pr_debug("fsl-hv: could not allocate S/G list\n"); + ret = -ENOMEM; +- goto exit; ++ goto free_pages; + } + sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list)); + +@@ -250,7 +250,6 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + num_pages, param.source != -1, pages); + + if (num_pinned != num_pages) { +- /* get_user_pages() failed */ + pr_debug("fsl-hv: could not lock source buffer\n"); + ret = (num_pinned < 0) ? num_pinned : -EFAULT; + goto exit; +@@ -292,13 +291,13 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + virt_to_phys(sg_list), num_pages); + + exit: +- if (pages) { +- for (i = 0; i < num_pages; i++) +- if (pages[i]) +- put_page(pages[i]); ++ if (pages && (num_pinned > 0)) { ++ for (i = 0; i < num_pinned; i++) ++ put_page(pages[i]); + } + + kfree(sg_list_unaligned); ++free_pages: + kfree(pages); + + if (!ret) +diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c +index 3d19595eb3521..4a9b53229fba2 100644 +--- a/fs/cifs/asn1.c ++++ b/fs/cifs/asn1.c +@@ -541,8 +541,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, + return 0; + } else if ((cls != ASN1_CTX) || (con != ASN1_CON) + || (tag != ASN1_EOC)) { +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n", +- cls, con, tag, end, *end); ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n", ++ cls, con, tag, end); + return 0; + } + +@@ -552,8 +552,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, + return 0; + } else if ((cls != ASN1_UNI) || (con != ASN1_CON) + || (tag != ASN1_SEQ)) { +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n", +- cls, con, tag, end, *end); ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 1\n", ++ cls, con, tag, end); + return 0; + } + +@@ -563,8 +563,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, + return 0; + } else if ((cls != ASN1_CTX) || (con != ASN1_CON) + || (tag != ASN1_EOC)) { +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n", +- cls, con, tag, end, *end); ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n", ++ cls, con, tag, end); + return 0; + } + +@@ -575,8 +575,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, + return 0; + } else if ((cls != ASN1_UNI) || (con != ASN1_CON) + || (tag != ASN1_SEQ)) { +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n", +- cls, con, tag, end, *end); ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d sequence_end = %p exit 1\n", ++ cls, con, tag, sequence_end); + return 0; + } + +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index 3d63c76ed0989..e20d170d13f6b 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -2730,7 +2730,7 @@ 
crypt_message(struct TCP_Server_Info *server, int num_rqst, + if (rc) { + cifs_dbg(VFS, "%s: Could not get %scryption key\n", __func__, + enc ? "en" : "de"); +- return 0; ++ return rc; + } + + rc = smb3_crypto_aead_allocate(server); +diff --git a/fs/proc/base.c b/fs/proc/base.c +index 3b9b726b1a6ca..5e705fa9a913d 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -1035,7 +1035,6 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count, + + static int __set_oom_adj(struct file *file, int oom_adj, bool legacy) + { +- static DEFINE_MUTEX(oom_adj_mutex); + struct mm_struct *mm = NULL; + struct task_struct *task; + int err = 0; +@@ -1075,7 +1074,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy) + struct task_struct *p = find_lock_task_mm(task); + + if (p) { +- if (atomic_read(&p->mm->mm_users) > 1) { ++ if (test_bit(MMF_MULTIPROCESS, &p->mm->flags)) { + mm = p->mm; + mmgrab(mm); + } +diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c +index a73e5b34db418..5d4dc0f84f202 100644 +--- a/fs/quota/quota_v2.c ++++ b/fs/quota/quota_v2.c +@@ -283,6 +283,7 @@ static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot) + d->dqb_curspace = cpu_to_le64(m->dqb_curspace); + d->dqb_btime = cpu_to_le64(m->dqb_btime); + d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id)); ++ d->dqb_pad = 0; + if (qtree_entry_unused(info, dp)) + d->dqb_itime = cpu_to_le64(1); + } +diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c +index b228c821bae68..fe7323032e785 100644 +--- a/fs/xfs/libxfs/xfs_rtbitmap.c ++++ b/fs/xfs/libxfs/xfs_rtbitmap.c +@@ -1020,7 +1020,6 @@ xfs_rtalloc_query_range( + struct xfs_mount *mp = tp->t_mountp; + xfs_rtblock_t rtstart; + xfs_rtblock_t rtend; +- xfs_rtblock_t rem; + int is_free; + int error = 0; + +@@ -1029,13 +1028,12 @@ xfs_rtalloc_query_range( + if (low_rec->ar_startext >= mp->m_sb.sb_rextents || + low_rec->ar_startext == high_rec->ar_startext) + return 0; +- if (high_rec->ar_startext > mp->m_sb.sb_rextents) +- high_rec->ar_startext = mp->m_sb.sb_rextents; ++ high_rec->ar_startext = min(high_rec->ar_startext, ++ mp->m_sb.sb_rextents - 1); + + /* Iterate the bitmap, looking for discrepancies. */ + rtstart = low_rec->ar_startext; +- rem = high_rec->ar_startext - rtstart; +- while (rem) { ++ while (rtstart <= high_rec->ar_startext) { + /* Is the first block free? */ + error = xfs_rtcheck_range(mp, tp, rtstart, 1, 1, &rtend, + &is_free); +@@ -1044,7 +1042,7 @@ xfs_rtalloc_query_range( + + /* How long does the extent go for? */ + error = xfs_rtfind_forw(mp, tp, rtstart, +- high_rec->ar_startext - 1, &rtend); ++ high_rec->ar_startext, &rtend); + if (error) + break; + +@@ -1057,7 +1055,6 @@ xfs_rtalloc_query_range( + break; + } + +- rem -= rtend - rtstart + 1; + rtstart = rtend + 1; + } + +diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c +index 3d76a9e35870a..75b57b683d3e6 100644 +--- a/fs/xfs/xfs_fsmap.c ++++ b/fs/xfs/xfs_fsmap.c +@@ -259,6 +259,9 @@ xfs_getfsmap_helper( + + /* Are we just counting mappings? 
*/ + if (info->head->fmh_count == 0) { ++ if (info->head->fmh_entries == UINT_MAX) ++ return -ECANCELED; ++ + if (rec_daddr > info->next_daddr) + info->head->fmh_entries++; + +diff --git a/include/linux/oom.h b/include/linux/oom.h +index 69864a547663e..3f649be179dad 100644 +--- a/include/linux/oom.h ++++ b/include/linux/oom.h +@@ -45,6 +45,7 @@ struct oom_control { + }; + + extern struct mutex oom_lock; ++extern struct mutex oom_adj_mutex; + + static inline void set_current_oom_origin(void) + { +diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h +index ecdc6542070f1..dfd82eab29025 100644 +--- a/include/linux/sched/coredump.h ++++ b/include/linux/sched/coredump.h +@@ -72,6 +72,7 @@ static inline int get_dumpable(struct mm_struct *mm) + #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ + #define MMF_OOM_VICTIM 25 /* mm is the oom victim */ + #define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */ ++#define MMF_MULTIPROCESS 27 /* mm is shared between processes */ + #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) + + #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ +diff --git a/include/net/ip.h b/include/net/ip.h +index 5b29f357862dc..aad003685c31d 100644 +--- a/include/net/ip.h ++++ b/include/net/ip.h +@@ -399,12 +399,18 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, + bool forwarding) + { + struct net *net = dev_net(dst->dev); ++ unsigned int mtu; + + if (net->ipv4.sysctl_ip_fwd_use_pmtu || + ip_mtu_locked(dst) || + !forwarding) + return dst_mtu(dst); + ++ /* 'forwarding = true' case should always honour route mtu */ ++ mtu = dst_metric_raw(dst, RTAX_MTU); ++ if (mtu) ++ return mtu; ++ + return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU); + } + +diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h +index 0d3920896d502..716db4a0fed89 100644 +--- a/include/net/netfilter/nf_log.h ++++ b/include/net/netfilter/nf_log.h +@@ -108,6 +108,7 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb, + unsigned int logflags); + void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m, + struct sock *sk); ++void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb); + void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf, + unsigned int hooknum, const struct sk_buff *skb, + const struct net_device *in, +diff --git a/kernel/fork.c b/kernel/fork.c +index 1a2d18e98bf99..3ed29bf8eb291 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1647,6 +1647,25 @@ static __always_inline void delayed_free_task(struct task_struct *tsk) + free_task(tsk); + } + ++static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk) ++{ ++ /* Skip if kernel thread */ ++ if (!tsk->mm) ++ return; ++ ++ /* Skip if spawning a thread or using vfork */ ++ if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM) ++ return; ++ ++ /* We need to synchronize with __set_oom_adj */ ++ mutex_lock(&oom_adj_mutex); ++ set_bit(MMF_MULTIPROCESS, &tsk->mm->flags); ++ /* Update the values in case they were changed after copy_signal */ ++ tsk->signal->oom_score_adj = current->signal->oom_score_adj; ++ tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min; ++ mutex_unlock(&oom_adj_mutex); ++} ++ + /* + * This creates a new process as a copy of the old one, + * but does not actually start it yet. 
+@@ -2084,6 +2103,8 @@ static __latent_entropy struct task_struct *copy_process( + trace_task_newtask(p, clone_flags); + uprobe_copy_process(p, clone_flags); + ++ copy_oom_score_adj(clone_flags, p); ++ + return p; + + bad_fork_cancel_cgroup: +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index aa730a3d5c258..87cd5bf1b4874 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -4780,7 +4780,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, + struct page *page = NULL; + swp_entry_t ent = pte_to_swp_entry(ptent); + +- if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) ++ if (!(mc.flags & MOVE_ANON)) + return NULL; + + /* +@@ -4799,6 +4799,9 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, + return page; + } + ++ if (non_swap_entry(ent)) ++ return NULL; ++ + /* + * Because lookup_swap_cache() updates some statistics counter, + * we call find_get_page() with swapper_space directly. +diff --git a/mm/oom_kill.c b/mm/oom_kill.c +index a581fe2a2f1fe..928b3b5e24e6b 100644 +--- a/mm/oom_kill.c ++++ b/mm/oom_kill.c +@@ -62,6 +62,8 @@ int sysctl_oom_dump_tasks = 1; + * and mark_oom_victim + */ + DEFINE_MUTEX(oom_lock); ++/* Serializes oom_score_adj and oom_score_adj_min updates */ ++DEFINE_MUTEX(oom_adj_mutex); + + #ifdef CONFIG_NUMA + /** +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c +index 4efa5e33513e3..59c0b1a86e51b 100644 +--- a/net/ipv4/icmp.c ++++ b/net/ipv4/icmp.c +@@ -244,7 +244,7 @@ static struct { + /** + * icmp_global_allow - Are we allowed to send one more ICMP message ? + * +- * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec. ++ * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec. + * Returns false if we reached the limit and can not send another packet. + * Note: called with BH disabled + */ +@@ -272,7 +272,10 @@ bool icmp_global_allow(void) + } + credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst); + if (credit) { +- credit--; ++ /* We want to use a credit of one in average, but need to randomize ++ * it for security reasons. 
++ */ ++ credit = max_t(int, credit - prandom_u32_max(3), 0); + rc = true; + } + WRITE_ONCE(icmp_global.credit, credit); +diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c +index df5c2a2061a4b..19fff2c589fac 100644 +--- a/net/ipv4/netfilter/nf_log_arp.c ++++ b/net/ipv4/netfilter/nf_log_arp.c +@@ -46,16 +46,31 @@ static void dump_arp_packet(struct nf_log_buf *m, + const struct nf_loginfo *info, + const struct sk_buff *skb, unsigned int nhoff) + { +- const struct arphdr *ah; +- struct arphdr _arph; + const struct arppayload *ap; + struct arppayload _arpp; ++ const struct arphdr *ah; ++ unsigned int logflags; ++ struct arphdr _arph; + + ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph); + if (ah == NULL) { + nf_log_buf_add(m, "TRUNCATED"); + return; + } ++ ++ if (info->type == NF_LOG_TYPE_LOG) ++ logflags = info->u.log.logflags; ++ else ++ logflags = NF_LOG_DEFAULT_MASK; ++ ++ if (logflags & NF_LOG_MACDECODE) { ++ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ", ++ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest); ++ nf_log_dump_vlan(m, skb); ++ nf_log_buf_add(m, "MACPROTO=%04x ", ++ ntohs(eth_hdr(skb)->h_proto)); ++ } ++ + nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d", + ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op)); + +diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c +index 1e6f28c97d3a2..cde1918607e9c 100644 +--- a/net/ipv4/netfilter/nf_log_ipv4.c ++++ b/net/ipv4/netfilter/nf_log_ipv4.c +@@ -287,8 +287,10 @@ static void dump_ipv4_mac_header(struct nf_log_buf *m, + + switch (dev->type) { + case ARPHRD_ETHER: +- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ", +- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, ++ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ", ++ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest); ++ nf_log_dump_vlan(m, skb); ++ nf_log_buf_add(m, "MACPROTO=%04x ", + ntohs(eth_hdr(skb)->h_proto)); + return; + default: +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 3db428242b22d..48081e6d50b4e 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -2634,10 +2634,12 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4, + if (IS_ERR(rt)) + return rt; + +- if (flp4->flowi4_proto) ++ if (flp4->flowi4_proto) { ++ flp4->flowi4_oif = rt->dst.dev->ifindex; + rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst, + flowi4_to_flowi(flp4), + sk, 0); ++ } + + return rt; + } +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 9813d62de631b..c19870d561861 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -5631,6 +5631,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) + tcp_data_snd_check(sk); + if (!inet_csk_ack_scheduled(sk)) + goto no_ack; ++ } else { ++ tcp_update_wl(tp, TCP_SKB_CB(skb)->seq); + } + + __tcp_ack_snd_check(sk, 0); +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c +index b924941b96a31..8b5459b34bc4a 100644 +--- a/net/ipv6/ip6_fib.c ++++ b/net/ipv6/ip6_fib.c +@@ -2417,8 +2417,10 @@ static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos) + iter->skip = *pos; + + if (iter->tbl) { ++ loff_t p = 0; ++ + ipv6_route_seq_setup_walk(iter, net); +- return ipv6_route_seq_next(seq, NULL, pos); ++ return ipv6_route_seq_next(seq, NULL, &p); + } else { + return NULL; + } +diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c +index c6bf580d0f331..c456e2f902b93 100644 +--- a/net/ipv6/netfilter/nf_log_ipv6.c ++++ b/net/ipv6/netfilter/nf_log_ipv6.c +@@ -300,9 +300,11 @@ static void 
dump_ipv6_mac_header(struct nf_log_buf *m, + + switch (dev->type) { + case ARPHRD_ETHER: +- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ", +- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, +- ntohs(eth_hdr(skb)->h_proto)); ++ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ", ++ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest); ++ nf_log_dump_vlan(m, skb); ++ nf_log_buf_add(m, "MACPROTO=%04x ", ++ ntohs(eth_hdr(skb)->h_proto)); + return; + default: + break; +diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c +index 3f75cd947045e..11f7c546e57b3 100644 +--- a/net/netfilter/ipvs/ip_vs_xmit.c ++++ b/net/netfilter/ipvs/ip_vs_xmit.c +@@ -586,6 +586,8 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb, + if (ret == NF_ACCEPT) { + nf_reset(skb); + skb_forward_csum(skb); ++ if (skb->dev) ++ skb->tstamp = 0; + } + return ret; + } +@@ -626,6 +628,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, + + if (!local) { + skb_forward_csum(skb); ++ if (skb->dev) ++ skb->tstamp = 0; + NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb, + NULL, skb_dst(skb)->dev, dst_output); + } else +@@ -646,6 +650,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb, + if (!local) { + ip_vs_drop_early_demux_sk(skb); + skb_forward_csum(skb); ++ if (skb->dev) ++ skb->tstamp = 0; + NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb, + NULL, skb_dst(skb)->dev, dst_output); + } else +diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c +index a8c5c846aec10..b164a0e1e0536 100644 +--- a/net/netfilter/nf_log_common.c ++++ b/net/netfilter/nf_log_common.c +@@ -176,6 +176,18 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf, + } + EXPORT_SYMBOL_GPL(nf_log_dump_packet_common); + ++void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb) ++{ ++ u16 vid; ++ ++ if (!skb_vlan_tag_present(skb)) ++ return; ++ ++ vid = skb_vlan_tag_get(skb); ++ nf_log_buf_add(m, "VPROTO=%04x VID=%u ", ntohs(skb->vlan_proto), vid); ++} ++EXPORT_SYMBOL_GPL(nf_log_dump_vlan); ++ + /* bridge and netdev logging families share this code. */ + void nf_log_l2packet(struct net *net, u_int8_t pf, + __be16 protocol, +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c +index a65a5a5f434b8..310872a9d6602 100644 +--- a/net/nfc/netlink.c ++++ b/net/nfc/netlink.c +@@ -1235,7 +1235,7 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info) + u32 idx; + char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1]; + +- if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) ++ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_FIRMWARE_NAME]) + return -EINVAL; + + idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); +diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c +index e4fc6b2bc29d2..f43234be5695e 100644 +--- a/net/sched/act_tunnel_key.c ++++ b/net/sched/act_tunnel_key.c +@@ -314,7 +314,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, + + metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port, + 0, flags, +- key_id, 0); ++ key_id, opts_len); + } else { + NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst"); + ret = -EINVAL; +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c +index 2c9baf8bf1189..e7a6c8dcf6b8e 100644 +--- a/net/smc/smc_core.c ++++ b/net/smc/smc_core.c +@@ -770,7 +770,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr, + return buf_desc; + } + +-#define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 
6 -> 1MB */ ++#define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */ + + static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr, + bool is_dmb, int bufsize) +diff --git a/net/tipc/msg.c b/net/tipc/msg.c +index b078b77620f18..0b8446cd541ce 100644 +--- a/net/tipc/msg.c ++++ b/net/tipc/msg.c +@@ -140,7 +140,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) + if (fragid == FIRST_FRAGMENT) { + if (unlikely(head)) + goto err; +- frag = skb_unshare(frag, GFP_ATOMIC); ++ if (skb_cloned(frag)) ++ frag = skb_copy(frag, GFP_ATOMIC); + if (unlikely(!frag)) + goto err; + head = *headbuf = frag; +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c +index 575d621305786..dd0fc2aa68759 100644 +--- a/net/tls/tls_device.c ++++ b/net/tls/tls_device.c +@@ -351,13 +351,13 @@ static int tls_push_data(struct sock *sk, + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); + int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST; +- int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE); + struct tls_record_info *record = ctx->open_record; + struct page_frag *pfrag; + size_t orig_size = size; + u32 max_open_record_len; +- int copy, rc = 0; ++ bool more = false; + bool done = false; ++ int copy, rc = 0; + long timeo; + + if (flags & +@@ -422,9 +422,8 @@ handle_error: + if (!size) { + last_record: + tls_push_record_flags = flags; +- if (more) { +- tls_ctx->pending_open_record_frags = +- record->num_frags; ++ if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) { ++ more = true; + break; + } + +@@ -445,6 +444,8 @@ last_record: + } + } while (!done); + ++ tls_ctx->pending_open_record_frags = more; ++ + if (orig_size - size > 0) + rc = orig_size - size; + +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index 4e41792099822..fbc8875502c3e 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -1950,7 +1950,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, + * case we'll continue with more data in the next round, + * but break unconditionally so unsplit data stops here. 
+ */
+- state->split_start++;
++ if (state->split)
++ state->split_start++;
++ else
++ state->split_start = 0;
+ break;
+ case 9:
+ if (rdev->wiphy.extended_capabilities &&
+diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
+index c5dd05ace28cf..f4f3de5f06ca5 100644
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -682,6 +682,8 @@ static int ima_calc_boot_aggregate_tfm(char *digest,
+ ima_pcrread(i, pcr_i);
+ /* now accumulate with current aggregate */
+ rc = crypto_shash_update(shash, pcr_i, TPM_DIGEST_SIZE);
++ if (rc != 0)
++ return rc;
+ }
+ if (!rc)
+ crypto_shash_final(shash, digest);
+diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
+index ed5bca0db3e73..f4a9d9972330b 100644
+--- a/sound/core/seq/oss/seq_oss.c
++++ b/sound/core/seq/oss/seq_oss.c
+@@ -187,9 +187,12 @@ odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ if (snd_BUG_ON(!dp))
+ return -ENXIO;
+
+- mutex_lock(&register_mutex);
++ if (cmd != SNDCTL_SEQ_SYNC &&
++ mutex_lock_interruptible(&register_mutex))
++ return -ERESTARTSYS;
+ rc = snd_seq_oss_ioctl(dp, cmd, arg);
+- mutex_unlock(&register_mutex);
++ if (cmd != SNDCTL_SEQ_SYNC)
++ mutex_unlock(&register_mutex);
+ return rc;
+ }
+
+diff --git a/sound/firewire/bebob/bebob_hwdep.c b/sound/firewire/bebob/bebob_hwdep.c
+index 04c321e08c626..a04b5880926cb 100644
+--- a/sound/firewire/bebob/bebob_hwdep.c
++++ b/sound/firewire/bebob/bebob_hwdep.c
+@@ -37,12 +37,11 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
+ }
+
+ memset(&event, 0, sizeof(event));
++ count = min_t(long, count, sizeof(event.lock_status));
+ if (bebob->dev_lock_changed) {
+ event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
+ event.lock_status.status = (bebob->dev_lock_count > 0);
+ bebob->dev_lock_changed = false;
+-
+- count = min_t(long, count, sizeof(event.lock_status));
+ }
+
+ spin_unlock_irq(&bebob->lock);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 24bc9e4460473..382a8d179eb0d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1906,6 +1906,8 @@ enum {
+ ALC1220_FIXUP_CLEVO_P950,
+ ALC1220_FIXUP_CLEVO_PB51ED,
+ ALC1220_FIXUP_CLEVO_PB51ED_PINS,
++ ALC887_FIXUP_ASUS_AUDIO,
++ ALC887_FIXUP_ASUS_HMIC,
+ };
+
+ static void alc889_fixup_coef(struct hda_codec *codec,
+@@ -2118,6 +2120,31 @@ static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
+ alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
+ }
+
++static void alc887_asus_hp_automute_hook(struct hda_codec *codec,
++ struct hda_jack_callback *jack)
++{
++ struct alc_spec *spec = codec->spec;
++ unsigned int vref;
++
++ snd_hda_gen_hp_automute(codec, jack);
++
++ if (spec->gen.hp_jack_present)
++ vref = AC_PINCTL_VREF_80;
++ else
++ vref = AC_PINCTL_VREF_HIZ;
++ snd_hda_set_pin_ctl(codec, 0x19, PIN_HP | vref);
++}
++
++static void alc887_fixup_asus_jack(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ struct alc_spec *spec = codec->spec;
++ if (action != HDA_FIXUP_ACT_PROBE)
++ return;
++ snd_hda_set_pin_ctl_cache(codec, 0x1b, PIN_HP);
++ spec->gen.hp_automute_hook = alc887_asus_hp_automute_hook;
++}
++
+ static const struct hda_fixup alc882_fixups[] = {
+ [ALC882_FIXUP_ABIT_AW9D_MAX] = {
+ .type = HDA_FIXUP_PINS,
+@@ -2375,6 +2402,20 @@ static const struct hda_fixup alc882_fixups[] = {
+ .chained = true,
+ .chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
+ },
++ [ALC887_FIXUP_ASUS_AUDIO] = {
++ .type =
HDA_FIXUP_PINS, ++ .v.pins = (const struct hda_pintbl[]) { ++ { 0x15, 0x02a14150 }, /* use as headset mic, without its own jack detect */ ++ { 0x19, 0x22219420 }, ++ {} ++ }, ++ }, ++ [ALC887_FIXUP_ASUS_HMIC] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc887_fixup_asus_jack, ++ .chained = true, ++ .chain_id = ALC887_FIXUP_ASUS_AUDIO, ++ }, + }; + + static const struct snd_pci_quirk alc882_fixup_tbl[] = { +@@ -2408,6 +2449,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD), + SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V), + SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC), ++ SND_PCI_QUIRK(0x1043, 0x2390, "Asus D700SA", ALC887_FIXUP_ASUS_HMIC), + SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), + SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS), + SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3), +diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c +index 292b103abada9..475579a9830a3 100644 +--- a/sound/soc/qcom/lpass-cpu.c ++++ b/sound/soc/qcom/lpass-cpu.c +@@ -182,21 +182,6 @@ static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream, + return 0; + } + +-static int lpass_cpu_daiops_hw_free(struct snd_pcm_substream *substream, +- struct snd_soc_dai *dai) +-{ +- struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai); +- int ret; +- +- ret = regmap_write(drvdata->lpaif_map, +- LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id), +- 0); +- if (ret) +- dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret); +- +- return ret; +-} +- + static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) + { +@@ -277,7 +262,6 @@ const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = { + .startup = lpass_cpu_daiops_startup, + .shutdown = lpass_cpu_daiops_shutdown, + .hw_params = lpass_cpu_daiops_hw_params, +- .hw_free = lpass_cpu_daiops_hw_free, + .prepare = lpass_cpu_daiops_prepare, + .trigger = lpass_cpu_daiops_trigger, + }; +diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c +index d07271ea4c451..2f29672477892 100644 +--- a/sound/soc/qcom/lpass-platform.c ++++ b/sound/soc/qcom/lpass-platform.c +@@ -69,7 +69,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream) + int ret, dma_ch, dir = substream->stream; + struct lpass_pcm_data *data; + +- data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL); ++ data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + +@@ -127,6 +127,7 @@ static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream) + if (v->free_dma_channel) + v->free_dma_channel(drvdata, data->dma_ch); + ++ kfree(data); + return 0; + } + |