-rw-r--r--   0000_README               |    4
-rw-r--r--   1005_linux-4.20.6.patch   | 4954
2 files changed, 4958 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 32a3dd6f..8bd41638 100644 --- a/0000_README +++ b/0000_README @@ -63,6 +63,10 @@ Patch: 1004_linux-4.20.5.patch From: http://www.kernel.org Desc: Linux 4.20.5 +Patch: 1005_linux-4.20.6.patch +From: http://www.kernel.org +Desc: Linux 4.20.6 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1005_linux-4.20.6.patch b/1005_linux-4.20.6.patch new file mode 100644 index 00000000..231d7f33 --- /dev/null +++ b/1005_linux-4.20.6.patch @@ -0,0 +1,4954 @@ +diff --git a/Makefile b/Makefile +index 690f6a9d9f1b..523922ea9c97 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 20 +-SUBLEVEL = 5 ++SUBLEVEL = 6 + EXTRAVERSION = + NAME = Shy Crocodile + +diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h +index 9185541035cc..6958545390f0 100644 +--- a/arch/arc/include/asm/perf_event.h ++++ b/arch/arc/include/asm/perf_event.h +@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = { + + /* counts condition */ + [PERF_COUNT_HW_INSTRUCTIONS] = "iall", +- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */ ++ /* All jump instructions that are taken */ ++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak", + [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */ + #ifdef CONFIG_ISA_ARCV2 + [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp", +diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S +index 62ad4bcb841a..f230bb7092fd 100644 +--- a/arch/arc/lib/memset-archs.S ++++ b/arch/arc/lib/memset-archs.S +@@ -7,11 +7,39 @@ + */ + + #include <linux/linkage.h> ++#include <asm/cache.h> + +-#undef PREALLOC_NOT_AVAIL ++/* ++ * The memset implementation below is optimized to use prefetchw and prealloc ++ * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6) ++ * If you want to implement optimized memset for other possible L1 data cache ++ * line lengths (32B and 128B) you should rewrite code carefully checking ++ * we don't call any prefetchw/prealloc instruction for L1 cache lines which ++ * don't belongs to memset area. 
++ */ ++ ++#if L1_CACHE_SHIFT == 6 ++ ++.macro PREALLOC_INSTR reg, off ++ prealloc [\reg, \off] ++.endm ++ ++.macro PREFETCHW_INSTR reg, off ++ prefetchw [\reg, \off] ++.endm ++ ++#else ++ ++.macro PREALLOC_INSTR ++.endm ++ ++.macro PREFETCHW_INSTR ++.endm ++ ++#endif + + ENTRY_CFI(memset) +- prefetchw [r0] ; Prefetch the write location ++ PREFETCHW_INSTR r0, 0 ; Prefetch the first write location + mov.f 0, r2 + ;;; if size is zero + jz.d [blink] +@@ -48,11 +76,8 @@ ENTRY_CFI(memset) + + lpnz @.Lset64bytes + ;; LOOP START +-#ifdef PREALLOC_NOT_AVAIL +- prefetchw [r3, 64] ;Prefetch the next write location +-#else +- prealloc [r3, 64] +-#endif ++ PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching ++ + #ifdef CONFIG_ARC_HAS_LL64 + std.ab r4, [r3, 8] + std.ab r4, [r3, 8] +@@ -85,7 +110,6 @@ ENTRY_CFI(memset) + lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes + lpnz .Lset32bytes + ;; LOOP START +- prefetchw [r3, 32] ;Prefetch the next write location + #ifdef CONFIG_ARC_HAS_LL64 + std.ab r4, [r3, 8] + std.ab r4, [r3, 8] +diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c +index f8fe5668b30f..a56e6a8ed259 100644 +--- a/arch/arc/mm/init.c ++++ b/arch/arc/mm/init.c +@@ -137,7 +137,8 @@ void __init setup_arch_memory(void) + */ + + memblock_add_node(low_mem_start, low_mem_sz, 0); +- memblock_reserve(low_mem_start, __pa(_end) - low_mem_start); ++ memblock_reserve(CONFIG_LINUX_LINK_BASE, ++ __pa(_end) - CONFIG_LINUX_LINK_BASE); + + #ifdef CONFIG_BLK_DEV_INITRD + if (initrd_start) +diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S +index 19516fbc2c55..5461d589a1e2 100644 +--- a/arch/arm/mm/proc-macros.S ++++ b/arch/arm/mm/proc-macros.S +@@ -278,7 +278,7 @@ + * If we are building for big.Little with branch predictor hardening, + * we need the processor function tables to remain available after boot. + */ +-#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) + .section ".rodata" + #endif + .type \name\()_processor_functions, #object +@@ -316,7 +316,7 @@ ENTRY(\name\()_processor_functions) + .endif + + .size \name\()_processor_functions, . 
- \name\()_processor_functions +-#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) + .previous + #endif + .endm +diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h +index ccbb53e22024..8d04e6f3f796 100644 +--- a/arch/s390/include/asm/mmu_context.h ++++ b/arch/s390/include/asm/mmu_context.h +@@ -25,7 +25,7 @@ static inline int init_new_context(struct task_struct *tsk, + atomic_set(&mm->context.flush_count, 0); + mm->context.gmap_asce = 0; + mm->context.flush_mm = 0; +- mm->context.compat_mm = 0; ++ mm->context.compat_mm = test_thread_flag(TIF_31BIT); + #ifdef CONFIG_PGSTE + mm->context.alloc_pgste = page_table_allocate_pgste || + test_thread_flag(TIF_PGSTE) || +@@ -90,8 +90,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + { + int cpu = smp_processor_id(); + +- if (prev == next) +- return; + S390_lowcore.user_asce = next->context.asce; + cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); + /* Clear previous user-ASCE from CR1 and CR7 */ +@@ -103,7 +101,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + __ctl_load(S390_lowcore.vdso_asce, 7, 7); + clear_cpu_flag(CIF_ASCE_SECONDARY); + } +- cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); ++ if (prev != next) ++ cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); + } + + #define finish_arch_post_lock_switch finish_arch_post_lock_switch +diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c +index af5c2b3f7065..a8c7789b246b 100644 +--- a/arch/s390/kernel/early.c ++++ b/arch/s390/kernel/early.c +@@ -63,10 +63,10 @@ static noinline __init void detect_machine_type(void) + if (stsi(vmms, 3, 2, 2) || !vmms->count) + return; + +- /* Running under KVM? If not we assume z/VM */ ++ /* Detect known hypervisors */ + if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) + S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; +- else ++ else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4)) + S390_lowcore.machine_flags |= MACHINE_FLAG_VM; + } + +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c +index 72dd23ef771b..7ed90a759135 100644 +--- a/arch/s390/kernel/setup.c ++++ b/arch/s390/kernel/setup.c +@@ -1006,6 +1006,8 @@ void __init setup_arch(char **cmdline_p) + pr_info("Linux is running under KVM in 64-bit mode\n"); + else if (MACHINE_IS_LPAR) + pr_info("Linux is running natively in 64-bit mode\n"); ++ else ++ pr_info("Linux is running as a guest in 64-bit mode\n"); + + /* Have one command line that is parsed and saved in /proc/cmdline */ + /* boot_command_line has been already set up in early.c */ +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c +index f82b3d3c36e2..b198ece2aad6 100644 +--- a/arch/s390/kernel/smp.c ++++ b/arch/s390/kernel/smp.c +@@ -381,8 +381,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data) + */ + void smp_call_ipl_cpu(void (*func)(void *), void *data) + { ++ struct lowcore *lc = pcpu_devices->lowcore; ++ ++ if (pcpu_devices[0].address == stap()) ++ lc = &S390_lowcore; ++ + pcpu_delegate(&pcpu_devices[0], func, data, +- pcpu_devices->lowcore->nodat_stack); ++ lc->nodat_stack); + } + + int smp_find_processor_id(u16 address) +@@ -1166,7 +1171,11 @@ static ssize_t __ref rescan_store(struct device *dev, + { + int rc; + ++ rc = lock_device_hotplug_sysfs(); ++ if (rc) ++ return rc; + rc = smp_rescan_cpus(); ++ unlock_device_hotplug(); + return rc ? 
rc : count; + } + static DEVICE_ATTR_WO(rescan); +diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c +index ebe748a9f472..4ff354887db4 100644 +--- a/arch/s390/kernel/vdso.c ++++ b/arch/s390/kernel/vdso.c +@@ -224,10 +224,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) + + vdso_pages = vdso64_pages; + #ifdef CONFIG_COMPAT +- if (is_compat_task()) { ++ mm->context.compat_mm = is_compat_task(); ++ if (mm->context.compat_mm) + vdso_pages = vdso32_pages; +- mm->context.compat_mm = 1; +- } + #endif + /* + * vDSO has a problem and was disabled, just don't "enable" it for +diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S +index 8eaf8952c408..39913770a44d 100644 +--- a/arch/x86/entry/entry_64_compat.S ++++ b/arch/x86/entry/entry_64_compat.S +@@ -361,7 +361,8 @@ ENTRY(entry_INT80_compat) + + /* Need to switch before accessing the thread stack. */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi +- movq %rsp, %rdi ++ /* In the Xen PV case we already run on the thread stack. */ ++ ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp + + pushq 6*8(%rdi) /* regs->ss */ +@@ -370,8 +371,9 @@ ENTRY(entry_INT80_compat) + pushq 3*8(%rdi) /* regs->cs */ + pushq 2*8(%rdi) /* regs->ip */ + pushq 1*8(%rdi) /* regs->orig_ax */ +- + pushq (%rdi) /* pt_regs->di */ ++.Lint80_keep_stack: ++ + pushq %rsi /* pt_regs->si */ + xorl %esi, %esi /* nospec si */ + pushq %rdx /* pt_regs->dx */ +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h +index 0ca50611e8ce..19d18fae6ec6 100644 +--- a/arch/x86/include/asm/mmu_context.h ++++ b/arch/x86/include/asm/mmu_context.h +@@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next) + + void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); + ++/* ++ * Init a new mm. Used on mm copies, like at fork() ++ * and on mm's that are brand-new, like at execve(). ++ */ + static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) + { +@@ -228,8 +232,22 @@ do { \ + } while (0) + #endif + ++static inline void arch_dup_pkeys(struct mm_struct *oldmm, ++ struct mm_struct *mm) ++{ ++#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS ++ if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) ++ return; ++ ++ /* Duplicate the oldmm pkey state in mm: */ ++ mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map; ++ mm->context.execute_only_pkey = oldmm->context.execute_only_pkey; ++#endif ++} ++ + static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) + { ++ arch_dup_pkeys(oldmm, mm); + paravirt_arch_dup_mmap(oldmm, mm); + return ldt_dup_context(oldmm, mm); + } +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c +index ba4bfb7f6a36..5c93a65ee1e5 100644 +--- a/arch/x86/kernel/kvm.c ++++ b/arch/x86/kernel/kvm.c +@@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) + #else + u64 ipi_bitmap = 0; + #endif ++ long ret; + + if (cpumask_empty(mask)) + return; +@@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) + } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) { + max = apic_id < max ? 
max : apic_id; + } else { +- kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, ++ ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, + (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); ++ WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret); + min = max = apic_id; + ipi_bitmap = 0; + } +@@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) + } + + if (ipi_bitmap) { +- kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, ++ ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, + (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); ++ WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret); + } + + local_irq_restore(flags); +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 95784bc4a53c..5a2c87552122 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -8315,11 +8315,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) + if (r < 0) + goto out_vmcs02; + +- vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); ++ vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL); + if (!vmx->nested.cached_vmcs12) + goto out_cached_vmcs12; + +- vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); ++ vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL); + if (!vmx->nested.cached_shadow_vmcs12) + goto out_cached_shadow_vmcs12; + +@@ -14853,13 +14853,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, + copy_shadow_to_vmcs12(vmx); + } + +- if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12))) ++ /* ++ * Copy over the full allocated size of vmcs12 rather than just the size ++ * of the struct. ++ */ ++ if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE)) + return -EFAULT; + + if (nested_cpu_has_shadow_vmcs(vmcs12) && + vmcs12->vmcs_link_pointer != -1ull) { + if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE, +- get_shadow_vmcs12(vcpu), sizeof(*vmcs12))) ++ get_shadow_vmcs12(vcpu), VMCS12_SIZE)) + return -EFAULT; + } + +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index f049ecfac7bb..4247cb230bd3 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -6407,8 +6407,7 @@ restart: + toggle_interruptibility(vcpu, ctxt->interruptibility); + vcpu->arch.emulate_regs_need_sync_to_vcpu = false; + kvm_rip_write(vcpu, ctxt->eip); +- if (r == EMULATE_DONE && +- (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) ++ if (r == EMULATE_DONE && ctxt->tf) + kvm_vcpu_do_singlestep(vcpu, &r); + if (!ctxt->have_exception || + exception_type(ctxt->exception.vector) == EXCPT_TRAP) +@@ -6998,10 +6997,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) + case KVM_HC_CLOCK_PAIRING: + ret = kvm_pv_clock_pairing(vcpu, a0, a1); + break; ++#endif + case KVM_HC_SEND_IPI: + ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); + break; +-#endif + default: + ret = -KVM_ENOSYS; + break; +diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c +index 79778ab200e4..a53665116458 100644 +--- a/arch/x86/lib/kaslr.c ++++ b/arch/x86/lib/kaslr.c +@@ -36,8 +36,8 @@ static inline u16 i8254(void) + u16 status, timer; + + do { +- outb(I8254_PORT_CONTROL, +- I8254_CMD_READBACK | I8254_SELECT_COUNTER0); ++ outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, ++ I8254_PORT_CONTROL); + status = inb(I8254_PORT_COUNTER0); + timer = inb(I8254_PORT_COUNTER0); + timer |= inb(I8254_PORT_COUNTER0) << 8; +diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c +index 5912d30020c7..8535e7999769 100644 +--- 
a/drivers/acpi/nfit/core.c ++++ b/drivers/acpi/nfit/core.c +@@ -394,6 +394,32 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func) + return id; + } + ++static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, ++ struct nd_cmd_pkg *call_pkg) ++{ ++ if (call_pkg) { ++ int i; ++ ++ if (nfit_mem->family != call_pkg->nd_family) ++ return -ENOTTY; ++ ++ for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) ++ if (call_pkg->nd_reserved2[i]) ++ return -EINVAL; ++ return call_pkg->nd_command; ++ } ++ ++ /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */ ++ if (nfit_mem->family == NVDIMM_FAMILY_INTEL) ++ return cmd; ++ ++ /* ++ * Force function number validation to fail since 0 is never ++ * published as a valid function in dsm_mask. ++ */ ++ return 0; ++} ++ + int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, + unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) + { +@@ -407,30 +433,23 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, + unsigned long cmd_mask, dsm_mask; + u32 offset, fw_status = 0; + acpi_handle handle; +- unsigned int func; + const guid_t *guid; +- int rc, i; ++ int func, rc, i; + + if (cmd_rc) + *cmd_rc = -EINVAL; +- func = cmd; +- if (cmd == ND_CMD_CALL) { +- call_pkg = buf; +- func = call_pkg->nd_command; +- +- for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) +- if (call_pkg->nd_reserved2[i]) +- return -EINVAL; +- } + + if (nvdimm) { + struct acpi_device *adev = nfit_mem->adev; + + if (!adev) + return -ENOTTY; +- if (call_pkg && nfit_mem->family != call_pkg->nd_family) +- return -ENOTTY; + ++ if (cmd == ND_CMD_CALL) ++ call_pkg = buf; ++ func = cmd_to_func(nfit_mem, cmd, call_pkg); ++ if (func < 0) ++ return func; + dimm_name = nvdimm_name(nvdimm); + cmd_name = nvdimm_cmd_name(cmd); + cmd_mask = nvdimm_cmd_mask(nvdimm); +@@ -441,6 +460,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, + } else { + struct acpi_device *adev = to_acpi_dev(acpi_desc); + ++ func = cmd; + cmd_name = nvdimm_bus_cmd_name(cmd); + cmd_mask = nd_desc->cmd_mask; + dsm_mask = cmd_mask; +@@ -455,7 +475,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, + if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) + return -ENOTTY; + +- if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask)) ++ /* ++ * Check for a valid command. For ND_CMD_CALL, we also have to ++ * make sure that the DSM function is supported. ++ */ ++ if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask)) ++ return -ENOTTY; ++ else if (!test_bit(cmd, &cmd_mask)) + return -ENOTTY; + + in_obj.type = ACPI_TYPE_PACKAGE; +@@ -1844,6 +1870,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, + return 0; + } + ++ /* ++ * Function 0 is the command interrogation function, don't ++ * export it to potential userspace use, and enable it to be ++ * used as an error value in acpi_nfit_ctl(). 
++ */ ++ dsm_mask &= ~1UL; ++ + guid = to_nfit_uuid(nfit_mem->family); + for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) + if (acpi_check_dsm(adev_dimm->handle, guid, +diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c +index b5e3103c1175..e43c876a9223 100644 +--- a/drivers/char/mwave/mwavedd.c ++++ b/drivers/char/mwave/mwavedd.c +@@ -59,6 +59,7 @@ + #include <linux/mutex.h> + #include <linux/delay.h> + #include <linux/serial_8250.h> ++#include <linux/nospec.h> + #include "smapi.h" + #include "mwavedd.h" + #include "3780i.h" +@@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, + ipcnum); + return -EINVAL; + } ++ ipcnum = array_index_nospec(ipcnum, ++ ARRAY_SIZE(pDrvData->IPCs)); + PRINTK_3(TRACE_MWAVE, + "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" + " ipcnum %x entry usIntCount %x\n", +@@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, + " Invalid ipcnum %x\n", ipcnum); + return -EINVAL; + } ++ ipcnum = array_index_nospec(ipcnum, ++ ARRAY_SIZE(pDrvData->IPCs)); + PRINTK_3(TRACE_MWAVE, + "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC" + " ipcnum %x, usIntCount %x\n", +@@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, + ipcnum); + return -EINVAL; + } ++ ipcnum = array_index_nospec(ipcnum, ++ ARRAY_SIZE(pDrvData->IPCs)); + mutex_lock(&mwave_mutex); + if (pDrvData->IPCs[ipcnum].bIsEnabled == true) { + pDrvData->IPCs[ipcnum].bIsEnabled = false; +diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c +index 2d5d8b43727e..c4d0b6f6abf2 100644 +--- a/drivers/clk/socfpga/clk-pll-s10.c ++++ b/drivers/clk/socfpga/clk-pll-s10.c +@@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, + /* Read mdiv and fdiv from the fdbck register */ + reg = readl(socfpgaclk->hw.reg + 0x4); + mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT; +- vco_freq = (unsigned long long)parent_rate * (mdiv + 6); ++ vco_freq = (unsigned long long)vco_freq * (mdiv + 6); + + return (unsigned long)vco_freq; + } +diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c +index 5b238fc314ac..8281dfbf38c2 100644 +--- a/drivers/clk/socfpga/clk-s10.c ++++ b/drivers/clk/socfpga/clk-s10.c +@@ -12,17 +12,17 @@ + + #include "stratix10-clk.h" + +-static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk", +- "f2s_free_clk",}; ++static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk", ++ "f2s-free-clk",}; + static const char * const cntr_mux[] = { "main_pll", "periph_pll", +- "osc1", "cb_intosc_hs_div2_clk", +- "f2s_free_clk"}; +-static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",}; ++ "osc1", "cb-intosc-hs-div2-clk", ++ "f2s-free-clk"}; ++static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",}; + + static const char * const noc_free_mux[] = {"main_noc_base_clk", + "peri_noc_base_clk", +- "osc1", "cb_intosc_hs_div2_clk", +- "f2s_free_clk"}; ++ "osc1", "cb-intosc-hs-div2-clk", ++ "f2s-free-clk"}; + + static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"}; + static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"}; +@@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk" + static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"}; + static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",}; + +-static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"}; 
++static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"}; + static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"}; + static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"}; + + static const char * const mpu_free_mux[] = {"main_mpu_base_clk", + "peri_mpu_base_clk", +- "osc1", "cb_intosc_hs_div2_clk", +- "f2s_free_clk"}; ++ "osc1", "cb-intosc-hs-div2-clk", ++ "f2s-free-clk"}; + + /* clocks in AO (always on) controller */ + static const struct stratix10_pll_clock s10_pll_clks[] = { +diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c +index f65cc0ff76ab..b0908ec62f73 100644 +--- a/drivers/clk/zynqmp/clkc.c ++++ b/drivers/clk/zynqmp/clkc.c +@@ -669,8 +669,8 @@ static int zynqmp_clk_setup(struct device_node *np) + if (ret) + return ret; + +- zynqmp_data = kzalloc(sizeof(*zynqmp_data) + sizeof(*zynqmp_data) * +- clock_max_idx, GFP_KERNEL); ++ zynqmp_data = kzalloc(struct_size(zynqmp_data, hws, clock_max_idx), ++ GFP_KERNEL); + if (!zynqmp_data) + return -ENOMEM; + +diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h +index 4213cb0bb2a7..f8664bac9fa8 100644 +--- a/drivers/edac/altera_edac.h ++++ b/drivers/edac/altera_edac.h +@@ -295,8 +295,8 @@ struct altr_sdram_mc_data { + #define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0 + + /* Sticky registers for Uncorrected Errors */ +-#define S10_SYSMGR_UE_VAL_OFST 0x120 +-#define S10_SYSMGR_UE_ADDR_OFST 0x124 ++#define S10_SYSMGR_UE_VAL_OFST 0x220 ++#define S10_SYSMGR_UE_ADDR_OFST 0x224 + + #define S10_DDR0_IRQ_MASK BIT(16) + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +index a028661d9e20..92b11de19581 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +@@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { + { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, ++ { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0, 0, 0, 0, 0 }, + }; + +diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c +index 191b314f9e9e..709475d5cc30 100644 +--- a/drivers/gpu/drm/meson/meson_crtc.c ++++ b/drivers/gpu/drm/meson/meson_crtc.c +@@ -45,7 +45,6 @@ struct meson_crtc { + struct drm_crtc base; + struct drm_pending_vblank_event *event; + struct meson_drm *priv; +- bool enabled; + }; + #define to_meson_crtc(x) container_of(x, struct meson_crtc, base) + +@@ -81,7 +80,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = { + + }; + +-static void meson_crtc_enable(struct drm_crtc *crtc) ++static void meson_crtc_atomic_enable(struct drm_crtc *crtc, ++ struct drm_crtc_state *old_state) + { + struct meson_crtc *meson_crtc = to_meson_crtc(crtc); + struct drm_crtc_state *crtc_state = crtc->state; +@@ -103,20 +103,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc) + + drm_crtc_vblank_on(crtc); + +- meson_crtc->enabled = true; +-} +- +-static void meson_crtc_atomic_enable(struct drm_crtc *crtc, +- struct drm_crtc_state *old_state) +-{ +- struct meson_crtc *meson_crtc = to_meson_crtc(crtc); +- struct meson_drm *priv = meson_crtc->priv; +- +- DRM_DEBUG_DRIVER("\n"); +- +- if (!meson_crtc->enabled) +- meson_crtc_enable(crtc); +- + priv->viu.osd1_enabled = true; + } + +@@ -142,8 +128,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc, + 
+ crtc->state->event = NULL; + } +- +- meson_crtc->enabled = false; + } + + static void meson_crtc_atomic_begin(struct drm_crtc *crtc, +@@ -152,9 +136,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc, + struct meson_crtc *meson_crtc = to_meson_crtc(crtc); + unsigned long flags; + +- if (crtc->state->enable && !meson_crtc->enabled) +- meson_crtc_enable(crtc); +- + if (crtc->state->event) { + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + +diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c +index d3443125e661..bf5f294f172f 100644 +--- a/drivers/gpu/drm/meson/meson_drv.c ++++ b/drivers/gpu/drm/meson/meson_drv.c +@@ -82,6 +82,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = { + .fb_create = drm_gem_fb_create, + }; + ++static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = { ++ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, ++}; ++ + static irqreturn_t meson_irq(int irq, void *arg) + { + struct drm_device *dev = arg; +@@ -246,6 +250,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) + drm->mode_config.max_width = 3840; + drm->mode_config.max_height = 2160; + drm->mode_config.funcs = &meson_mode_config_funcs; ++ drm->mode_config.helper_private = &meson_mode_config_helpers; + + /* Hardware Initialization */ + +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c +index fe00b12e4417..bea4c9850247 100644 +--- a/drivers/hv/channel.c ++++ b/drivers/hv/channel.c +@@ -701,20 +701,12 @@ static int vmbus_close_internal(struct vmbus_channel *channel) + int vmbus_disconnect_ring(struct vmbus_channel *channel) + { + struct vmbus_channel *cur_channel, *tmp; +- unsigned long flags; +- LIST_HEAD(list); + int ret; + + if (channel->primary_channel != NULL) + return -EINVAL; + +- /* Snapshot the list of subchannels */ +- spin_lock_irqsave(&channel->lock, flags); +- list_splice_init(&channel->sc_list, &list); +- channel->num_sc = 0; +- spin_unlock_irqrestore(&channel->lock, flags); +- +- list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) { ++ list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) { + if (cur_channel->rescind) + wait_for_completion(&cur_channel->rescind_event); + +diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c +index edd34c167a9b..d01689079e9b 100644 +--- a/drivers/hv/channel_mgmt.c ++++ b/drivers/hv/channel_mgmt.c +@@ -405,7 +405,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel) + primary_channel = channel->primary_channel; + spin_lock_irqsave(&primary_channel->lock, flags); + list_del(&channel->sc_list); +- primary_channel->num_sc--; + spin_unlock_irqrestore(&primary_channel->lock, flags); + } + +@@ -1302,49 +1301,6 @@ cleanup: + return ret; + } + +-/* +- * Retrieve the (sub) channel on which to send an outgoing request. +- * When a primary channel has multiple sub-channels, we try to +- * distribute the load equally amongst all available channels. 
+- */ +-struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary) +-{ +- struct list_head *cur, *tmp; +- int cur_cpu; +- struct vmbus_channel *cur_channel; +- struct vmbus_channel *outgoing_channel = primary; +- int next_channel; +- int i = 1; +- +- if (list_empty(&primary->sc_list)) +- return outgoing_channel; +- +- next_channel = primary->next_oc++; +- +- if (next_channel > (primary->num_sc)) { +- primary->next_oc = 0; +- return outgoing_channel; +- } +- +- cur_cpu = hv_cpu_number_to_vp_number(smp_processor_id()); +- list_for_each_safe(cur, tmp, &primary->sc_list) { +- cur_channel = list_entry(cur, struct vmbus_channel, sc_list); +- if (cur_channel->state != CHANNEL_OPENED_STATE) +- continue; +- +- if (cur_channel->target_vp == cur_cpu) +- return cur_channel; +- +- if (i == next_channel) +- return cur_channel; +- +- i++; +- } +- +- return outgoing_channel; +-} +-EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel); +- + static void invoke_sc_cb(struct vmbus_channel *primary_channel) + { + struct list_head *cur, *tmp; +diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c +index 41631512ae97..7b9fbd84d6df 100644 +--- a/drivers/hv/hv_balloon.c ++++ b/drivers/hv/hv_balloon.c +@@ -888,12 +888,14 @@ static unsigned long handle_pg_range(unsigned long pg_start, + pfn_cnt -= pgs_ol; + /* + * Check if the corresponding memory block is already +- * online by checking its last previously backed page. +- * In case it is we need to bring rest (which was not +- * backed previously) online too. ++ * online. It is possible to observe struct pages still ++ * being uninitialized here so check section instead. ++ * In case the section is online we need to bring the ++ * rest of pfns (which were not backed previously) ++ * online too. + */ + if (start_pfn > has->start_pfn && +- !PageReserved(pfn_to_page(start_pfn - 1))) ++ online_section_nr(pfn_to_section_nr(start_pfn))) + hv_bring_pgs_online(has, start_pfn, pgs_ol); + + } +diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c +index 64d0c85d5161..1f1a55e07733 100644 +--- a/drivers/hv/ring_buffer.c ++++ b/drivers/hv/ring_buffer.c +@@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi, + } + + /* Get various debug metrics for the specified ring buffer. 
*/ +-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, +- struct hv_ring_buffer_debug_info *debug_info) ++int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, ++ struct hv_ring_buffer_debug_info *debug_info) + { + u32 bytes_avail_towrite; + u32 bytes_avail_toread; + +- if (ring_info->ring_buffer) { +- hv_get_ringbuffer_availbytes(ring_info, +- &bytes_avail_toread, +- &bytes_avail_towrite); +- +- debug_info->bytes_avail_toread = bytes_avail_toread; +- debug_info->bytes_avail_towrite = bytes_avail_towrite; +- debug_info->current_read_index = +- ring_info->ring_buffer->read_index; +- debug_info->current_write_index = +- ring_info->ring_buffer->write_index; +- debug_info->current_interrupt_mask = +- ring_info->ring_buffer->interrupt_mask; +- } ++ if (!ring_info->ring_buffer) ++ return -EINVAL; ++ ++ hv_get_ringbuffer_availbytes(ring_info, ++ &bytes_avail_toread, ++ &bytes_avail_towrite); ++ debug_info->bytes_avail_toread = bytes_avail_toread; ++ debug_info->bytes_avail_towrite = bytes_avail_towrite; ++ debug_info->current_read_index = ring_info->ring_buffer->read_index; ++ debug_info->current_write_index = ring_info->ring_buffer->write_index; ++ debug_info->current_interrupt_mask ++ = ring_info->ring_buffer->interrupt_mask; ++ return 0; + } + EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo); + +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c +index d0ff65675292..403fee01572c 100644 +--- a/drivers/hv/vmbus_drv.c ++++ b/drivers/hv/vmbus_drv.c +@@ -313,12 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev, + { + struct hv_device *hv_dev = device_to_hv_device(dev); + struct hv_ring_buffer_debug_info outbound; ++ int ret; + + if (!hv_dev->channel) + return -ENODEV; +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE) +- return -EINVAL; +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); ++ ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, ++ &outbound); ++ if (ret < 0) ++ return ret; ++ + return sprintf(buf, "%d\n", outbound.current_interrupt_mask); + } + static DEVICE_ATTR_RO(out_intr_mask); +@@ -328,12 +332,15 @@ static ssize_t out_read_index_show(struct device *dev, + { + struct hv_device *hv_dev = device_to_hv_device(dev); + struct hv_ring_buffer_debug_info outbound; ++ int ret; + + if (!hv_dev->channel) + return -ENODEV; +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE) +- return -EINVAL; +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); ++ ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, ++ &outbound); ++ if (ret < 0) ++ return ret; + return sprintf(buf, "%d\n", outbound.current_read_index); + } + static DEVICE_ATTR_RO(out_read_index); +@@ -344,12 +351,15 @@ static ssize_t out_write_index_show(struct device *dev, + { + struct hv_device *hv_dev = device_to_hv_device(dev); + struct hv_ring_buffer_debug_info outbound; ++ int ret; + + if (!hv_dev->channel) + return -ENODEV; +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE) +- return -EINVAL; +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); ++ ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, ++ &outbound); ++ if (ret < 0) ++ return ret; + return sprintf(buf, "%d\n", outbound.current_write_index); + } + static DEVICE_ATTR_RO(out_write_index); +@@ -360,12 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev, + { + struct hv_device *hv_dev = device_to_hv_device(dev); + struct hv_ring_buffer_debug_info outbound; ++ int ret; + + if 
(!hv_dev->channel) + return -ENODEV; +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE) +- return -EINVAL; +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); ++ ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, ++ &outbound); ++ if (ret < 0) ++ return ret; + return sprintf(buf, "%d\n", outbound.bytes_avail_toread); + } + static DEVICE_ATTR_RO(out_read_bytes_avail); +@@ -376,12 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev, + { + struct hv_device *hv_dev = device_to_hv_device(dev); + struct hv_ring_buffer_debug_info outbound; ++ int ret; + + if (!hv_dev->channel) + return -ENODEV; +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE) +- return -EINVAL; +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); ++ ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, ++ &outbound); ++ if (ret < 0) ++ return ret; + return sprintf(buf, "%d\n", outbound.bytes_avail_towrite); + } + static DEVICE_ATTR_RO(out_write_bytes_avail); +@@ -391,12 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev, + { + struct hv_device *hv_dev = device_to_hv_device(dev); + struct hv_ring_buffer_debug_info inbound; ++ int ret; + + if (!hv_dev->channel) + return -ENODEV; +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE) +- return -EINVAL; +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); ++ ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); ++ if (ret < 0) ++ return ret; ++ + return sprintf(buf, "%d\n", inbound.current_interrupt_mask); + } + static DEVICE_ATTR_RO(in_intr_mask); +@@ -406,12 +425,15 @@ static ssize_t in_read_index_show(struct device *dev, + { + struct hv_device *hv_dev = device_to_hv_device(dev); + struct hv_ring_buffer_debug_info inbound; ++ int ret; + + if (!hv_dev->channel) + return -ENODEV; +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE) +- return -EINVAL; +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); ++ ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); ++ if (ret < 0) ++ return ret; ++ + return sprintf(buf, "%d\n", inbound.current_read_index); + } + static DEVICE_ATTR_RO(in_read_index); +@@ -421,12 +443,15 @@ static ssize_t in_write_index_show(struct device *dev, + { + struct hv_device *hv_dev = device_to_hv_device(dev); + struct hv_ring_buffer_debug_info inbound; ++ int ret; + + if (!hv_dev->channel) + return -ENODEV; +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE) +- return -EINVAL; +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); ++ ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); ++ if (ret < 0) ++ return ret; ++ + return sprintf(buf, "%d\n", inbound.current_write_index); + } + static DEVICE_ATTR_RO(in_write_index); +@@ -437,12 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev, + { + struct hv_device *hv_dev = device_to_hv_device(dev); + struct hv_ring_buffer_debug_info inbound; ++ int ret; + + if (!hv_dev->channel) + return -ENODEV; +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE) +- return -EINVAL; +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); ++ ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); ++ if (ret < 0) ++ return ret; ++ + return sprintf(buf, "%d\n", inbound.bytes_avail_toread); + } + static DEVICE_ATTR_RO(in_read_bytes_avail); +@@ -453,12 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev, + { + struct hv_device *hv_dev = 
device_to_hv_device(dev); + struct hv_ring_buffer_debug_info inbound; ++ int ret; + + if (!hv_dev->channel) + return -ENODEV; +- if (hv_dev->channel->state != CHANNEL_OPENED_STATE) +- return -EINVAL; +- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); ++ ++ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); ++ if (ret < 0) ++ return ret; ++ + return sprintf(buf, "%d\n", inbound.bytes_avail_towrite); + } + static DEVICE_ATTR_RO(in_write_bytes_avail); +diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c +index 4c8c7a620d08..a5dc13576394 100644 +--- a/drivers/ide/ide-proc.c ++++ b/drivers/ide/ide-proc.c +@@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif) + drive->proc = proc_mkdir(drive->name, parent); + if (drive->proc) { + ide_add_proc_entries(drive->proc, generic_drive_entries, drive); +- proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR, ++ proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR, + drive->proc, &ide_settings_proc_fops, + drive); + } +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index cfc8b94527b9..aa4e431cbcd3 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -252,6 +252,8 @@ static const struct xpad_device { + { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, + { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, + { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, ++ { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, ++ { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, + { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, + { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, +@@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = { + XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */ + XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ + XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ ++ XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */ + XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ + XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ + XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ +diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c +index 8ec483e8688b..26ec603fe220 100644 +--- a/drivers/input/misc/uinput.c ++++ b/drivers/input/misc/uinput.c +@@ -39,6 +39,7 @@ + #include <linux/init.h> + #include <linux/fs.h> + #include <linux/miscdevice.h> ++#include <linux/overflow.h> + #include <linux/input/mt.h> + #include "../input-compat.h" + +@@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file) + static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, + const struct input_absinfo *abs) + { +- int min, max; ++ int min, max, range; + + min = abs->minimum; + max = abs->maximum; +@@ -417,7 +418,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, + return -EINVAL; + } + +- if (abs->flat > max - min) { ++ if (!check_sub_overflow(max, min, &range) && abs->flat > range) { + printk(KERN_DEBUG + "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n", + UINPUT_NAME, code, abs->flat, min, max); +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c +index db20e992a40f..7f2a45445b00 100644 +--- a/drivers/irqchip/irq-gic-v3-its.c ++++ b/drivers/irqchip/irq-gic-v3-its.c +@@ -2399,13 
+2399,14 @@ static void its_free_device(struct its_device *its_dev) + kfree(its_dev); + } + +-static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) ++static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) + { + int idx; + +- idx = find_first_zero_bit(dev->event_map.lpi_map, +- dev->event_map.nr_lpis); +- if (idx == dev->event_map.nr_lpis) ++ idx = bitmap_find_free_region(dev->event_map.lpi_map, ++ dev->event_map.nr_lpis, ++ get_count_order(nvecs)); ++ if (idx < 0) + return -ENOSPC; + + *hwirq = dev->event_map.lpi_base + idx; +@@ -2501,21 +2502,21 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + int err; + int i; + +- for (i = 0; i < nr_irqs; i++) { +- err = its_alloc_device_irq(its_dev, &hwirq); +- if (err) +- return err; ++ err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); ++ if (err) ++ return err; + +- err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); ++ for (i = 0; i < nr_irqs; i++) { ++ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); + if (err) + return err; + + irq_domain_set_hwirq_and_chip(domain, virq + i, +- hwirq, &its_irq_chip, its_dev); ++ hwirq + i, &its_irq_chip, its_dev); + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); + pr_debug("ID:%d pID:%d vID:%d\n", +- (int)(hwirq - its_dev->event_map.lpi_base), +- (int) hwirq, virq + i); ++ (int)(hwirq + i - its_dev->event_map.lpi_base), ++ (int)(hwirq + i), virq + i); + } + + return 0; +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c +index fc7d8b8a654f..1ef828575fae 100644 +--- a/drivers/md/dm-crypt.c ++++ b/drivers/md/dm-crypt.c +@@ -2405,9 +2405,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key + * capi:cipher_api_spec-iv:ivopts + */ + tmp = &cipher_in[strlen("capi:")]; +- cipher_api = strsep(&tmp, "-"); +- *ivmode = strsep(&tmp, ":"); +- *ivopts = tmp; ++ ++ /* Separate IV options if present, it can contain another '-' in hash name */ ++ *ivopts = strrchr(tmp, ':'); ++ if (*ivopts) { ++ **ivopts = '\0'; ++ (*ivopts)++; ++ } ++ /* Parse IV mode */ ++ *ivmode = strrchr(tmp, '-'); ++ if (*ivmode) { ++ **ivmode = '\0'; ++ (*ivmode)++; ++ } ++ /* The rest is crypto API spec */ ++ cipher_api = tmp; + + if (*ivmode && !strcmp(*ivmode, "lmk")) + cc->tfms_count = 64; +@@ -2477,11 +2489,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key + goto bad_mem; + + chainmode = strsep(&tmp, "-"); +- *ivopts = strsep(&tmp, "-"); +- *ivmode = strsep(&*ivopts, ":"); +- +- if (tmp) +- DMWARN("Ignoring unexpected additional cipher options"); ++ *ivmode = strsep(&tmp, ":"); ++ *ivopts = tmp; + + /* + * For compatibility with the original dm-crypt mapping format, if +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c +index 20b0776e39ef..ed3caceaed07 100644 +--- a/drivers/md/dm-thin-metadata.c ++++ b/drivers/md/dm-thin-metadata.c +@@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td, + return r; + } + +-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) ++int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) + { + int r; + uint32_t ref_count; +@@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu + down_read(&pmd->root_lock); + r = dm_sm_get_count(pmd->data_sm, b, &ref_count); + if (!r) +- *result = (ref_count != 0); ++ *result = (ref_count > 1); + up_read(&pmd->root_lock); + + 
return r; +diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h +index 35e954ea20a9..f6be0d733c20 100644 +--- a/drivers/md/dm-thin-metadata.h ++++ b/drivers/md/dm-thin-metadata.h +@@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd, + + int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); + +-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); ++int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); + + int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); + int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c +index dadd9696340c..ca8af21bf644 100644 +--- a/drivers/md/dm-thin.c ++++ b/drivers/md/dm-thin.c +@@ -1048,7 +1048,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m + * passdown we have to check that these blocks are now unused. + */ + int r = 0; +- bool used = true; ++ bool shared = true; + struct thin_c *tc = m->tc; + struct pool *pool = tc->pool; + dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin; +@@ -1058,11 +1058,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m + while (b != end) { + /* find start of unmapped run */ + for (; b < end; b++) { +- r = dm_pool_block_is_used(pool->pmd, b, &used); ++ r = dm_pool_block_is_shared(pool->pmd, b, &shared); + if (r) + goto out; + +- if (!used) ++ if (!shared) + break; + } + +@@ -1071,11 +1071,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m + + /* find end of run */ + for (e = b + 1; e != end; e++) { +- r = dm_pool_block_is_used(pool->pmd, e, &used); ++ r = dm_pool_block_is_shared(pool->pmd, e, &shared); + if (r) + goto out; + +- if (used) ++ if (shared) + break; + } + +diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c +index b8aaa684c397..2ed23c99f59f 100644 +--- a/drivers/misc/ibmvmc.c ++++ b/drivers/misc/ibmvmc.c +@@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter, + * + * Return: + * 0 - Success ++ * Non-zero - Failure + */ + static int ibmvmc_open(struct inode *inode, struct file *file) + { + struct ibmvmc_file_session *session; +- int rc = 0; + + pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__, + (unsigned long)inode, (unsigned long)file, + ibmvmc.state); + + session = kzalloc(sizeof(*session), GFP_KERNEL); ++ if (!session) ++ return -ENOMEM; ++ + session->file = file; + file->private_data = session; + +- return rc; ++ return 0; + } + + /** +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h +index e4b10b2d1a08..23739a60517f 100644 +--- a/drivers/misc/mei/hw-me-regs.h ++++ b/drivers/misc/mei/hw-me-regs.h +@@ -127,6 +127,8 @@ + #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ + #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ + ++#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */ ++ + #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ + + #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c +index ea4e152270a3..c8e21c894a5f 100644 +--- a/drivers/misc/mei/pci-me.c ++++ b/drivers/misc/mei/pci-me.c +@@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = { + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)}, 
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)}, +- {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)}, ++ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, + ++ {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)}, ++ + {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, +diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c +index 54c3fbb4a391..db56d4f58aaa 100644 +--- a/drivers/mmc/host/dw_mmc-bluefield.c ++++ b/drivers/mmc/host/dw_mmc-bluefield.c +@@ -1,11 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0 + /* + * Copyright (C) 2018 Mellanox Technologies. +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. + */ + + #include <linux/bitfield.h> +diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c +index c201c378537e..ef9deaa361c7 100644 +--- a/drivers/mmc/host/meson-gx-mmc.c ++++ b/drivers/mmc/host/meson-gx-mmc.c +@@ -174,6 +174,8 @@ struct meson_host { + struct sd_emmc_desc *descs; + dma_addr_t descs_dma_addr; + ++ int irq; ++ + bool vqmmc_enabled; + }; + +@@ -1181,7 +1183,7 @@ static int meson_mmc_probe(struct platform_device *pdev) + struct resource *res; + struct meson_host *host; + struct mmc_host *mmc; +- int ret, irq; ++ int ret; + + mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev); + if (!mmc) +@@ -1228,8 +1230,8 @@ static int meson_mmc_probe(struct platform_device *pdev) + goto free_host; + } + +- irq = platform_get_irq(pdev, 0); +- if (irq <= 0) { ++ host->irq = platform_get_irq(pdev, 0); ++ if (host->irq <= 0) { + dev_err(&pdev->dev, "failed to get interrupt resource.\n"); + ret = -EINVAL; + goto free_host; +@@ -1283,9 +1285,8 @@ static int meson_mmc_probe(struct platform_device *pdev) + writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, + host->regs + SD_EMMC_IRQ_EN); + +- ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq, +- meson_mmc_irq_thread, IRQF_SHARED, +- NULL, host); ++ ret = request_threaded_irq(host->irq, meson_mmc_irq, ++ meson_mmc_irq_thread, IRQF_SHARED, NULL, host); + if (ret) + goto err_init_clk; + +@@ -1303,7 +1304,7 @@ static int meson_mmc_probe(struct platform_device *pdev) + if (host->bounce_buf == NULL) { + dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n"); + ret = -ENOMEM; +- goto err_init_clk; ++ goto err_free_irq; + } + + host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, +@@ -1322,6 +1323,8 @@ static int meson_mmc_probe(struct platform_device *pdev) + err_bounce_buf: + dma_free_coherent(host->dev, host->bounce_buf_size, + host->bounce_buf, host->bounce_dma_addr); ++err_free_irq: ++ free_irq(host->irq, host); + err_init_clk: + clk_disable_unprepare(host->mmc_clk); + err_core_clk: +@@ -1339,6 +1342,7 @@ static int meson_mmc_remove(struct platform_device *pdev) + + /* disable interrupts */ + writel(0, host->regs + SD_EMMC_IRQ_EN); ++ free_irq(host->irq, host); + + dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, + host->descs, host->descs_dma_addr); +diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c +index 0db99057c44f..9d12c06c7fd6 100644 +--- a/drivers/mmc/host/sdhci-iproc.c ++++ b/drivers/mmc/host/sdhci-iproc.c +@@ -296,7 
+296,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev) + + iproc_host->data = iproc_data; + +- mmc_of_parse(host->mmc); ++ ret = mmc_of_parse(host->mmc); ++ if (ret) ++ goto err; ++ + sdhci_get_property(pdev); + + host->mmc->caps |= iproc_host->data->mmc_caps; +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index 3b3f88ffab53..c05e4d50d43d 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb); + struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) + { + struct can_priv *priv = netdev_priv(dev); +- struct sk_buff *skb = priv->echo_skb[idx]; +- struct canfd_frame *cf; + + if (idx >= priv->echo_skb_max) { + netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", +@@ -489,20 +487,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 + return NULL; + } + +- if (!skb) { +- netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n", +- __func__, idx); +- return NULL; +- } ++ if (priv->echo_skb[idx]) { ++ /* Using "struct canfd_frame::len" for the frame ++ * length is supported on both CAN and CANFD frames. ++ */ ++ struct sk_buff *skb = priv->echo_skb[idx]; ++ struct canfd_frame *cf = (struct canfd_frame *)skb->data; ++ u8 len = cf->len; + +- /* Using "struct canfd_frame::len" for the frame +- * length is supported on both CAN and CANFD frames. +- */ +- cf = (struct canfd_frame *)skb->data; +- *len_ptr = cf->len; +- priv->echo_skb[idx] = NULL; ++ *len_ptr = len; ++ priv->echo_skb[idx] = NULL; + +- return skb; ++ return skb; ++ } ++ ++ return NULL; + } + + /* +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c +index 75ce11395ee8..ae219b8a7754 100644 +--- a/drivers/net/can/flexcan.c ++++ b/drivers/net/can/flexcan.c +@@ -1004,7 +1004,7 @@ static int flexcan_chip_start(struct net_device *dev) + } + } else { + /* clear and invalidate unused mailboxes first */ +- for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) { ++ for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < ARRAY_SIZE(regs->mb); i++) { + priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, + ®s->mb[i].can_ctrl); + } +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h +index d272dc6984ac..b40d4377cc71 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h +@@ -431,8 +431,6 @@ + #define MAC_MDIOSCAR_PA_WIDTH 5 + #define MAC_MDIOSCAR_RA_INDEX 0 + #define MAC_MDIOSCAR_RA_WIDTH 16 +-#define MAC_MDIOSCAR_REG_INDEX 0 +-#define MAC_MDIOSCAR_REG_WIDTH 21 + #define MAC_MDIOSCCDR_BUSY_INDEX 22 + #define MAC_MDIOSCCDR_BUSY_WIDTH 1 + #define MAC_MDIOSCCDR_CMD_INDEX 16 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +index 1e929a1e4ca7..4666084eda16 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +@@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, + } + } + ++static unsigned int xgbe_create_mdio_sca(int port, int reg) ++{ ++ unsigned int mdio_sca, da; ++ ++ da = (reg & MII_ADDR_C45) ? 
reg >> 16 : 0; ++ ++ mdio_sca = 0; ++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); ++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); ++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); ++ ++ return mdio_sca; ++} ++ + static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, + int reg, u16 val) + { +@@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, + + reinit_completion(&pdata->mdio_complete); + +- mdio_sca = 0; +- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); +- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); ++ mdio_sca = xgbe_create_mdio_sca(addr, reg); + XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); + + mdio_sccd = 0; +@@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, + + reinit_completion(&pdata->mdio_complete); + +- mdio_sca = 0; +- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); +- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); ++ mdio_sca = xgbe_create_mdio_sca(addr, reg); + XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); + + mdio_sccd = 0; +diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c +index 5890fdfd62c3..c7901a3f2a79 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c +@@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data) + u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); + u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); + u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); ++ char ncqe[MLXSW_PCI_CQE_SIZE_MAX]; ++ ++ memcpy(ncqe, cqe, q->elem_size); ++ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + + if (sendq) { + struct mlxsw_pci_queue *sdq; + + sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); + mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, +- wqe_counter, cqe); ++ wqe_counter, ncqe); + q->u.cq.comp_sdq_count++; + } else { + struct mlxsw_pci_queue *rdq; + + rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); + mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, +- wqe_counter, q->u.cq.v, cqe); ++ wqe_counter, q->u.cq.v, ncqe); + q->u.cq.comp_rdq_count++; + } + if (++items == credits) + break; + } +- if (items) { +- mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); ++ if (items) + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); +- } + } + + static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) +diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +index bb99f6d41fe0..ffee38e36ce8 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h ++++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +@@ -27,7 +27,7 @@ + + #define MLXSW_PCI_SW_RESET 0xF0010 + #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) +-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 ++#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000 + #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 + #define MLXSW_PCI_FW_READY 0xA1844 + #define MLXSW_PCI_FW_READY_MASK 0xFFFF +@@ -53,6 +53,7 @@ + #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ + #define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */ + #define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */ ++#define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE + #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ + #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) + #define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE) +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +index 
a3db033d7399..b490589ef25c 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +@@ -882,8 +882,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = { + static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = { + .type = MLXSW_SP_FID_TYPE_DUMMY, + .fid_size = sizeof(struct mlxsw_sp_fid), +- .start_index = MLXSW_SP_RFID_BASE - 1, +- .end_index = MLXSW_SP_RFID_BASE - 1, ++ .start_index = VLAN_N_VID - 1, ++ .end_index = VLAN_N_VID - 1, + .ops = &mlxsw_sp_fid_dummy_ops, + }; + +diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c +index 9020b084b953..7ec4eb74fe21 100644 +--- a/drivers/net/ethernet/sun/cassini.c ++++ b/drivers/net/ethernet/sun/cassini.c +@@ -1,22 +1,9 @@ +-// SPDX-License-Identifier: GPL-2.0 ++// SPDX-License-Identifier: GPL-2.0+ + /* cassini.c: Sun Microsystems Cassini(+) ethernet driver. + * + * Copyright (C) 2004 Sun Microsystems Inc. + * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com) + * +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of the GNU General Public License as +- * published by the Free Software Foundation; either version 2 of the +- * License, or (at your option) any later version. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. +- * +- * You should have received a copy of the GNU General Public License +- * along with this program; if not, see <http://www.gnu.org/licenses/>. +- * + * This driver uses the sungem driver (c) David Miller + * (davem@redhat.com) as its basis. + * +diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h +index 13f3860496a8..ae5f05f03f88 100644 +--- a/drivers/net/ethernet/sun/cassini.h ++++ b/drivers/net/ethernet/sun/cassini.h +@@ -1,23 +1,10 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ ++/* SPDX-License-Identifier: GPL-2.0+ */ + /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $ + * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver. + * + * Copyright (C) 2004 Sun Microsystems Inc. + * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com) + * +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of the GNU General Public License as +- * published by the Free Software Foundation; either version 2 of the +- * License, or (at your option) any later version. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. +- * +- * You should have received a copy of the GNU General Public License +- * along with this program; if not, see <http://www.gnu.org/licenses/>. +- * + * vendor id: 0x108E (Sun Microsystems, Inc.) + * device id: 0xabba (Cassini) + * revision ids: 0x01 = Cassini +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c +index cbec296107bd..f46da6262abe 100644 +--- a/drivers/net/phy/marvell.c ++++ b/drivers/net/phy/marvell.c +@@ -1042,6 +1042,39 @@ static int m88e1145_config_init(struct phy_device *phydev) + return 0; + } + ++/* The VOD can be out of specification on link up. 
Poke an ++ * undocumented register, in an undocumented page, with a magic value ++ * to fix this. ++ */ ++static int m88e6390_errata(struct phy_device *phydev) ++{ ++ int err; ++ ++ err = phy_write(phydev, MII_BMCR, ++ BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX); ++ if (err) ++ return err; ++ ++ usleep_range(300, 400); ++ ++ err = phy_write_paged(phydev, 0xf8, 0x08, 0x36); ++ if (err) ++ return err; ++ ++ return genphy_soft_reset(phydev); ++} ++ ++static int m88e6390_config_aneg(struct phy_device *phydev) ++{ ++ int err; ++ ++ err = m88e6390_errata(phydev); ++ if (err) ++ return err; ++ ++ return m88e1510_config_aneg(phydev); ++} ++ + /** + * fiber_lpa_to_ethtool_lpa_t + * @lpa: value of the MII_LPA register for fiber link +@@ -1397,7 +1430,7 @@ static int m88e1318_set_wol(struct phy_device *phydev, + * before enabling it if !phy_interrupt_is_valid() + */ + if (!phy_interrupt_is_valid(phydev)) +- phy_read(phydev, MII_M1011_IEVENT); ++ __phy_read(phydev, MII_M1011_IEVENT); + + /* Enable the WOL interrupt */ + err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, +@@ -2292,7 +2325,7 @@ static struct phy_driver marvell_drivers[] = { + .flags = PHY_HAS_INTERRUPT, + .probe = m88e6390_probe, + .config_init = &marvell_config_init, +- .config_aneg = &m88e1510_config_aneg, ++ .config_aneg = &m88e6390_config_aneg, + .read_status = &marvell_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c +index 2e59a8419b17..66b9cfe692fc 100644 +--- a/drivers/net/phy/mdio_bus.c ++++ b/drivers/net/phy/mdio_bus.c +@@ -390,6 +390,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) + if (IS_ERR(gpiod)) { + dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", + bus->id); ++ device_del(&bus->dev); + return PTR_ERR(gpiod); + } else if (gpiod) { + bus->reset_gpiod = gpiod; +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c +index fd051ae787cb..5dd661fb662f 100644 +--- a/drivers/net/phy/phy_device.c ++++ b/drivers/net/phy/phy_device.c +@@ -2196,6 +2196,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) + { + int retval; + ++ if (WARN_ON(!new_driver->features)) { ++ pr_err("%s: Driver features are missing\n", new_driver->name); ++ return -EINVAL; ++ } ++ + new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY; + new_driver->mdiodrv.driver.name = new_driver->name; + new_driver->mdiodrv.driver.bus = &mdio_bus_type; +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c +index 62dc564b251d..f22639f0116a 100644 +--- a/drivers/net/ppp/pppoe.c ++++ b/drivers/net/ppp/pppoe.c +@@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, + if (pskb_trim_rcsum(skb, len)) + goto drop; + ++ ph = pppoe_hdr(skb); + pn = pppoe_pernet(dev_net(dev)); + + /* Note that get_item does a sock_hold(), so sk_pppox(po) +diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h +index 3bfa7f5e3513..2e5bcb3fdff7 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76.h ++++ b/drivers/net/wireless/mediatek/mt76/mt76.h +@@ -1,3 +1,4 @@ ++ + /* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c +index 9273d2d2764a..732f4b87fdcb 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c +@@ -116,9 +116,6 @@ void 
mt76x0_bss_info_changed(struct ieee80211_hw *hw, + MT_BKOFF_SLOT_CFG_SLOTTIME, slottime); + } + +- if (changed & BSS_CHANGED_ASSOC) +- mt76x0_phy_recalibrate_after_assoc(dev); +- + mutex_unlock(&dev->mt76.mutex); + } + EXPORT_SYMBOL_GPL(mt76x0_bss_info_changed); +@@ -138,6 +135,12 @@ void mt76x0_sw_scan_complete(struct ieee80211_hw *hw, + struct mt76x02_dev *dev = hw->priv; + + clear_bit(MT76_SCANNING, &dev->mt76.state); ++ ++ if (dev->cal.gain_init_done) { ++ /* Restore AGC gain and resume calibration after scanning. */ ++ dev->cal.low_gain = -1; ++ ieee80211_queue_delayed_work(hw, &dev->cal_work, 0); ++ } + } + EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete); + +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h +index 2187bafaf2e9..0057f69d0c36 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h +@@ -41,6 +41,11 @@ static inline bool is_mt7610e(struct mt76x02_dev *dev) + + void mt76x0_init_debugfs(struct mt76x02_dev *dev); + ++static inline bool is_mt7630(struct mt76x02_dev *dev) ++{ ++ return mt76_chip(&dev->mt76) == 0x7630; ++} ++ + /* Init */ + struct mt76x02_dev * + mt76x0_alloc_device(struct device *pdev, +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +index cf024950e0ed..c34abd1c6030 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +@@ -215,62 +215,6 @@ int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev) + return 0; + } + +-static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel) +-{ +- u8 val; +- +- val = rf_rr(dev, MT_RF(0, 4)); +- if ((val & 0x70) != 0x30) +- return; +- +- /* +- * Calibration Mode - Open loop, closed loop, and amplitude: +- * B0.R06.[0]: 1 +- * B0.R06.[3:1] bp_close_code: 100 +- * B0.R05.[7:0] bp_open_code: 0x0 +- * B0.R04.[2:0] cal_bits: 000 +- * B0.R03.[2:0] startup_time: 011 +- * B0.R03.[6:4] settle_time: +- * 80MHz channel: 110 +- * 40MHz channel: 101 +- * 20MHz channel: 100 +- */ +- val = rf_rr(dev, MT_RF(0, 6)); +- val &= ~0xf; +- val |= 0x09; +- rf_wr(dev, MT_RF(0, 6), val); +- +- val = rf_rr(dev, MT_RF(0, 5)); +- if (val != 0) +- rf_wr(dev, MT_RF(0, 5), 0x0); +- +- val = rf_rr(dev, MT_RF(0, 4)); +- val &= ~0x07; +- rf_wr(dev, MT_RF(0, 4), val); +- +- val = rf_rr(dev, MT_RF(0, 3)); +- val &= ~0x77; +- if (channel == 1 || channel == 7 || channel == 9 || channel >= 13) { +- val |= 0x63; +- } else if (channel == 3 || channel == 4 || channel == 10) { +- val |= 0x53; +- } else if (channel == 2 || channel == 5 || channel == 6 || +- channel == 8 || channel == 11 || channel == 12) { +- val |= 0x43; +- } else { +- WARN(1, "Unknown channel %u\n", channel); +- return; +- } +- rf_wr(dev, MT_RF(0, 3), val); +- +- /* TODO replace by mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); */ +- val = rf_rr(dev, MT_RF(0, 4)); +- val = ((val & ~(0x80)) | 0x80); +- rf_wr(dev, MT_RF(0, 4), val); +- +- msleep(2); +-} +- + static void + mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band) + { +@@ -518,21 +462,47 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band) + + static void mt76x0_ant_select(struct mt76x02_dev *dev) + { +- struct ieee80211_channel *chan = dev->mt76.chandef.chan; +- +- /* single antenna mode */ +- if (chan->band == NL80211_BAND_2GHZ) { +- mt76_rmw(dev, MT_COEXCFG3, +- BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1)); +- mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6)); ++ 
u16 ee_ant = mt76x02_eeprom_get(dev, MT_EE_ANTENNA);
++ u16 nic_conf2 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
++ u32 wlan, coex3, cmb;
++ bool ant_div;
++
++ wlan = mt76_rr(dev, MT_WLAN_FUN_CTRL);
++ cmb = mt76_rr(dev, MT_CMB_CTRL);
++ coex3 = mt76_rr(dev, MT_COEXCFG3);
++
++ cmb &= ~(BIT(14) | BIT(12));
++ wlan &= ~(BIT(6) | BIT(5));
++ coex3 &= ~GENMASK(5, 2);
++
++ if (ee_ant & MT_EE_ANTENNA_DUAL) {
++ /* dual antenna mode */
++ ant_div = !(nic_conf2 & MT_EE_NIC_CONF_2_ANT_OPT) &&
++ (nic_conf2 & MT_EE_NIC_CONF_2_ANT_DIV);
++ if (ant_div)
++ cmb |= BIT(12);
++ else
++ coex3 |= BIT(4);
++ coex3 |= BIT(3);
++ if (dev->mt76.cap.has_2ghz)
++ wlan |= BIT(6);
+ } else {
+- mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(2),
+- BIT(4) | BIT(3));
+- mt76_clear(dev, MT_WLAN_FUN_CTRL,
+- BIT(6) | BIT(5));
++ /* single antenna mode */
++ if (dev->mt76.cap.has_5ghz) {
++ coex3 |= BIT(3) | BIT(4);
++ } else {
++ wlan |= BIT(6);
++ coex3 |= BIT(1);
++ }
+ }
+- mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
++
++ if (is_mt7630(dev))
++ cmb |= BIT(14) | BIT(11);
++
++ mt76_wr(dev, MT_WLAN_FUN_CTRL, wlan);
++ mt76_wr(dev, MT_CMB_CTRL, cmb);
+ mt76_clear(dev, MT_COEXCFG0, BIT(2));
++ mt76_wr(dev, MT_COEXCFG3, coex3);
+ }
+
+ static void
+@@ -585,8 +555,12 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
+ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
+ {
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
++ int is_5ghz = (chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
+ u32 val, tx_alc, reg_val;
+
++ if (is_mt7630(dev))
++ return;
++
+ if (power_on) {
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value,
+@@ -602,7 +576,7 @@ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
+ reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
+ mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
+
+- if (chan->band == NL80211_BAND_5GHZ) {
++ if (is_5ghz) {
+ if (chan->hw_value < 100)
+ val = 0x701;
+ else if (chan->hw_value < 140)
+@@ -615,7 +589,7 @@ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
+
+ mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val, false);
+ msleep(350);
+- mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 1, false);
++ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false);
+ usleep_range(15000, 20000);
+
+ mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
+@@ -696,7 +670,6 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
+ mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
+ mt76x02_phy_set_band(dev, chandef->chan->band,
+ ch_group_index & 1);
+- mt76x0_ant_select(dev);
+
+ mt76_rmw(dev, MT_EXT_CCA_CFG,
+ (MT_EXT_CCA_CFG_CCA0 |
+@@ -719,20 +692,16 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
+
+ mt76x0_read_rx_gain(dev);
+ mt76x0_phy_set_chan_bbp_params(dev, rf_bw_band);
+- mt76x02_init_agc_gain(dev);
+
+- if (mt76_is_usb(dev)) {
+- mt76x0_vco_cal(dev, channel);
+- } else {
+- /* enable vco */
+- rf_set(dev, MT_RF(0, 4), BIT(7));
+- }
++ /* enable vco */
++ rf_set(dev, MT_RF(0, 4), BIT(7));
+
+ if (scan)
+ return 0;
+
+- if (mt76_is_mmio(dev))
+- mt76x0_phy_calibrate(dev, false);
++ mt76x0_phy_calibrate(dev, false);
++ mt76x02_init_agc_gain(dev);
++
+ mt76x0_phy_set_txpower(dev);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+@@ -741,39 +710,6 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
+ return 0;
+ }
+
+-void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev)
+-{
+- u32 tx_alc, reg_val;
+- u8 channel = dev->mt76.chandef.chan->hw_value;
+- int is_5ghz = 
(dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0; +- +- mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false); +- +- mt76x0_vco_cal(dev, channel); +- +- tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0); +- mt76_wr(dev, MT_TX_ALC_CFG_0, 0); +- usleep_range(500, 700); +- +- reg_val = mt76_rr(dev, MT_BBP(IBI, 9)); +- mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e); +- +- mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0, false); +- +- mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false); +- mt76x02_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz, false); +- mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false); +- mt76x02_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz, false); +- mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz, false); +- mt76x02_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz, false); +- +- mt76_wr(dev, MT_BBP(IBI, 9), reg_val); +- mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc); +- msleep(100); +- +- mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false); +-} +- + static void mt76x0_temp_sensor(struct mt76x02_dev *dev) + { + u8 rf_b7_73, rf_b0_66, rf_b0_67; +@@ -817,10 +753,8 @@ done: + static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev) + { + u8 gain = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust; +- u32 val = 0x122c << 16 | 0xf2; + +- mt76_wr(dev, MT_BBP(AGC, 8), +- val | FIELD_PREP(MT_BBP_AGC_GAIN, gain)); ++ mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, gain); + } + + static void +@@ -835,7 +769,8 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev) + low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) + + (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); + +- gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2); ++ gain_change = dev->cal.low_gain < 0 || ++ (dev->cal.low_gain & 2) ^ (low_gain & 2); + dev->cal.low_gain = low_gain; + + if (!gain_change) { +@@ -924,6 +859,7 @@ void mt76x0_phy_init(struct mt76x02_dev *dev) + { + INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibration_work); + ++ mt76x0_ant_select(dev); + mt76x0_rf_init(dev); + mt76x02_phy_set_rxpath(dev); + mt76x02_phy_set_txdac(dev); +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +index a7fd36c2f633..ea517864186b 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +@@ -117,6 +117,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw) + if (ret) + goto out; + ++ mt76x0_phy_calibrate(dev, true); + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, + MT_CALIBRATE_INTERVAL); + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h +index 7806963b1905..9a5ae5c06840 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h ++++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h +@@ -63,6 +63,7 @@ struct mt76x02_calibration { + bool tssi_comp_pending; + bool dpd_cal_done; + bool channel_cal_done; ++ bool gain_init_done; + }; + + struct mt76x02_dev { +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h +index b3ec74835d10..1de041590050 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h ++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h +@@ -25,6 +25,7 @@ enum mt76x02_eeprom_field { + MT_EE_VERSION = 0x002, + MT_EE_MAC_ADDR = 0x004, + MT_EE_PCI_ID = 0x00A, ++ MT_EE_ANTENNA = 0x022, + MT_EE_NIC_CONF_0 = 0x034, + MT_EE_NIC_CONF_1 = 0x036, + 
MT_EE_COUNTRY_REGION_5GHZ = 0x038, +@@ -104,6 +105,8 @@ enum mt76x02_eeprom_field { + __MT_EE_MAX + }; + ++#define MT_EE_ANTENNA_DUAL BIT(15) ++ + #define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0) + #define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4) + #define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8) +@@ -118,12 +121,9 @@ enum mt76x02_eeprom_field { + #define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3) + #define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13) + +-#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0) +-#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4) +-#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8) ++#define MT_EE_NIC_CONF_2_ANT_OPT BIT(3) ++#define MT_EE_NIC_CONF_2_ANT_DIV BIT(4) + #define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9) +-#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11) +-#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13) + + #define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \ + MT_EE_USAGE_MAP_START + 1) +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c +index 0f1d7b5c9f68..977a8e7e26df 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c +@@ -254,5 +254,6 @@ void mt76x02_init_agc_gain(struct mt76x02_dev *dev) + memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init, + sizeof(dev->cal.agc_gain_cur)); + dev->cal.low_gain = -1; ++ dev->cal.gain_init_done = true; + } + EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain); +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c +index 1971a1b00038..9471b44ce558 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c +@@ -156,6 +156,9 @@ mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) + struct mt76x02_dev *dev = hw->priv; + + clear_bit(MT76_SCANNING, &dev->mt76.state); ++ ++ if (dev->cal.gain_init_done) ++ ieee80211_queue_delayed_work(hw, &dev->cal_work, 0); + } + + const struct ieee80211_ops mt76x2u_ops = { +diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c +index 583086dd9cb9..bfc5ef6d85b7 100644 +--- a/drivers/nvme/target/rdma.c ++++ b/drivers/nvme/target/rdma.c +@@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); + static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc); + static void nvmet_rdma_qp_event(struct ib_event *event, void *priv); + static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); ++static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev, ++ struct nvmet_rdma_rsp *r); ++static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, ++ struct nvmet_rdma_rsp *r); + + static const struct nvmet_fabrics_ops nvmet_rdma_ops; + +@@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) + spin_unlock_irqrestore(&queue->rsps_lock, flags); + + if (unlikely(!rsp)) { +- rsp = kmalloc(sizeof(*rsp), GFP_KERNEL); ++ int ret; ++ ++ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); + if (unlikely(!rsp)) + return NULL; ++ ret = nvmet_rdma_alloc_rsp(queue->dev, rsp); ++ if (unlikely(ret)) { ++ kfree(rsp); ++ return NULL; ++ } ++ + rsp->allocated = true; + } + +@@ -196,7 +208,8 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) + { + unsigned long flags; + +- if (rsp->allocated) { ++ if (unlikely(rsp->allocated)) { ++ nvmet_rdma_free_rsp(rsp->queue->dev, rsp); + kfree(rsp); + return; + } +diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c 
+index 194ffd5c8580..039b2074db7e 100644 +--- a/drivers/s390/char/sclp_config.c ++++ b/drivers/s390/char/sclp_config.c +@@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work) + + static void __ref sclp_cpu_change_notify(struct work_struct *work) + { ++ lock_device_hotplug(); + smp_rescan_cpus(); ++ unlock_device_hotplug(); + } + + static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index f1c57cd33b5b..1cb35ab8a4ec 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -110,13 +110,19 @@ + int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, + const char *prefix) + { +- u8 *regs; ++ u32 *regs; ++ size_t pos; ++ ++ if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */ ++ return -EINVAL; + + regs = kzalloc(len, GFP_KERNEL); + if (!regs) + return -ENOMEM; + +- memcpy_fromio(regs, hba->mmio_base + offset, len); ++ for (pos = 0; pos < len; pos += 4) ++ regs[pos / 4] = ufshcd_readl(hba, offset + pos); ++ + ufshcd_hex_dump(prefix, regs, len); + kfree(regs); + +diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +index 28cbd6b3d26c..dfee6985efa6 100644 +--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c ++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +@@ -35,6 +35,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = { + {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ + {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ + {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ ++ {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */ + {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ + {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ + {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ +diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c +index dabb391909aa..bb63519db7ae 100644 +--- a/drivers/tty/n_hdlc.c ++++ b/drivers/tty/n_hdlc.c +@@ -597,6 +597,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file, + /* too large for caller's buffer */ + ret = -EOVERFLOW; + } else { ++ __set_current_state(TASK_RUNNING); + if (copy_to_user(buf, rbuf->buf, rbuf->count)) + ret = -EFAULT; + else +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c +index d4cca5bdaf1c..5c01bb6d1c24 100644 +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -550,10 +550,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c) + int ret = 0; + + circ = &state->xmit; +- if (!circ->buf) ++ port = uart_port_lock(state, flags); ++ if (!circ->buf) { ++ uart_port_unlock(port, flags); + return 0; ++ } + +- port = uart_port_lock(state, flags); + if (port && uart_circ_chars_free(circ) != 0) { + circ->buf[circ->head] = c; + circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); +@@ -586,11 +588,13 @@ static int uart_write(struct tty_struct *tty, + return -EL3HLT; + } + ++ port = uart_port_lock(state, flags); + circ = &state->xmit; +- if (!circ->buf) ++ if (!circ->buf) { ++ uart_port_unlock(port, flags); + return 0; ++ } + +- port = uart_port_lock(state, flags); + while (port) { + c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); + if (count < c) +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c +index 23c6fd238422..21ffcce16927 100644 +--- a/drivers/tty/tty_io.c ++++ b/drivers/tty/tty_io.c +@@ -2189,7 +2189,8 @@ static int tiocsti(struct tty_struct *tty, char 
__user *p) + ld = tty_ldisc_ref_wait(tty); + if (!ld) + return -EIO; +- ld->ops->receive_buf(tty, &ch, &mbz, 1); ++ if (ld->ops->receive_buf) ++ ld->ops->receive_buf(tty, &ch, &mbz, 1); + tty_ldisc_deref(ld); + return 0; + } +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index 41ec8e5010f3..bba75560d11e 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -1272,6 +1272,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, + if (con_is_visible(vc)) + update_screen(vc); + vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num); ++ notify_update(vc); + return err; + } + +@@ -2764,8 +2765,8 @@ rescan_last_byte: + con_flush(vc, draw_from, draw_to, &draw_x); + vc_uniscr_debug_check(vc); + console_conditional_schedule(); +- console_unlock(); + notify_update(vc); ++ console_unlock(); + return n; + } + +@@ -2884,8 +2885,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) + unsigned char c; + static DEFINE_SPINLOCK(printing_lock); + const ushort *start; +- ushort cnt = 0; +- ushort myx; ++ ushort start_x, cnt; + int kmsg_console; + + /* console busy or not yet initialized */ +@@ -2898,10 +2898,6 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) + if (kmsg_console && vc_cons_allocated(kmsg_console - 1)) + vc = vc_cons[kmsg_console - 1].d; + +- /* read `x' only after setting currcons properly (otherwise +- the `x' macro will read the x of the foreground console). */ +- myx = vc->vc_x; +- + if (!vc_cons_allocated(fg_console)) { + /* impossible */ + /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */ +@@ -2916,53 +2912,41 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) + hide_cursor(vc); + + start = (ushort *)vc->vc_pos; +- +- /* Contrived structure to try to emulate original need_wrap behaviour +- * Problems caused when we have need_wrap set on '\n' character */ ++ start_x = vc->vc_x; ++ cnt = 0; + while (count--) { + c = *b++; + if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) { +- if (cnt > 0) { +- if (con_is_visible(vc)) +- vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x); +- vc->vc_x += cnt; +- if (vc->vc_need_wrap) +- vc->vc_x--; +- cnt = 0; +- } ++ if (cnt && con_is_visible(vc)) ++ vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); ++ cnt = 0; + if (c == 8) { /* backspace */ + bs(vc); + start = (ushort *)vc->vc_pos; +- myx = vc->vc_x; ++ start_x = vc->vc_x; + continue; + } + if (c != 13) + lf(vc); + cr(vc); + start = (ushort *)vc->vc_pos; +- myx = vc->vc_x; ++ start_x = vc->vc_x; + if (c == 10 || c == 13) + continue; + } ++ vc_uniscr_putc(vc, c); + scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos); + notify_write(vc, c); + cnt++; +- if (myx == vc->vc_cols - 1) { +- vc->vc_need_wrap = 1; +- continue; +- } +- vc->vc_pos += 2; +- myx++; +- } +- if (cnt > 0) { +- if (con_is_visible(vc)) +- vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x); +- vc->vc_x += cnt; +- if (vc->vc_x == vc->vc_cols) { +- vc->vc_x--; ++ if (vc->vc_x == vc->vc_cols - 1) { + vc->vc_need_wrap = 1; ++ } else { ++ vc->vc_pos += 2; ++ vc->vc_x++; + } + } ++ if (cnt && con_is_visible(vc)) ++ vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); + set_cursor(vc); + notify_update(vc); + +diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c +index dc7f7fd71684..c12ac56606c3 100644 +--- a/drivers/usb/core/ledtrig-usbport.c ++++ b/drivers/usb/core/ledtrig-usbport.c +@@ -119,11 +119,6 @@ static const 
struct attribute_group ports_group = { + .attrs = ports_attrs, + }; + +-static const struct attribute_group *ports_groups[] = { +- &ports_group, +- NULL +-}; +- + /*************************************** + * Adding & removing ports + ***************************************/ +@@ -307,6 +302,7 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action, + static int usbport_trig_activate(struct led_classdev *led_cdev) + { + struct usbport_trig_data *usbport_data; ++ int err; + + usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL); + if (!usbport_data) +@@ -315,6 +311,9 @@ static int usbport_trig_activate(struct led_classdev *led_cdev) + + /* List of ports */ + INIT_LIST_HEAD(&usbport_data->ports); ++ err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group); ++ if (err) ++ goto err_free; + usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports); + usbport_trig_update_count(usbport_data); + +@@ -322,8 +321,11 @@ static int usbport_trig_activate(struct led_classdev *led_cdev) + usbport_data->nb.notifier_call = usbport_trig_notify; + led_set_trigger_data(led_cdev, usbport_data); + usb_register_notify(&usbport_data->nb); +- + return 0; ++ ++err_free: ++ kfree(usbport_data); ++ return err; + } + + static void usbport_trig_deactivate(struct led_classdev *led_cdev) +@@ -335,6 +337,8 @@ static void usbport_trig_deactivate(struct led_classdev *led_cdev) + usbport_trig_remove_port(usbport_data, port); + } + ++ sysfs_remove_group(&led_cdev->dev->kobj, &ports_group); ++ + usb_unregister_notify(&usbport_data->nb); + + kfree(usbport_data); +@@ -344,7 +348,6 @@ static struct led_trigger usbport_led_trigger = { + .name = "usbport", + .activate = usbport_trig_activate, + .deactivate = usbport_trig_deactivate, +- .groups = ports_groups, + }; + + static int __init usbport_trig_init(void) +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index 9f92ee03dde7..2a4ea9a1b1e3 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -177,6 +177,8 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep, + req->started = false; + list_del(&req->list); + req->remaining = 0; ++ req->unaligned = false; ++ req->zero = false; + + if (req->request.status == -EINPROGRESS) + req->request.status = status; +diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c +index f26109eafdbf..66ec1fdf9fe7 100644 +--- a/drivers/usb/host/ehci-mv.c ++++ b/drivers/usb/host/ehci-mv.c +@@ -302,3 +302,4 @@ MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>"); + MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>"); + MODULE_ALIAS("mv-ehci"); + MODULE_LICENSE("GPL"); ++MODULE_DEVICE_TABLE(of, ehci_mv_dt_ids); +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index 609198d9594c..f459c1a18156 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -1783,6 +1783,10 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode) + int result; + u16 val; + ++ result = usb_autopm_get_interface(serial->interface); ++ if (result) ++ return result; ++ + val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value; + result = usb_control_msg(serial->dev, + usb_sndctrlpipe(serial->dev, 0), +@@ -1795,6 +1799,8 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode) + val, result); + } + ++ usb_autopm_put_interface(serial->interface); ++ + return result; + } + +@@ -1846,9 +1852,15 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port) + unsigned char *buf; + int result; + ++ 
result = usb_autopm_get_interface(serial->interface);
++ if (result)
++ return result;
++
+ buf = kmalloc(1, GFP_KERNEL);
+- if (!buf)
++ if (!buf) {
++ usb_autopm_put_interface(serial->interface);
+ return -ENOMEM;
++ }
+
+ result = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+@@ -1863,6 +1875,7 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
+ }
+
+ kfree(buf);
++ usb_autopm_put_interface(serial->interface);
+
+ return result;
+ }
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 98e7a5df0f6d..bb3f9aa4a909 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -46,6 +46,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
++ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
+ { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
+ { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
+ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index 4e2554d55362..559941ca884d 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -8,6 +8,7 @@
+
+ #define PL2303_VENDOR_ID 0x067b
+ #define PL2303_PRODUCT_ID 0x2303
++#define PL2303_PRODUCT_ID_TB 0x2304
+ #define PL2303_PRODUCT_ID_RSAQ2 0x04bb
+ #define PL2303_PRODUCT_ID_DCU11 0x1234
+ #define PL2303_PRODUCT_ID_PHAROS 0xaaa0
+@@ -20,6 +21,7 @@
+ #define PL2303_PRODUCT_ID_MOTOROLA 0x0307
+ #define PL2303_PRODUCT_ID_ZTEK 0xe1f1
+
++
+ #define ATEN_VENDOR_ID 0x0557
+ #define ATEN_VENDOR_ID2 0x0547
+ #define ATEN_PRODUCT_ID 0x2008
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index 4d0273508043..edbbb13d6de6 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -85,7 +85,8 @@ DEVICE(moto_modem, MOTO_IDS);
+ /* Motorola Tetra driver */
+ #define MOTOROLA_TETRA_IDS() \
+ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
+- { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
++ { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
++ { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
+ DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
+
+ /* Novatel Wireless GPS driver */
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index ad7a6f475a44..784df2b49628 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -1192,7 +1192,8 @@ static void handle_rx(struct vhost_net *net)
+ if (nvq->done_idx > VHOST_NET_BATCH)
+ vhost_net_signal_used(nvq);
+ if (unlikely(vq_log))
+- vhost_log_write(vq, vq_log, log, vhost_len);
++ vhost_log_write(vq, vq_log, log, vhost_len,
++ vq->iov, in);
+ total_len += vhost_len;
+ if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
+ vhost_poll_queue(&vq->poll);
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 55e5aa662ad5..c66fc8308b5e 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -1733,13 +1733,87 @@ static int log_write(void __user *log_base,
+ return r;
+ }
+
++static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
++{
++ struct vhost_umem *umem = vq->umem;
++ struct vhost_umem_node *u;
++ u64 start, end, l, min;
++ int r;
++ bool hit = false;
++
++ while (len) {
++ min = len;
++ /* More than one GPA can be mapped into a single HVA. So
++ * iterate all possible umems here to be safe.
++ */
++ list_for_each_entry(u, &umem->umem_list, link) {
++ if (u->userspace_addr > hva - 1 + len ||
++ u->userspace_addr - 1 + u->size < hva)
++ continue;
++ start = max(u->userspace_addr, hva);
++ end = min(u->userspace_addr - 1 + u->size,
++ hva - 1 + len);
++ l = end - start + 1;
++ r = log_write(vq->log_base,
++ u->start + start - u->userspace_addr,
++ l);
++ if (r < 0)
++ return r;
++ hit = true;
++ min = min(l, min);
++ }
++
++ if (!hit)
++ return -EFAULT;
++
++ len -= min;
++ hva += min;
++ }
++
++ return 0;
++}
++
++static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
++{
++ struct iovec iov[64];
++ int i, ret;
++
++ if (!vq->iotlb)
++ return log_write(vq->log_base, vq->log_addr + used_offset, len);
++
++ ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
++ len, iov, 64, VHOST_ACCESS_WO);
++ if (ret < 0)
++ return ret;
++
++ for (i = 0; i < ret; i++) {
++ ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
++ iov[i].iov_len);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
+ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
+- unsigned int log_num, u64 len)
++ unsigned int log_num, u64 len, struct iovec *iov, int count)
+ {
+ int i, r;
+
+ /* Make sure data written is seen before log. */
+ smp_wmb();
++
++ if (vq->iotlb) {
++ for (i = 0; i < count; i++) {
++ r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
++ iov[i].iov_len);
++ if (r < 0)
++ return r;
++ }
++ return 0;
++ }
++
+ for (i = 0; i < log_num; ++i) {
+ u64 l = min(log[i].len, len);
+ r = log_write(vq->log_base, log[i].addr, l);
+@@ -1769,9 +1843,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
+ smp_wmb();
+ /* Log used flag write. */
+ used = &vq->used->flags;
+- log_write(vq->log_base, vq->log_addr +
+- (used - (void __user *)vq->used),
+- sizeof vq->used->flags);
++ log_used(vq, (used - (void __user *)vq->used),
++ sizeof vq->used->flags);
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
+ }
+@@ -1789,9 +1862,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
+ smp_wmb();
+ /* Log avail event write */
+ used = vhost_avail_event(vq);
+- log_write(vq->log_base, vq->log_addr +
+- (used - (void __user *)vq->used),
+- sizeof *vhost_avail_event(vq));
++ log_used(vq, (used - (void __user *)vq->used),
++ sizeof *vhost_avail_event(vq));
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
+ }
+@@ -2191,10 +2263,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
+ /* Make sure data is seen before log. */
+ smp_wmb();
+ /* Log used ring entry write. */
+- log_write(vq->log_base,
+- vq->log_addr +
+- ((void __user *)used - (void __user *)vq->used),
+- count * sizeof *used);
++ log_used(vq, ((void __user *)used - (void __user *)vq->used),
++ count * sizeof *used);
+ }
+ old = vq->last_used_idx;
+ new = (vq->last_used_idx += count);
+@@ -2236,9 +2306,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
+ /* Make sure used idx is seen before log. */
+ smp_wmb();
+ /* Log used index update. 
*/ +- log_write(vq->log_base, +- vq->log_addr + offsetof(struct vring_used, idx), +- sizeof vq->used->idx); ++ log_used(vq, offsetof(struct vring_used, idx), ++ sizeof vq->used->idx); + if (vq->log_ctx) + eventfd_signal(vq->log_ctx, 1); + } +diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h +index 466ef7542291..1b675dad5e05 100644 +--- a/drivers/vhost/vhost.h ++++ b/drivers/vhost/vhost.h +@@ -205,7 +205,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *); + bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); + + int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, +- unsigned int log_num, u64 len); ++ unsigned int log_num, u64 len, ++ struct iovec *iov, int count); + int vq_iotlb_prefetch(struct vhost_virtqueue *vq); + + struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type); +diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c +index 09731b2f6815..c6b3bdbbdbc9 100644 +--- a/drivers/video/console/vgacon.c ++++ b/drivers/video/console/vgacon.c +@@ -271,6 +271,7 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count) + + static void vgacon_restore_screen(struct vc_data *c) + { ++ c->vc_origin = c->vc_visible_origin; + vgacon_scrollback_cur->save = 0; + + if (!vga_is_gfx && !vgacon_scrollback_cur->restore) { +@@ -287,8 +288,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) + int start, end, count, soff; + + if (!lines) { +- c->vc_visible_origin = c->vc_origin; +- vga_set_mem_top(c); ++ vgacon_restore_screen(c); + return; + } + +@@ -298,6 +298,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) + if (!vgacon_scrollback_cur->save) { + vgacon_cursor(c, CM_ERASE); + vgacon_save_screen(c); ++ c->vc_origin = (unsigned long)c->vc_screenbuf; + vgacon_scrollback_cur->save = 1; + } + +@@ -335,7 +336,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) + int copysize; + + int diff = c->vc_rows - count; +- void *d = (void *) c->vc_origin; ++ void *d = (void *) c->vc_visible_origin; + void *s = (void *) c->vc_screenbuf; + + count *= c->vc_size_row; +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c +index a58666a3f8dd..08aaf580fa1c 100644 +--- a/fs/ceph/caps.c ++++ b/fs/ceph/caps.c +@@ -1032,6 +1032,8 @@ static void drop_inode_snap_realm(struct ceph_inode_info *ci) + list_del_init(&ci->i_snap_realm_item); + ci->i_snap_realm_counter++; + ci->i_snap_realm = NULL; ++ if (realm->ino == ci->i_vino.ino) ++ realm->inode = NULL; + spin_unlock(&realm->inodes_with_caps_lock); + ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc, + realm); +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c +index f82fd342bca5..fce610f6cd24 100644 +--- a/fs/cifs/cifssmb.c ++++ b/fs/cifs/cifssmb.c +@@ -1458,18 +1458,26 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server) + } + + static int +-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid, ++ bool malformed) + { + int length; +- struct cifs_readdata *rdata = mid->callback_data; + + length = cifs_discard_remaining_data(server); +- dequeue_mid(mid, rdata->result); ++ dequeue_mid(mid, malformed); + mid->resp_buf = server->smallbuf; + server->smallbuf = NULL; + return length; + } + ++static int ++cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++{ ++ struct cifs_readdata *rdata = mid->callback_data; ++ ++ return __cifs_readv_discard(server, mid, 
rdata->result); ++} ++ + int + cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + { +@@ -1511,12 +1519,23 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + return -1; + } + ++ /* set up first two iov for signature check and to get credits */ ++ rdata->iov[0].iov_base = buf; ++ rdata->iov[0].iov_len = 4; ++ rdata->iov[1].iov_base = buf + 4; ++ rdata->iov[1].iov_len = server->total_read - 4; ++ cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", ++ rdata->iov[0].iov_base, rdata->iov[0].iov_len); ++ cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", ++ rdata->iov[1].iov_base, rdata->iov[1].iov_len); ++ + /* Was the SMB read successful? */ + rdata->result = server->ops->map_error(buf, false); + if (rdata->result != 0) { + cifs_dbg(FYI, "%s: server returned error %d\n", + __func__, rdata->result); +- return cifs_readv_discard(server, mid); ++ /* normal error on read response */ ++ return __cifs_readv_discard(server, mid, false); + } + + /* Is there enough to get to the rest of the READ_RSP header? */ +@@ -1560,14 +1579,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + server->total_read += length; + } + +- /* set up first iov for signature check */ +- rdata->iov[0].iov_base = buf; +- rdata->iov[0].iov_len = 4; +- rdata->iov[1].iov_base = buf + 4; +- rdata->iov[1].iov_len = server->total_read - 4; +- cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n", +- rdata->iov[0].iov_base, server->total_read); +- + /* how much data is in the response? */ + #ifdef CONFIG_CIFS_SMB_DIRECT + use_rdma_mr = rdata->mr; +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c +index 6f24f129a751..b83ab72cf855 100644 +--- a/fs/cifs/connect.c ++++ b/fs/cifs/connect.c +@@ -534,6 +534,21 @@ server_unresponsive(struct TCP_Server_Info *server) + return false; + } + ++static inline bool ++zero_credits(struct TCP_Server_Info *server) ++{ ++ int val; ++ ++ spin_lock(&server->req_lock); ++ val = server->credits + server->echo_credits + server->oplock_credits; ++ if (server->in_flight == 0 && val == 0) { ++ spin_unlock(&server->req_lock); ++ return true; ++ } ++ spin_unlock(&server->req_lock); ++ return false; ++} ++ + static int + cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) + { +@@ -546,6 +561,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) + for (total_read = 0; msg_data_left(smb_msg); total_read += length) { + try_to_freeze(); + ++ /* reconnect if no credits and no requests in flight */ ++ if (zero_credits(server)) { ++ cifs_reconnect(server); ++ return -ECONNABORTED; ++ } ++ + if (server_unresponsive(server)) + return -ECONNABORTED; + if (cifs_rdma_enabled(server) && server->smbd_conn) +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c +index 6a9c47541c53..7b8b58fb4d3f 100644 +--- a/fs/cifs/smb2misc.c ++++ b/fs/cifs/smb2misc.c +@@ -648,6 +648,13 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) + if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK) + return false; + ++ if (rsp->sync_hdr.CreditRequest) { ++ spin_lock(&server->req_lock); ++ server->credits += le16_to_cpu(rsp->sync_hdr.CreditRequest); ++ spin_unlock(&server->req_lock); ++ wake_up(&server->request_q); ++ } ++ + if (rsp->StructureSize != + smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) { + if (le16_to_cpu(rsp->StructureSize) == 44) +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index 391b40e91910..d7dd7d38fad6 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -34,6 +34,7 @@ + #include 
"cifs_ioctl.h" + #include "smbdirect.h" + ++/* Change credits for different ops and return the total number of credits */ + static int + change_conf(struct TCP_Server_Info *server) + { +@@ -41,17 +42,15 @@ change_conf(struct TCP_Server_Info *server) + server->oplock_credits = server->echo_credits = 0; + switch (server->credits) { + case 0: +- return -1; ++ return 0; + case 1: + server->echoes = false; + server->oplocks = false; +- cifs_dbg(VFS, "disabling echoes and oplocks\n"); + break; + case 2: + server->echoes = true; + server->oplocks = false; + server->echo_credits = 1; +- cifs_dbg(FYI, "disabling oplocks\n"); + break; + default: + server->echoes = true; +@@ -64,14 +63,15 @@ change_conf(struct TCP_Server_Info *server) + server->echo_credits = 1; + } + server->credits -= server->echo_credits + server->oplock_credits; +- return 0; ++ return server->credits + server->echo_credits + server->oplock_credits; + } + + static void + smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add, + const int optype) + { +- int *val, rc = 0; ++ int *val, rc = -1; ++ + spin_lock(&server->req_lock); + val = server->ops->get_credits_field(server, optype); + +@@ -101,8 +101,26 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add, + } + spin_unlock(&server->req_lock); + wake_up(&server->request_q); +- if (rc) +- cifs_reconnect(server); ++ ++ if (server->tcpStatus == CifsNeedReconnect) ++ return; ++ ++ switch (rc) { ++ case -1: ++ /* change_conf hasn't been executed */ ++ break; ++ case 0: ++ cifs_dbg(VFS, "Possible client or server bug - zero credits\n"); ++ break; ++ case 1: ++ cifs_dbg(VFS, "disabling echoes and oplocks\n"); ++ break; ++ case 2: ++ cifs_dbg(FYI, "disabling oplocks\n"); ++ break; ++ default: ++ cifs_dbg(FYI, "add %u credits total=%d\n", add, rc); ++ } + } + + static void +@@ -165,14 +183,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size, + + scredits = server->credits; + /* can deadlock with reopen */ +- if (scredits == 1) { ++ if (scredits <= 8) { + *num = SMB2_MAX_BUFFER_SIZE; + *credits = 0; + break; + } + +- /* leave one credit for a possible reopen */ +- scredits--; ++ /* leave some credits for reopen and other ops */ ++ scredits -= 8; + *num = min_t(unsigned int, size, + scredits * SMB2_MAX_BUFFER_SIZE); + +@@ -3101,11 +3119,23 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, + server->ops->is_status_pending(buf, server, 0)) + return -1; + +- rdata->result = server->ops->map_error(buf, false); ++ /* set up first two iov to get credits */ ++ rdata->iov[0].iov_base = buf; ++ rdata->iov[0].iov_len = 4; ++ rdata->iov[1].iov_base = buf + 4; ++ rdata->iov[1].iov_len = ++ min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4; ++ cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", ++ rdata->iov[0].iov_base, rdata->iov[0].iov_len); ++ cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", ++ rdata->iov[1].iov_base, rdata->iov[1].iov_len); ++ ++ rdata->result = server->ops->map_error(buf, true); + if (rdata->result != 0) { + cifs_dbg(FYI, "%s: server returned error %d\n", + __func__, rdata->result); +- dequeue_mid(mid, rdata->result); ++ /* normal error on read response */ ++ dequeue_mid(mid, false); + return 0; + } + +@@ -3178,14 +3208,6 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, + return 0; + } + +- /* set up first iov for signature check */ +- rdata->iov[0].iov_base = buf; +- rdata->iov[0].iov_len = 4; +- rdata->iov[1].iov_base = buf + 4; +- rdata->iov[1].iov_len = 
server->vals->read_rsp_size - 4; +- cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", +- rdata->iov[0].iov_base, server->vals->read_rsp_size); +- + length = rdata->copy_into_pages(server, rdata, &iter); + + kfree(bvec); +diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c +index 105576daca4a..798f1253141a 100644 +--- a/fs/notify/inotify/inotify_user.c ++++ b/fs/notify/inotify/inotify_user.c +@@ -724,8 +724,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname, + return -EBADF; + + /* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */ +- if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) +- return -EINVAL; ++ if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) { ++ ret = -EINVAL; ++ goto fput_and_out; ++ } + + /* verify that this is indeed an inotify instance */ + if (unlikely(f.file->f_op != &inotify_fops)) { +diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h +index d93e89761a8b..a6349a29748c 100644 +--- a/include/linux/bpf_verifier.h ++++ b/include/linux/bpf_verifier.h +@@ -147,6 +147,7 @@ struct bpf_verifier_state { + /* call stack tracking */ + struct bpf_func_state *frame[MAX_CALL_FRAMES]; + u32 curframe; ++ bool speculative; + }; + + #define bpf_get_spilled_reg(slot, frame) \ +@@ -166,15 +167,25 @@ struct bpf_verifier_state_list { + struct bpf_verifier_state_list *next; + }; + ++/* Possible states for alu_state member. */ ++#define BPF_ALU_SANITIZE_SRC 1U ++#define BPF_ALU_SANITIZE_DST 2U ++#define BPF_ALU_NEG_VALUE (1U << 2) ++#define BPF_ALU_NON_POINTER (1U << 3) ++#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ ++ BPF_ALU_SANITIZE_DST) ++ + struct bpf_insn_aux_data { + union { + enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ + unsigned long map_state; /* pointer/poison value for maps */ + s32 call_imm; /* saved imm field of call insn */ ++ u32 alu_limit; /* limit for add/sub register with pointer */ + }; + int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ + int sanitize_stack_off; /* stack slot to be cleared */ + bool seen; /* this insn was processed by the verifier */ ++ u8 alu_state; /* used in combination with alu_limit */ + }; + + #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ +@@ -210,6 +221,8 @@ struct bpf_subprog_info { + * one verifier_env per bpf_check() call + */ + struct bpf_verifier_env { ++ u32 insn_idx; ++ u32 prev_insn_idx; + struct bpf_prog *prog; /* eBPF program being verified */ + const struct bpf_verifier_ops *ops; + struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ +diff --git a/include/linux/filter.h b/include/linux/filter.h +index 25a556589ae8..b776626aeb84 100644 +--- a/include/linux/filter.h ++++ b/include/linux/filter.h +@@ -53,14 +53,10 @@ struct sock_reuseport; + #define BPF_REG_D BPF_REG_8 /* data, callee-saved */ + #define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */ + +-/* Kernel hidden auxiliary/helper register for hardening step. +- * Only used by eBPF JITs. It's nothing more than a temporary +- * register that JITs use internally, only that here it's part +- * of eBPF instructions that have been rewritten for blinding +- * constants. See JIT pre-step in bpf_jit_blind_constants(). +- */ ++/* Kernel hidden auxiliary/helper register. 
*/ + #define BPF_REG_AX MAX_BPF_REG +-#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1) ++#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1) ++#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG + + /* unused opcode to mark special call to bpf_tail_call() helper */ + #define BPF_TAIL_CALL 0xf0 +diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h +index 14131b6fae68..dcb6977afce9 100644 +--- a/include/linux/hyperv.h ++++ b/include/linux/hyperv.h +@@ -830,15 +830,6 @@ struct vmbus_channel { + * All Sub-channels of a primary channel are linked here. + */ + struct list_head sc_list; +- /* +- * Current number of sub-channels. +- */ +- int num_sc; +- /* +- * Number of a sub-channel (position within sc_list) which is supposed +- * to be used as the next outgoing channel. +- */ +- int next_oc; + /* + * The primary channel this sub-channel belongs to. + * This will be NULL for the primary channel. +@@ -972,14 +963,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel, + void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel, + void (*chn_rescind_cb)(struct vmbus_channel *)); + +-/* +- * Retrieve the (sub) channel on which to send an outgoing request. +- * When a primary channel has multiple sub-channels, we choose a +- * channel whose VCPU binding is closest to the VCPU on which +- * this call is being made. +- */ +-struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary); +- + /* + * Check if sub-channels have already been offerred. This API will be useful + * when the driver is unloaded after establishing sub-channels. In this case, +@@ -1176,8 +1159,9 @@ struct hv_ring_buffer_debug_info { + u32 bytes_avail_towrite; + }; + +-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, +- struct hv_ring_buffer_debug_info *debug_info); ++ ++int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, ++ struct hv_ring_buffer_debug_info *debug_info); + + /* Vmbus interface */ + #define vmbus_driver_register(driver) \ +diff --git a/include/linux/phy.h b/include/linux/phy.h +index 306630d13523..f5d4235e3844 100644 +--- a/include/linux/phy.h ++++ b/include/linux/phy.h +@@ -502,8 +502,8 @@ struct phy_device { + * only works for PHYs with IDs which match this field + * name: The friendly name of this PHY type + * phy_id_mask: Defines the important bits of the phy_id +- * features: A list of features (speed, duplex, etc) supported +- * by this PHY ++ * features: A mandatory list of features (speed, duplex, etc) ++ * supported by this PHY + * flags: A bitfield defining certain other features this PHY + * supports (like interrupts) + * +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h +index 0d1b2c3f127b..a6d820ad17f0 100644 +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -3204,6 +3204,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); + * + * This is exactly the same as pskb_trim except that it ensures the + * checksum of received packets are still valid after the operation. ++ * It can change skb pointers. 
+ */ + + static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h +index c5969762a8f4..9c8214d2116d 100644 +--- a/include/net/ip_fib.h ++++ b/include/net/ip_fib.h +@@ -241,7 +241,7 @@ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *, + struct netlink_ext_ack *extack); + int fib_table_dump(struct fib_table *table, struct sk_buff *skb, + struct netlink_callback *cb, struct fib_dump_filter *filter); +-int fib_table_flush(struct net *net, struct fib_table *table); ++int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all); + struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); + void fib_table_flush_external(struct fib_table *table); + void fib_free_table(struct fib_table *tb); +diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h +index fb78f6f500f3..f056b2a00d5c 100644 +--- a/include/uapi/linux/input.h ++++ b/include/uapi/linux/input.h +@@ -26,13 +26,17 @@ + */ + + struct input_event { +-#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL) ++#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__) + struct timeval time; + #define input_event_sec time.tv_sec + #define input_event_usec time.tv_usec + #else + __kernel_ulong_t __sec; ++#if defined(__sparc__) && defined(__arch64__) ++ unsigned int __usec; ++#else + __kernel_ulong_t __usec; ++#endif + #define input_event_sec __sec + #define input_event_usec __usec + #endif +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c +index b2890c268cb3..ac44653025ad 100644 +--- a/kernel/bpf/core.c ++++ b/kernel/bpf/core.c +@@ -52,6 +52,7 @@ + #define DST regs[insn->dst_reg] + #define SRC regs[insn->src_reg] + #define FP regs[BPF_REG_FP] ++#define AX regs[BPF_REG_AX] + #define ARG1 regs[BPF_REG_ARG1] + #define CTX regs[BPF_REG_CTX] + #define IMM insn->imm +@@ -726,6 +727,26 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from, + BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG); + BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG); + ++ /* Constraints on AX register: ++ * ++ * AX register is inaccessible from user space. It is mapped in ++ * all JITs, and used here for constant blinding rewrites. It is ++ * typically "stateless" meaning its contents are only valid within ++ * the executed instruction, but not across several instructions. ++ * There are a few exceptions however which are further detailed ++ * below. ++ * ++ * Constant blinding is only used by JITs, not in the interpreter. ++ * The interpreter uses AX in some occasions as a local temporary ++ * register e.g. in DIV or MOD instructions. ++ * ++ * In restricted circumstances, the verifier can also use the AX ++ * register for rewrites as long as they do not interfere with ++ * the above cases! 
++ */ ++ if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX) ++ goto out; ++ + if (from->imm == 0 && + (from->code == (BPF_ALU | BPF_MOV | BPF_K) || + from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) { +@@ -1055,7 +1076,6 @@ bool bpf_opcode_in_insntable(u8 code) + */ + static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack) + { +- u64 tmp; + #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y + #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z + static const void *jumptable[256] = { +@@ -1129,36 +1149,36 @@ select_insn: + (*(s64 *) &DST) >>= IMM; + CONT; + ALU64_MOD_X: +- div64_u64_rem(DST, SRC, &tmp); +- DST = tmp; ++ div64_u64_rem(DST, SRC, &AX); ++ DST = AX; + CONT; + ALU_MOD_X: +- tmp = (u32) DST; +- DST = do_div(tmp, (u32) SRC); ++ AX = (u32) DST; ++ DST = do_div(AX, (u32) SRC); + CONT; + ALU64_MOD_K: +- div64_u64_rem(DST, IMM, &tmp); +- DST = tmp; ++ div64_u64_rem(DST, IMM, &AX); ++ DST = AX; + CONT; + ALU_MOD_K: +- tmp = (u32) DST; +- DST = do_div(tmp, (u32) IMM); ++ AX = (u32) DST; ++ DST = do_div(AX, (u32) IMM); + CONT; + ALU64_DIV_X: + DST = div64_u64(DST, SRC); + CONT; + ALU_DIV_X: +- tmp = (u32) DST; +- do_div(tmp, (u32) SRC); +- DST = (u32) tmp; ++ AX = (u32) DST; ++ do_div(AX, (u32) SRC); ++ DST = (u32) AX; + CONT; + ALU64_DIV_K: + DST = div64_u64(DST, IMM); + CONT; + ALU_DIV_K: +- tmp = (u32) DST; +- do_div(tmp, (u32) IMM); +- DST = (u32) tmp; ++ AX = (u32) DST; ++ do_div(AX, (u32) IMM); ++ DST = (u32) AX; + CONT; + ALU_END_TO_BE: + switch (IMM) { +@@ -1414,7 +1434,7 @@ STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */ + static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \ + { \ + u64 stack[stack_size / sizeof(u64)]; \ +- u64 regs[MAX_BPF_REG]; \ ++ u64 regs[MAX_BPF_EXT_REG]; \ + \ + FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ + ARG1 = (u64) (unsigned long) ctx; \ +@@ -1427,7 +1447,7 @@ static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \ + const struct bpf_insn *insn) \ + { \ + u64 stack[stack_size / sizeof(u64)]; \ +- u64 regs[MAX_BPF_REG]; \ ++ u64 regs[MAX_BPF_EXT_REG]; \ + \ + FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ + BPF_R1 = r1; \ +diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c +index 99d243e1ad6e..52378d3e34b3 100644 +--- a/kernel/bpf/map_in_map.c ++++ b/kernel/bpf/map_in_map.c +@@ -12,6 +12,7 @@ + struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) + { + struct bpf_map *inner_map, *inner_map_meta; ++ u32 inner_map_meta_size; + struct fd f; + + f = fdget(inner_map_ufd); +@@ -36,7 +37,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) + return ERR_PTR(-EINVAL); + } + +- inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER); ++ inner_map_meta_size = sizeof(*inner_map_meta); ++ /* In some cases verifier needs to access beyond just base map. */ ++ if (inner_map->ops == &array_map_ops) ++ inner_map_meta_size = sizeof(struct bpf_array); ++ ++ inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER); + if (!inner_map_meta) { + fdput(f); + return ERR_PTR(-ENOMEM); +@@ -46,9 +52,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) + inner_map_meta->key_size = inner_map->key_size; + inner_map_meta->value_size = inner_map->value_size; + inner_map_meta->map_flags = inner_map->map_flags; +- inner_map_meta->ops = inner_map->ops; + inner_map_meta->max_entries = inner_map->max_entries; + ++ /* Misc members not needed in bpf_map_meta_equal() check. 
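The AX comment block closing above mentions constant blinding; in bpf_jit_blind_insn() this works by staging rnd ^ imm through AX and XORing rnd back at runtime, so an attacker-chosen immediate never appears verbatim in the JIT image. A hedged userspace sketch of just that arithmetic (the actual BPF instruction emission is elided):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Only the arithmetic of the blinding rewrite; emitting the
 * MOV/XOR instruction pair through AX is elided. */
static uint32_t blind_then_recover(uint32_t imm, uint32_t rnd)
{
	uint32_t ax = rnd ^ imm; /* the only constant stored in the image */
	ax ^= rnd;               /* runtime XOR restores imm inside AX    */
	return ax;
}

int main(void)
{
	uint32_t imm = 0x0f000090;         /* an attacker-chosen immediate */
	uint32_t rnd = (uint32_t)random(); /* fresh cookie per rewrite     */

	printf("imm=%#x blinded=%#x recovered=%#x\n",
	       imm, rnd ^ imm, blind_then_recover(imm, rnd));
	return 0;
}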
*/ ++ inner_map_meta->ops = inner_map->ops; ++ if (inner_map->ops == &array_map_ops) { ++ inner_map_meta->unpriv_array = inner_map->unpriv_array; ++ container_of(inner_map_meta, struct bpf_array, map)->index_mask = ++ container_of(inner_map, struct bpf_array, map)->index_mask; ++ } ++ + fdput(f); + return inner_map_meta; + } +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index eedc7bd4185d..e4c826229152 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -648,6 +648,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, + free_func_state(dst_state->frame[i]); + dst_state->frame[i] = NULL; + } ++ dst_state->speculative = src->speculative; + dst_state->curframe = src->curframe; + for (i = 0; i <= src->curframe; i++) { + dst = dst_state->frame[i]; +@@ -692,7 +693,8 @@ static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, + } + + static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, +- int insn_idx, int prev_insn_idx) ++ int insn_idx, int prev_insn_idx, ++ bool speculative) + { + struct bpf_verifier_state *cur = env->cur_state; + struct bpf_verifier_stack_elem *elem; +@@ -710,6 +712,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, + err = copy_verifier_state(&elem->st, cur); + if (err) + goto err; ++ elem->st.speculative |= speculative; + if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { + verbose(env, "BPF program is too complex\n"); + goto err; +@@ -1314,6 +1317,31 @@ static int check_stack_read(struct bpf_verifier_env *env, + } + } + ++static int check_stack_access(struct bpf_verifier_env *env, ++ const struct bpf_reg_state *reg, ++ int off, int size) ++{ ++ /* Stack accesses must be at a fixed offset, so that we ++ * can determine what type of data were returned. See ++ * check_stack_read(). ++ */ ++ if (!tnum_is_const(reg->var_off)) { ++ char tn_buf[48]; ++ ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); ++ verbose(env, "variable stack access var_off=%s off=%d size=%d", ++ tn_buf, off, size); ++ return -EACCES; ++ } ++ ++ if (off >= 0 || off < -MAX_BPF_STACK) { ++ verbose(env, "invalid stack off=%d size=%d\n", off, size); ++ return -EACCES; ++ } ++ ++ return 0; ++} ++ + /* check read/write into map element returned by bpf_map_lookup_elem() */ + static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, + int size, bool zero_size_allowed) +@@ -1345,13 +1373,17 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, + */ + if (env->log.level) + print_verifier_state(env, state); ++ + /* The minimum value is only important with signed + * comparisons where we can't assume the floor of a + * value is 0. If we are using signed variables for our + * index'es we need to make sure that whatever we use + * will have a set floor within our range. + */ +- if (reg->smin_value < 0) { ++ if (reg->smin_value < 0 && ++ (reg->smin_value == S64_MIN || ++ (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || ++ reg->smin_value + off < 0)) { + verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", + regno); + return -EACCES; +@@ -1870,24 +1902,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn + } + + } else if (reg->type == PTR_TO_STACK) { +- /* stack accesses must be at a fixed offset, so that we can +- * determine what type of data were returned. +- * See check_stack_read(). 
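The widened smin_value test in check_map_access() above rejects three cases: S64_MIN (whose magnitude cannot be used), sums that lose bits when truncated to the s32 instruction offset, and sums that land before the start of the map value. A direct userspace transcription, for a register whose minimum is negative:

#include <stdio.h>
#include <stdint.h>

/* Transcription of the new guard for a register with smin < 0. */
static int min_off_is_sane(int32_t off, int64_t smin)
{
	if (smin == INT64_MIN)
		return 0;       /* magnitude unusable                */
	if (off + smin != (int64_t)(int32_t)(off + smin))
		return 0;       /* sum would truncate in an s32 off  */
	if (smin + off < 0)
		return 0;       /* lands before the start of the map */
	return 1;
}

int main(void)
{
	printf("%d\n", min_off_is_sane(16, -8));          /* 1: in bounds */
	printf("%d\n", min_off_is_sane(16, -32));         /* 0: negative  */
	printf("%d\n", min_off_is_sane(0, -(1LL << 40))); /* 0: truncates */
	printf("%d\n", min_off_is_sane(0, INT64_MIN));    /* 0: S64_MIN   */
	return 0;
}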
+- */ +- if (!tnum_is_const(reg->var_off)) { +- char tn_buf[48]; +- +- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); +- verbose(env, "variable stack access var_off=%s off=%d size=%d", +- tn_buf, off, size); +- return -EACCES; +- } + off += reg->var_off.value; +- if (off >= 0 || off < -MAX_BPF_STACK) { +- verbose(env, "invalid stack off=%d size=%d\n", off, +- size); +- return -EACCES; +- } ++ err = check_stack_access(env, reg, off, size); ++ if (err) ++ return err; + + state = func(env, reg); + err = update_stack_depth(env, state, off); +@@ -2968,6 +2986,125 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env, + return true; + } + ++static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) ++{ ++ return &env->insn_aux_data[env->insn_idx]; ++} ++ ++static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, ++ u32 *ptr_limit, u8 opcode, bool off_is_neg) ++{ ++ bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || ++ (opcode == BPF_SUB && !off_is_neg); ++ u32 off; ++ ++ switch (ptr_reg->type) { ++ case PTR_TO_STACK: ++ off = ptr_reg->off + ptr_reg->var_off.value; ++ if (mask_to_left) ++ *ptr_limit = MAX_BPF_STACK + off; ++ else ++ *ptr_limit = -off; ++ return 0; ++ case PTR_TO_MAP_VALUE: ++ if (mask_to_left) { ++ *ptr_limit = ptr_reg->umax_value + ptr_reg->off; ++ } else { ++ off = ptr_reg->smin_value + ptr_reg->off; ++ *ptr_limit = ptr_reg->map_ptr->value_size - off; ++ } ++ return 0; ++ default: ++ return -EINVAL; ++ } ++} ++ ++static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, ++ const struct bpf_insn *insn) ++{ ++ return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K; ++} ++ ++static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, ++ u32 alu_state, u32 alu_limit) ++{ ++ /* If we arrived here from different branches with different ++ * state or limits to sanitize, then this won't work. ++ */ ++ if (aux->alu_state && ++ (aux->alu_state != alu_state || ++ aux->alu_limit != alu_limit)) ++ return -EACCES; ++ ++ /* Corresponding fixup done in fixup_bpf_calls(). */ ++ aux->alu_state = alu_state; ++ aux->alu_limit = alu_limit; ++ return 0; ++} ++ ++static int sanitize_val_alu(struct bpf_verifier_env *env, ++ struct bpf_insn *insn) ++{ ++ struct bpf_insn_aux_data *aux = cur_aux(env); ++ ++ if (can_skip_alu_sanitation(env, insn)) ++ return 0; ++ ++ return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); ++} ++ ++static int sanitize_ptr_alu(struct bpf_verifier_env *env, ++ struct bpf_insn *insn, ++ const struct bpf_reg_state *ptr_reg, ++ struct bpf_reg_state *dst_reg, ++ bool off_is_neg) ++{ ++ struct bpf_verifier_state *vstate = env->cur_state; ++ struct bpf_insn_aux_data *aux = cur_aux(env); ++ bool ptr_is_dst_reg = ptr_reg == dst_reg; ++ u8 opcode = BPF_OP(insn->code); ++ u32 alu_state, alu_limit; ++ struct bpf_reg_state tmp; ++ bool ret; ++ ++ if (can_skip_alu_sanitation(env, insn)) ++ return 0; ++ ++ /* We already marked aux for masking from non-speculative ++ * paths, thus we got here in the first place. We only care ++ * to explore bad access from here. ++ */ ++ if (vstate->speculative) ++ goto do_sim; ++ ++ alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; ++ alu_state |= ptr_is_dst_reg ? 
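For PTR_TO_STACK, retrieve_ptr_limit() above derives how many bytes a variable offset may legally move the pointer in the direction being sanitized. A small sketch with mainline's MAX_BPF_STACK of 512; the example offset is arbitrary:

#include <stdio.h>

#define MAX_BPF_STACK 512 /* mainline value */

/* For a stack pointer sitting `off` bytes below the frame base
 * (off is negative), the limit is the distance to whichever edge
 * of the frame the sanitized operation moves toward. */
static unsigned int stack_ptr_limit(int off, int mask_to_left)
{
	return mask_to_left ? MAX_BPF_STACK + off /* toward -MAX_BPF_STACK */
			    : -off;               /* back toward offset 0  */
}

int main(void)
{
	printf("down: %u bytes\n", stack_ptr_limit(-64, 1)); /* 448 */
	printf("up:   %u bytes\n", stack_ptr_limit(-64, 0)); /*  64 */
	return 0;
}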
++ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; ++ ++ if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) ++ return 0; ++ if (update_alu_sanitation_state(aux, alu_state, alu_limit)) ++ return -EACCES; ++do_sim: ++ /* Simulate and find potential out-of-bounds access under ++ * speculative execution from truncation as a result of ++ * masking when off was not within expected range. If off ++ * sits in dst, then we temporarily need to move ptr there ++ * to simulate dst (== 0) +/-= ptr. Needed, for example, ++ * for cases where we use K-based arithmetic in one direction ++ * and truncated reg-based in the other in order to explore ++ * bad access. ++ */ ++ if (!ptr_is_dst_reg) { ++ tmp = *dst_reg; ++ *dst_reg = *ptr_reg; ++ } ++ ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); ++ if (!ptr_is_dst_reg) ++ *dst_reg = tmp; ++ return !ret ? -EFAULT : 0; ++} ++ + /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. + * Caller should also handle BPF_MOV case separately. + * If we return -EACCES, caller may want to try again treating pointer as a +@@ -2986,8 +3123,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, + smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; + u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, + umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; ++ u32 dst = insn->dst_reg, src = insn->src_reg; + u8 opcode = BPF_OP(insn->code); +- u32 dst = insn->dst_reg; ++ int ret; + + dst_reg = ®s[dst]; + +@@ -3020,6 +3158,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, + verbose(env, "R%d pointer arithmetic on %s prohibited\n", + dst, reg_type_str[ptr_reg->type]); + return -EACCES; ++ case PTR_TO_MAP_VALUE: ++ if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) { ++ verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n", ++ off_reg == dst_reg ? dst : src); ++ return -EACCES; ++ } ++ /* fall-through */ + default: + break; + } +@@ -3036,6 +3181,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, + + switch (opcode) { + case BPF_ADD: ++ ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); ++ if (ret < 0) { ++ verbose(env, "R%d tried to add from different maps or paths\n", dst); ++ return ret; ++ } + /* We can take a fixed offset as long as it doesn't overflow + * the s32 'off' field + */ +@@ -3086,6 +3236,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, + } + break; + case BPF_SUB: ++ ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); ++ if (ret < 0) { ++ verbose(env, "R%d tried to sub from different maps or paths\n", dst); ++ return ret; ++ } + if (dst_reg == off_reg) { + /* scalar -= pointer. Creates an unknown scalar */ + verbose(env, "R%d tried to subtract pointer from scalar\n", +@@ -3165,6 +3320,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, + __update_reg_bounds(dst_reg); + __reg_deduce_bounds(dst_reg); + __reg_bound_offset(dst_reg); ++ ++ /* For unprivileged we require that resulting offset must be in bounds ++ * in order to be able to sanitize access later on. 
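The new PTR_TO_MAP_VALUE case above refuses unprivileged pointer arithmetic with an unknown scalar whose signed bounds straddle zero, since no single alu_limit can cover movement in both directions. The predicate, transcribed for illustration:

#include <stdio.h>
#include <stdint.h>

static int mixed_signed_bounds(int64_t smin, int64_t smax)
{
	return (smin < 0) != (smax < 0);
}

int main(void)
{
	printf("%d\n", mixed_signed_bounds(-4, 12)); /* 1: rejected for !root */
	printf("%d\n", mixed_signed_bounds(0, 12));  /* 0: maskable upward    */
	printf("%d\n", mixed_signed_bounds(-8, -1)); /* 0: maskable downward  */
	return 0;
}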
++ */ ++ if (!env->allow_ptr_leaks) { ++ if (dst_reg->type == PTR_TO_MAP_VALUE && ++ check_map_access(env, dst, dst_reg->off, 1, false)) { ++ verbose(env, "R%d pointer arithmetic of map value goes out of range, " ++ "prohibited for !root\n", dst); ++ return -EACCES; ++ } else if (dst_reg->type == PTR_TO_STACK && ++ check_stack_access(env, dst_reg, dst_reg->off + ++ dst_reg->var_off.value, 1)) { ++ verbose(env, "R%d stack pointer arithmetic goes out of range, " ++ "prohibited for !root\n", dst); ++ return -EACCES; ++ } ++ } ++ + return 0; + } + +@@ -3183,6 +3357,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, + s64 smin_val, smax_val; + u64 umin_val, umax_val; + u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; ++ u32 dst = insn->dst_reg; ++ int ret; + + if (insn_bitness == 32) { + /* Relevant for 32-bit RSH: Information can propagate towards +@@ -3217,6 +3393,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, + + switch (opcode) { + case BPF_ADD: ++ ret = sanitize_val_alu(env, insn); ++ if (ret < 0) { ++ verbose(env, "R%d tried to add from different pointers or scalars\n", dst); ++ return ret; ++ } + if (signed_add_overflows(dst_reg->smin_value, smin_val) || + signed_add_overflows(dst_reg->smax_value, smax_val)) { + dst_reg->smin_value = S64_MIN; +@@ -3236,6 +3417,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, + dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); + break; + case BPF_SUB: ++ ret = sanitize_val_alu(env, insn); ++ if (ret < 0) { ++ verbose(env, "R%d tried to sub from different pointers or scalars\n", dst); ++ return ret; ++ } + if (signed_sub_overflows(dst_reg->smin_value, smax_val) || + signed_sub_overflows(dst_reg->smax_value, smin_val)) { + /* Overflow possible, we know nothing */ +@@ -4249,7 +4435,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, + } + } + +- other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); ++ other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, ++ false); + if (!other_branch) + return -EFAULT; + other_branch_regs = other_branch->frame[other_branch->curframe]->regs; +@@ -4990,6 +5177,12 @@ static bool states_equal(struct bpf_verifier_env *env, + if (old->curframe != cur->curframe) + return false; + ++ /* Verification state from speculative execution simulation ++ * must never prune a non-speculative execution one. 
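The states_equal() hunk above encodes an ordering rule: a state reached only under simulated speculative execution proves less than one reached on an architectural path, so it may never prune it. A toy sketch of the predicate; the struct is a stand-in for bpf_verifier_state:

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for bpf_verifier_state; only the new flag matters here. */
struct vstate { bool speculative; };

static bool may_prune(const struct vstate *old, const struct vstate *cur)
{
	if (old->speculative && !cur->speculative)
		return false; /* speculative proof can't cover a real path */
	return true;          /* remaining equivalence checks elided       */
}

int main(void)
{
	struct vstate spec = { true }, real = { false };

	printf("speculative prunes real? %d\n", may_prune(&spec, &real)); /* 0 */
	printf("real prunes speculative? %d\n", may_prune(&real, &spec)); /* 1 */
	return 0;
}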
++ */ ++ if (old->speculative && !cur->speculative) ++ return false; ++ + /* for states to be equal callsites have to be the same + * and all frame states need to be equivalent + */ +@@ -5180,7 +5373,6 @@ static int do_check(struct bpf_verifier_env *env) + struct bpf_insn *insns = env->prog->insnsi; + struct bpf_reg_state *regs; + int insn_cnt = env->prog->len, i; +- int insn_idx, prev_insn_idx = 0; + int insn_processed = 0; + bool do_print_state = false; + +@@ -5188,6 +5380,7 @@ static int do_check(struct bpf_verifier_env *env) + if (!state) + return -ENOMEM; + state->curframe = 0; ++ state->speculative = false; + state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); + if (!state->frame[0]) { + kfree(state); +@@ -5198,19 +5391,19 @@ static int do_check(struct bpf_verifier_env *env) + BPF_MAIN_FUNC /* callsite */, + 0 /* frameno */, + 0 /* subprogno, zero == main subprog */); +- insn_idx = 0; ++ + for (;;) { + struct bpf_insn *insn; + u8 class; + int err; + +- if (insn_idx >= insn_cnt) { ++ if (env->insn_idx >= insn_cnt) { + verbose(env, "invalid insn idx %d insn_cnt %d\n", +- insn_idx, insn_cnt); ++ env->insn_idx, insn_cnt); + return -EFAULT; + } + +- insn = &insns[insn_idx]; ++ insn = &insns[env->insn_idx]; + class = BPF_CLASS(insn->code); + + if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { +@@ -5220,17 +5413,19 @@ static int do_check(struct bpf_verifier_env *env) + return -E2BIG; + } + +- err = is_state_visited(env, insn_idx); ++ err = is_state_visited(env, env->insn_idx); + if (err < 0) + return err; + if (err == 1) { + /* found equivalent state, can prune the search */ + if (env->log.level) { + if (do_print_state) +- verbose(env, "\nfrom %d to %d: safe\n", +- prev_insn_idx, insn_idx); ++ verbose(env, "\nfrom %d to %d%s: safe\n", ++ env->prev_insn_idx, env->insn_idx, ++ env->cur_state->speculative ? ++ " (speculative execution)" : ""); + else +- verbose(env, "%d: safe\n", insn_idx); ++ verbose(env, "%d: safe\n", env->insn_idx); + } + goto process_bpf_exit; + } +@@ -5243,10 +5438,12 @@ static int do_check(struct bpf_verifier_env *env) + + if (env->log.level > 1 || (env->log.level && do_print_state)) { + if (env->log.level > 1) +- verbose(env, "%d:", insn_idx); ++ verbose(env, "%d:", env->insn_idx); + else +- verbose(env, "\nfrom %d to %d:", +- prev_insn_idx, insn_idx); ++ verbose(env, "\nfrom %d to %d%s:", ++ env->prev_insn_idx, env->insn_idx, ++ env->cur_state->speculative ? 
++ " (speculative execution)" : ""); + print_verifier_state(env, state->frame[state->curframe]); + do_print_state = false; + } +@@ -5257,19 +5454,19 @@ static int do_check(struct bpf_verifier_env *env) + .private_data = env, + }; + +- verbose(env, "%d: ", insn_idx); ++ verbose(env, "%d: ", env->insn_idx); + print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); + } + + if (bpf_prog_is_dev_bound(env->prog->aux)) { +- err = bpf_prog_offload_verify_insn(env, insn_idx, +- prev_insn_idx); ++ err = bpf_prog_offload_verify_insn(env, env->insn_idx, ++ env->prev_insn_idx); + if (err) + return err; + } + + regs = cur_regs(env); +- env->insn_aux_data[insn_idx].seen = true; ++ env->insn_aux_data[env->insn_idx].seen = true; + + if (class == BPF_ALU || class == BPF_ALU64) { + err = check_alu_op(env, insn); +@@ -5295,13 +5492,13 @@ static int do_check(struct bpf_verifier_env *env) + /* check that memory (src_reg + off) is readable, + * the state of dst_reg will be updated by this func + */ +- err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, +- BPF_SIZE(insn->code), BPF_READ, +- insn->dst_reg, false); ++ err = check_mem_access(env, env->insn_idx, insn->src_reg, ++ insn->off, BPF_SIZE(insn->code), ++ BPF_READ, insn->dst_reg, false); + if (err) + return err; + +- prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; ++ prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type; + + if (*prev_src_type == NOT_INIT) { + /* saw a valid insn +@@ -5326,10 +5523,10 @@ static int do_check(struct bpf_verifier_env *env) + enum bpf_reg_type *prev_dst_type, dst_reg_type; + + if (BPF_MODE(insn->code) == BPF_XADD) { +- err = check_xadd(env, insn_idx, insn); ++ err = check_xadd(env, env->insn_idx, insn); + if (err) + return err; +- insn_idx++; ++ env->insn_idx++; + continue; + } + +@@ -5345,13 +5542,13 @@ static int do_check(struct bpf_verifier_env *env) + dst_reg_type = regs[insn->dst_reg].type; + + /* check that memory (dst_reg + off) is writeable */ +- err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, +- BPF_SIZE(insn->code), BPF_WRITE, +- insn->src_reg, false); ++ err = check_mem_access(env, env->insn_idx, insn->dst_reg, ++ insn->off, BPF_SIZE(insn->code), ++ BPF_WRITE, insn->src_reg, false); + if (err) + return err; + +- prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; ++ prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; + + if (*prev_dst_type == NOT_INIT) { + *prev_dst_type = dst_reg_type; +@@ -5379,9 +5576,9 @@ static int do_check(struct bpf_verifier_env *env) + } + + /* check that memory (dst_reg + off) is writeable */ +- err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, +- BPF_SIZE(insn->code), BPF_WRITE, +- -1, false); ++ err = check_mem_access(env, env->insn_idx, insn->dst_reg, ++ insn->off, BPF_SIZE(insn->code), ++ BPF_WRITE, -1, false); + if (err) + return err; + +@@ -5399,9 +5596,9 @@ static int do_check(struct bpf_verifier_env *env) + } + + if (insn->src_reg == BPF_PSEUDO_CALL) +- err = check_func_call(env, insn, &insn_idx); ++ err = check_func_call(env, insn, &env->insn_idx); + else +- err = check_helper_call(env, insn->imm, insn_idx); ++ err = check_helper_call(env, insn->imm, env->insn_idx); + if (err) + return err; + +@@ -5414,7 +5611,7 @@ static int do_check(struct bpf_verifier_env *env) + return -EINVAL; + } + +- insn_idx += insn->off + 1; ++ env->insn_idx += insn->off + 1; + continue; + + } else if (opcode == BPF_EXIT) { +@@ -5428,8 +5625,8 @@ static int do_check(struct bpf_verifier_env *env) + + if (state->curframe) { + /* exit from nested 
function */ +- prev_insn_idx = insn_idx; +- err = prepare_func_exit(env, &insn_idx); ++ env->prev_insn_idx = env->insn_idx; ++ err = prepare_func_exit(env, &env->insn_idx); + if (err) + return err; + do_print_state = true; +@@ -5459,7 +5656,8 @@ static int do_check(struct bpf_verifier_env *env) + if (err) + return err; + process_bpf_exit: +- err = pop_stack(env, &prev_insn_idx, &insn_idx); ++ err = pop_stack(env, &env->prev_insn_idx, ++ &env->insn_idx); + if (err < 0) { + if (err != -ENOENT) + return err; +@@ -5469,7 +5667,7 @@ process_bpf_exit: + continue; + } + } else { +- err = check_cond_jmp_op(env, insn, &insn_idx); ++ err = check_cond_jmp_op(env, insn, &env->insn_idx); + if (err) + return err; + } +@@ -5486,8 +5684,8 @@ process_bpf_exit: + if (err) + return err; + +- insn_idx++; +- env->insn_aux_data[insn_idx].seen = true; ++ env->insn_idx++; ++ env->insn_aux_data[env->insn_idx].seen = true; + } else { + verbose(env, "invalid BPF_LD mode\n"); + return -EINVAL; +@@ -5497,7 +5695,7 @@ process_bpf_exit: + return -EINVAL; + } + +- insn_idx++; ++ env->insn_idx++; + } + + verbose(env, "processed %d insns (limit %d), stack depth ", +@@ -6220,6 +6418,57 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) + continue; + } + ++ if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || ++ insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { ++ const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; ++ const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; ++ struct bpf_insn insn_buf[16]; ++ struct bpf_insn *patch = &insn_buf[0]; ++ bool issrc, isneg; ++ u32 off_reg; ++ ++ aux = &env->insn_aux_data[i + delta]; ++ if (!aux->alu_state) ++ continue; ++ ++ isneg = aux->alu_state & BPF_ALU_NEG_VALUE; ++ issrc = (aux->alu_state & BPF_ALU_SANITIZE) == ++ BPF_ALU_SANITIZE_SRC; ++ ++ off_reg = issrc ? insn->src_reg : insn->dst_reg; ++ if (isneg) ++ *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); ++ *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1); ++ *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); ++ *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); ++ *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); ++ *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); ++ if (issrc) { ++ *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, ++ off_reg); ++ insn->src_reg = BPF_REG_AX; ++ } else { ++ *patch++ = BPF_ALU64_REG(BPF_AND, off_reg, ++ BPF_REG_AX); ++ } ++ if (isneg) ++ insn->code = insn->code == code_add ? ++ code_sub : code_add; ++ *patch++ = *insn; ++ if (issrc && isneg) ++ *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); ++ cnt = patch - insn_buf; ++ ++ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); ++ if (!new_prog) ++ return -ENOMEM; ++ ++ delta += cnt - 1; ++ env->prog = prog = new_prog; ++ insn = new_prog->insnsi + i + delta; ++ continue; ++ } ++ + if (insn->code != (BPF_JMP | BPF_CALL)) + continue; + if (insn->src_reg == BPF_PSEUDO_CALL) +diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c +index 8f0644af40be..80f955210861 100644 +--- a/kernel/time/posix-cpu-timers.c ++++ b/kernel/time/posix-cpu-timers.c +@@ -685,6 +685,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags, + * set up the signal and overrun bookkeeping. 
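The fixup_bpf_calls() patch above is where aux->alu_state/alu_limit turn into real instructions: a branchless clamp through AX that forces an out-of-range variable offset to zero, so even a mispredicted speculative path cannot index out of bounds. A userspace transcription of the emitted ALU sequence (BPF_ARSH modeled with a signed shift):

#include <stdio.h>
#include <stdint.h>

/* off is kept only if 0 <= off <= limit - 1; otherwise the chain of
 * SUB/OR/NEG/ARSH leaves an all-zero mask and the AND yields 0. */
static uint64_t mask_off(uint64_t off, uint32_t limit)
{
	uint64_t ax = limit - 1;

	ax -= off;                          /* wraps negative if off too big */
	ax |= off;                          /* or if off itself was negative */
	ax = -ax;
	ax = (uint64_t)((int64_t)ax >> 63); /* all-ones in range, else zero  */
	return off & ax;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)mask_off(12, 64));           /* 12 */
	printf("%llu\n", (unsigned long long)mask_off(64, 64));           /*  0 */
	printf("%llu\n", (unsigned long long)mask_off((uint64_t)-8, 64)); /*  0 */
	return 0;
}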
+ */ + timer->it.cpu.incr = timespec64_to_ns(&new->it_interval); ++ timer->it_interval = ns_to_ktime(timer->it.cpu.incr); + + /* + * This acts as a modification timestamp for the timer, +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index e95b5b7c9c3d..995d1079f958 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -5542,18 +5542,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, + cond_resched(); + } + } +-#ifdef CONFIG_SPARSEMEM +- /* +- * If the zone does not span the rest of the section then +- * we should at least initialize those pages. Otherwise we +- * could blow up on a poisoned page in some paths which depend +- * on full sections being initialized (e.g. memory hotplug). +- */ +- while (end_pfn % PAGES_PER_SECTION) { +- __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid); +- end_pfn++; +- } +-#endif + } + + #ifdef CONFIG_ZONE_DEVICE +diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c +index 2cb8da465b98..48ddc60b4fbd 100644 +--- a/net/bridge/br_forward.c ++++ b/net/bridge/br_forward.c +@@ -36,10 +36,10 @@ static inline int should_deliver(const struct net_bridge_port *p, + + int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) + { ++ skb_push(skb, ETH_HLEN); + if (!is_skb_forwardable(skb->dev, skb)) + goto drop; + +- skb_push(skb, ETH_HLEN); + br_drop_fake_rtable(skb); + + if (skb->ip_summed == CHECKSUM_PARTIAL && +@@ -98,12 +98,11 @@ static void __br_forward(const struct net_bridge_port *to, + net = dev_net(indev); + } else { + if (unlikely(netpoll_tx_running(to->br->dev))) { +- if (!is_skb_forwardable(skb->dev, skb)) { ++ skb_push(skb, ETH_HLEN); ++ if (!is_skb_forwardable(skb->dev, skb)) + kfree_skb(skb); +- } else { +- skb_push(skb, ETH_HLEN); ++ else + br_netpoll_send_skb(to, skb); +- } + return; + } + br_hook = NF_BR_LOCAL_OUT; +diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c +index 96c072e71ea2..5811208863b7 100644 +--- a/net/bridge/br_netfilter_ipv6.c ++++ b/net/bridge/br_netfilter_ipv6.c +@@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb) + IPSTATS_MIB_INDISCARDS); + goto drop; + } ++ hdr = ipv6_hdr(skb); + } + if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb)) + goto drop; +diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c +index 08cbed7d940e..419e8edf23ba 100644 +--- a/net/bridge/netfilter/nft_reject_bridge.c ++++ b/net/bridge/netfilter/nft_reject_bridge.c +@@ -229,6 +229,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook) + pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h))) + return false; + ++ ip6h = ipv6_hdr(skb); + thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo); + if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) + return false; +diff --git a/net/can/bcm.c b/net/can/bcm.c +index 0af8f0db892a..79bb8afa9c0c 100644 +--- a/net/can/bcm.c ++++ b/net/can/bcm.c +@@ -67,6 +67,9 @@ + */ + #define MAX_NFRAMES 256 + ++/* limit timers to 400 days for sending/timeouts */ ++#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60) ++ + /* use of last_frames[index].flags */ + #define RX_RECV 0x40 /* received data for this element */ + #define RX_THR 0x80 /* element not been sent due to throttle feature */ +@@ -140,6 +143,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv) + return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); + } + ++/* check limitations for timeval provided by user */ ++static bool 
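The br_dev_queue_push_xmit()/__br_forward() hunks above move skb_push(ETH_HLEN) ahead of is_skb_forwardable() so the length check judges the frame that will actually hit the wire, header included. A deliberately simplified model of the two orderings; the real limit computation lives in is_skb_forwardable():

#include <stdio.h>
#include <stdbool.h>

#define ETH_HLEN 14

/* Simplified: treat the device limit as "fits or not" on total length. */
static bool forwardable(unsigned int frame_len, unsigned int dev_limit)
{
	return frame_len <= dev_limit;
}

int main(void)
{
	unsigned int payload = 1501;            /* L3 bytes, header not pushed */
	unsigned int limit   = 1500 + ETH_HLEN; /* e.g. MTU plus link header   */

	/* Old order: the check never saw the Ethernet header. */
	printf("check-then-push: %d\n", forwardable(payload, limit));            /* 1 */
	/* New order: push first, so the check sees the real frame. */
	printf("push-then-check: %d\n", forwardable(payload + ETH_HLEN, limit)); /* 0 */
	return 0;
}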
bcm_is_invalid_tv(struct bcm_msg_head *msg_head) ++{ ++ if ((msg_head->ival1.tv_sec < 0) || ++ (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) || ++ (msg_head->ival1.tv_usec < 0) || ++ (msg_head->ival1.tv_usec >= USEC_PER_SEC) || ++ (msg_head->ival2.tv_sec < 0) || ++ (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) || ++ (msg_head->ival2.tv_usec < 0) || ++ (msg_head->ival2.tv_usec >= USEC_PER_SEC)) ++ return true; ++ ++ return false; ++} ++ + #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU) + #define OPSIZ sizeof(struct bcm_op) + #define MHSIZ sizeof(struct bcm_msg_head) +@@ -873,6 +892,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, + if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES) + return -EINVAL; + ++ /* check timeval limitations */ ++ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) ++ return -EINVAL; ++ + /* check the given can_id */ + op = bcm_find_op(&bo->tx_ops, msg_head, ifindex); + if (op) { +@@ -1053,6 +1076,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, + (!(msg_head->can_id & CAN_RTR_FLAG)))) + return -EINVAL; + ++ /* check timeval limitations */ ++ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) ++ return -EINVAL; ++ + /* check the given can_id */ + op = bcm_find_op(&bo->rx_ops, msg_head, ifindex); + if (op) { +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index 6df95be96311..fe4f6a624238 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -203,7 +203,7 @@ static void fib_flush(struct net *net) + struct fib_table *tb; + + hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) +- flushed += fib_table_flush(net, tb); ++ flushed += fib_table_flush(net, tb, false); + } + + if (flushed) +@@ -1463,7 +1463,7 @@ static void ip_fib_net_exit(struct net *net) + + hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { + hlist_del(&tb->tb_hlist); +- fib_table_flush(net, tb); ++ fib_table_flush(net, tb, true); + fib_free_table(tb); + } + } +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c +index 237c9f72b265..a573e37e0615 100644 +--- a/net/ipv4/fib_trie.c ++++ b/net/ipv4/fib_trie.c +@@ -1856,7 +1856,7 @@ void fib_table_flush_external(struct fib_table *tb) + } + + /* Caller must hold RTNL. 
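bcm_is_invalid_tv() above caps user-supplied intervals at 400 days and requires a normalized microsecond field, heading off overflow when bcm_timeval_to_ktime() converts them. A userspace transcription with illustrative test values:

#include <stdio.h>
#include <stdbool.h>

#define BCM_TIMER_SEC_MAX (400L * 24 * 60 * 60) /* 400 days, as above */
#define USEC_PER_SEC      1000000L

struct bcm_timeval_like { long tv_sec; long tv_usec; };

static bool tv_is_invalid(const struct bcm_timeval_like *tv)
{
	return tv->tv_sec  < 0 || tv->tv_sec  > BCM_TIMER_SEC_MAX ||
	       tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC;
}

int main(void)
{
	struct bcm_timeval_like ok  = { 5, 250000 };
	struct bcm_timeval_like bad = { BCM_TIMER_SEC_MAX + 1, 0 };

	printf("ok  invalid? %d\n", tv_is_invalid(&ok));  /* 0 */
	printf("bad invalid? %d\n", tv_is_invalid(&bad)); /* 1 */
	return 0;
}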
*/ +-int fib_table_flush(struct net *net, struct fib_table *tb) ++int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all) + { + struct trie *t = (struct trie *)tb->tb_data; + struct key_vector *pn = t->kv; +@@ -1904,8 +1904,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb) + hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { + struct fib_info *fi = fa->fa_info; + +- if (!fi || !(fi->fib_flags & RTNH_F_DEAD) || +- tb->tb_id != fa->tb_id) { ++ if (!fi || tb->tb_id != fa->tb_id || ++ (!(fi->fib_flags & RTNH_F_DEAD) && ++ !fib_props[fa->fa_type].error)) { ++ slen = fa->fa_slen; ++ continue; ++ } ++ ++ /* Do not flush error routes if network namespace is ++ * not being dismantled ++ */ ++ if (!flush_all && fib_props[fa->fa_type].error) { + slen = fa->fa_slen; + continue; + } +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c +index 0fe9419bd12b..3407a82d4549 100644 +--- a/net/ipv4/ip_gre.c ++++ b/net/ipv4/ip_gre.c +@@ -567,8 +567,7 @@ err_free_skb: + dev->stats.tx_dropped++; + } + +-static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, +- __be16 proto) ++static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct ip_tunnel *tunnel = netdev_priv(dev); + struct ip_tunnel_info *tun_info; +@@ -576,10 +575,10 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, + struct erspan_metadata *md; + struct rtable *rt = NULL; + bool truncate = false; ++ __be16 df, proto; + struct flowi4 fl; + int tunnel_hlen; + int version; +- __be16 df; + int nhoff; + int thoff; + +@@ -624,18 +623,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, + if (version == 1) { + erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)), + ntohl(md->u.index), truncate, true); ++ proto = htons(ETH_P_ERSPAN); + } else if (version == 2) { + erspan_build_header_v2(skb, + ntohl(tunnel_id_to_key32(key->tun_id)), + md->u.md2.dir, + get_hwid(&md->u.md2), + truncate, true); ++ proto = htons(ETH_P_ERSPAN2); + } else { + goto err_free_rt; + } + + gre_build_header(skb, 8, TUNNEL_SEQ, +- htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++)); ++ proto, 0, htonl(tunnel->o_seqno++)); + + df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? 
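The erspan_fb_xmit() change above stops hardwiring the GRE protocol field to the v1 EtherType and derives it from the ERSPAN version instead. A sketch of the mapping using the if_ether.h values (0x88BE for type II, 0x22EB for type III):

#include <stdio.h>
#include <arpa/inet.h>

#define ETH_P_ERSPAN  0x88BE /* type II,  from if_ether.h */
#define ETH_P_ERSPAN2 0x22EB /* type III, from if_ether.h */

static unsigned short erspan_proto(int erspan_ver)
{
	return htons(erspan_ver == 1 ? ETH_P_ERSPAN : ETH_P_ERSPAN2);
}

int main(void)
{
	printf("v1 -> %#06x\n", ntohs(erspan_proto(1))); /* 0x88be */
	printf("v2 -> %#06x\n", ntohs(erspan_proto(2))); /* 0x22eb */
	return 0;
}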
htons(IP_DF) : 0; + +@@ -719,12 +720,13 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb, + { + struct ip_tunnel *tunnel = netdev_priv(dev); + bool truncate = false; ++ __be16 proto; + + if (!pskb_inet_may_pull(skb)) + goto free_skb; + + if (tunnel->collect_md) { +- erspan_fb_xmit(skb, dev, skb->protocol); ++ erspan_fb_xmit(skb, dev); + return NETDEV_TX_OK; + } + +@@ -740,19 +742,22 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb, + } + + /* Push ERSPAN header */ +- if (tunnel->erspan_ver == 1) ++ if (tunnel->erspan_ver == 1) { + erspan_build_header(skb, ntohl(tunnel->parms.o_key), + tunnel->index, + truncate, true); +- else if (tunnel->erspan_ver == 2) ++ proto = htons(ETH_P_ERSPAN); ++ } else if (tunnel->erspan_ver == 2) { + erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key), + tunnel->dir, tunnel->hwid, + truncate, true); +- else ++ proto = htons(ETH_P_ERSPAN2); ++ } else { + goto free_skb; ++ } + + tunnel->parms.o_flags &= ~TUNNEL_KEY; +- __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN)); ++ __gre_xmit(skb, dev, &tunnel->parms.iph, proto); + return NETDEV_TX_OK; + + free_skb: +diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c +index e609b08c9df4..3163428219cd 100644 +--- a/net/ipv4/ip_input.c ++++ b/net/ipv4/ip_input.c +@@ -489,6 +489,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) + goto drop; + } + ++ iph = ip_hdr(skb); + skb->transport_header = skb->network_header + iph->ihl*4; + + /* Remove any debris in the socket control block */ +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index 9e6bc4d6daa7..40cbe5609663 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -1186,7 +1186,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) + flags = msg->msg_flags; + + if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { +- if (sk->sk_state != TCP_ESTABLISHED) { ++ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { + err = -EINVAL; + goto out_err; + } +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c +index 1976fddb9e00..ce125f4dc810 100644 +--- a/net/ipv4/udp.c ++++ b/net/ipv4/udp.c +@@ -785,15 +785,23 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4, + const int hlen = skb_network_header_len(skb) + + sizeof(struct udphdr); + +- if (hlen + cork->gso_size > cork->fragsize) ++ if (hlen + cork->gso_size > cork->fragsize) { ++ kfree_skb(skb); + return -EINVAL; +- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) ++ } ++ if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { ++ kfree_skb(skb); + return -EINVAL; +- if (sk->sk_no_check_tx) ++ } ++ if (sk->sk_no_check_tx) { ++ kfree_skb(skb); + return -EINVAL; ++ } + if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || +- dst_xfrm(skb_dst(skb))) ++ dst_xfrm(skb_dst(skb))) { ++ kfree_skb(skb); + return -EIO; ++ } + + skb_shinfo(skb)->gso_size = cork->gso_size; + skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c +index 0f7d434c1eed..b529a79ac222 100644 +--- a/net/ipv6/ip6_gre.c ++++ b/net/ipv6/ip6_gre.c +@@ -920,6 +920,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, + __u8 dsfield = false; + struct flowi6 fl6; + int err = -EINVAL; ++ __be16 proto; + __u32 mtu; + int nhoff; + int thoff; +@@ -1033,8 +1034,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, + } + + /* Push GRE header. */ +- gre_build_header(skb, 8, TUNNEL_SEQ, +- htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++)); ++ proto = (t->parms.erspan_ver == 1) ? 
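The udp_send_skb() hunks above (and their IPv6 twins below) fix a leak: the function owns the skb, so every early -EINVAL/-EIO return must consume it rather than just report failure. A toy model of that ownership convention, with plain malloc standing in for an skb; names and sizes are invented:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct fake_skb { unsigned char *data; };

/* The callee owns skb; every return path must dispose of it. */
static int send_checked(struct fake_skb *skb, int hlen, int gso, int frag)
{
	if (hlen + gso > frag) { /* a GSO segment would not fit a fragment */
		free(skb->data); /* post-patch behavior: consume on error  */
		free(skb);
		return -EINVAL;
	}
	/* ...transmit, then release as usual... */
	free(skb->data);
	free(skb);
	return 0;
}

int main(void)
{
	struct fake_skb *skb = malloc(sizeof(*skb));

	if (!skb)
		return 1;
	skb->data = malloc(1400);
	printf("ret=%d\n", send_checked(skb, 28, 1400, 1200)); /* -EINVAL, freed */
	return 0;
}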
htons(ETH_P_ERSPAN) ++ : htons(ETH_P_ERSPAN2); ++ gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++)); + + /* TooBig packet may have updated dst->dev's mtu */ + if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) +@@ -1167,6 +1169,10 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t, + t->parms.i_flags = p->i_flags; + t->parms.o_flags = p->o_flags; + t->parms.fwmark = p->fwmark; ++ t->parms.erspan_ver = p->erspan_ver; ++ t->parms.index = p->index; ++ t->parms.dir = p->dir; ++ t->parms.hwid = p->hwid; + dst_cache_reset(&t->dst_cache); + } + +@@ -2029,9 +2035,9 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) + { +- struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id); ++ struct ip6_tnl *t = netdev_priv(dev); ++ struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); + struct __ip6_tnl_parm p; +- struct ip6_tnl *t; + + t = ip6gre_changelink_common(dev, tb, data, &p, extack); + if (IS_ERR(t)) +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c +index d2d97d07ef27..d01ec252cb81 100644 +--- a/net/ipv6/udp.c ++++ b/net/ipv6/udp.c +@@ -1056,15 +1056,23 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, + const int hlen = skb_network_header_len(skb) + + sizeof(struct udphdr); + +- if (hlen + cork->gso_size > cork->fragsize) ++ if (hlen + cork->gso_size > cork->fragsize) { ++ kfree_skb(skb); + return -EINVAL; +- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) ++ } ++ if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { ++ kfree_skb(skb); + return -EINVAL; +- if (udp_sk(sk)->no_check6_tx) ++ } ++ if (udp_sk(sk)->no_check6_tx) { ++ kfree_skb(skb); + return -EINVAL; ++ } + if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || +- dst_xfrm(skb_dst(skb))) ++ dst_xfrm(skb_dst(skb))) { ++ kfree_skb(skb); + return -EIO; ++ } + + skb_shinfo(skb)->gso_size = cork->gso_size; + skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c +index 865ecef68196..c7b6010b2c09 100644 +--- a/net/openvswitch/flow_netlink.c ++++ b/net/openvswitch/flow_netlink.c +@@ -500,7 +500,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, + return -EINVAL; + } + +- if (!nz || !is_all_zero(nla_data(nla), expected_len)) { ++ if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) { + attrs |= 1 << type; + a[type] = nla; + } +diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c +index 4cca8f274662..904730b8ce8f 100644 +--- a/net/sched/act_tunnel_key.c ++++ b/net/sched/act_tunnel_key.c +@@ -197,6 +197,15 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = { + [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 }, + }; + ++static void tunnel_key_release_params(struct tcf_tunnel_key_params *p) ++{ ++ if (!p) ++ return; ++ if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET) ++ dst_release(&p->tcft_enc_metadata->dst); ++ kfree_rcu(p, rcu); ++} ++ + static int tunnel_key_init(struct net *net, struct nlattr *nla, + struct nlattr *est, struct tc_action **a, + int ovr, int bind, bool rtnl_held, +@@ -360,8 +369,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, + rcu_swap_protected(t->params, params_new, + lockdep_is_held(&t->tcf_lock)); + spin_unlock_bh(&t->tcf_lock); +- if (params_new) +- kfree_rcu(params_new, rcu); ++ tunnel_key_release_params(params_new); + + if (ret == ACT_P_CREATED) + tcf_idr_insert(tn, *a); +@@ -385,12 +393,7 @@ static void tunnel_key_release(struct 
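tunnel_key_release_params() above centralizes cleanup that the init rollback and release paths previously duplicated, including the SET-only metadata reference drop. A userspace sketch of the same shape, with free() standing in for dst_release()/kfree_rcu():

#include <stdio.h>
#include <stdlib.h>

struct params_like {
	int   is_set;   /* stands for tcft_action == TCA_TUNNEL_KEY_ACT_SET */
	void *metadata; /* stands for tcft_enc_metadata                     */
};

static void release_params(struct params_like *p)
{
	if (!p)
		return;            /* callers may pass NULL after the patch */
	if (p->is_set)
		free(p->metadata); /* stands in for dst_release()           */
	free(p);                   /* stands in for kfree_rcu()             */
}

int main(void)
{
	struct params_like *p = calloc(1, sizeof(*p));

	release_params(NULL); /* rollback path: no-op instead of a re-check */
	if (p) {
		p->is_set = 1;
		p->metadata = malloc(16);
		release_params(p);
	}
	puts("released");
	return 0;
}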
tc_action *a) + struct tcf_tunnel_key_params *params; + + params = rcu_dereference_protected(t->params, 1); +- if (params) { +- if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) +- dst_release(¶ms->tcft_enc_metadata->dst); +- +- kfree_rcu(params, rcu); +- } ++ tunnel_key_release_params(params); + } + + static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c +index f427a1e00e7e..1c4436523aa5 100644 +--- a/net/sched/cls_api.c ++++ b/net/sched/cls_api.c +@@ -1053,7 +1053,6 @@ static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type, + int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, + struct tcf_result *res, bool compat_mode) + { +- __be16 protocol = tc_skb_protocol(skb); + #ifdef CONFIG_NET_CLS_ACT + const int max_reclassify_loop = 4; + const struct tcf_proto *orig_tp = tp; +@@ -1063,6 +1062,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, + reclassify: + #endif + for (; tp; tp = rcu_dereference_bh(tp->next)) { ++ __be16 protocol = tc_skb_protocol(skb); + int err; + + if (tp->protocol != protocol && +@@ -1095,7 +1095,6 @@ reset: + } + + tp = first_tp; +- protocol = tc_skb_protocol(skb); + goto reclassify; + #endif + } +diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c +index 208d940464d7..45bc2b72dc1c 100644 +--- a/net/sched/cls_flower.c ++++ b/net/sched/cls_flower.c +@@ -1176,17 +1176,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, + struct cls_fl_head *head = rtnl_dereference(tp->root); + struct cls_fl_filter *fold = *arg; + struct cls_fl_filter *fnew; ++ struct fl_flow_mask *mask; + struct nlattr **tb; +- struct fl_flow_mask mask = {}; + int err; + + if (!tca[TCA_OPTIONS]) + return -EINVAL; + +- tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); +- if (!tb) ++ mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL); ++ if (!mask) + return -ENOBUFS; + ++ tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); ++ if (!tb) { ++ err = -ENOBUFS; ++ goto errout_mask_alloc; ++ } ++ + err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], + fl_policy, NULL); + if (err < 0) +@@ -1229,12 +1235,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, + } + } + +- err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr, ++ err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr, + tp->chain->tmplt_priv, extack); + if (err) + goto errout_idr; + +- err = fl_check_assign_mask(head, fnew, fold, &mask); ++ err = fl_check_assign_mask(head, fnew, fold, mask); + if (err) + goto errout_idr; + +@@ -1278,6 +1284,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, + } + + kfree(tb); ++ kfree(mask); + return 0; + + errout_mask: +@@ -1291,6 +1298,8 @@ errout: + kfree(fnew); + errout_tb: + kfree(tb); ++errout_mask_alloc: ++ kfree(mask); + return err; + } + +diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c +index 73547d17d3c6..943f08be7c38 100644 +--- a/net/sunrpc/xprt.c ++++ b/net/sunrpc/xprt.c +@@ -1177,7 +1177,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task) + INIT_LIST_HEAD(&req->rq_xmit2); + goto out; + } +- } else { ++ } else if (!req->rq_seqno) { + list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { + if (pos->rq_task->tk_owner != task->tk_owner) + continue; +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index 51cc6589443f..152f54137082 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ 
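The fl_change() hunk above moves the large fl_flow_mask from a zero-initialized on-stack local to heap allocation with its own error label, shrinking the kernel stack frame. A minimal userspace analogue; the struct size is only illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct big_mask { unsigned char key[512]; }; /* size is illustrative */

static int change_filter(void)
{
	struct big_mask *mask = calloc(1, sizeof(*mask)); /* was: on-stack */

	if (!mask)
		return -ENOBUFS;
	/* ...parse attributes into *mask and hand it to the filter... */
	free(mask);
	return 0;
}

int main(void)
{
	printf("ret=%d\n", change_filter());
	return 0;
}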
-931,6 +931,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { + SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO), ++ SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 0d95316d6dbd..8ddd016c04d0 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -6842,7 +6842,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { + {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"}, + {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"}, + {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"}, +- {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"}, ++ {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"}, + {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"}, + {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"}, + {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"}, +diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c +index 4d46f4567c3a..bec2eefa8b0f 100644 +--- a/sound/soc/codecs/rt5514-spi.c ++++ b/sound/soc/codecs/rt5514-spi.c +@@ -280,6 +280,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_component *component) + + rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp), + GFP_KERNEL); ++ if (!rt5514_dsp) ++ return -ENOMEM; + + rt5514_dsp->dev = &rt5514_spi->dev; + mutex_init(&rt5514_dsp->dma_lock); +diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c +index e2b5a11b16d1..f03195d2ab2e 100644 +--- a/sound/soc/codecs/tlv320aic32x4.c ++++ b/sound/soc/codecs/tlv320aic32x4.c +@@ -822,6 +822,10 @@ static int aic32x4_set_bias_level(struct snd_soc_component *component, + case SND_SOC_BIAS_PREPARE: + break; + case SND_SOC_BIAS_STANDBY: ++ /* Initial cold start */ ++ if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) ++ break; ++ + /* Switch off BCLK_N Divider */ + snd_soc_component_update_bits(component, AIC32X4_BCLKN, + AIC32X4_BCLKEN, 0); +diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c +index afc559866095..91a2436ce952 100644 +--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c ++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c +@@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) + { +- snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params)); ++ int ret; ++ ++ ret = ++ snd_pcm_lib_malloc_pages(substream, ++ params_buffer_bytes(params)); ++ if (ret) ++ return ret; + memset(substream->runtime->dma_area, 0, params_buffer_bytes(params)); + return 0; + } +diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c +index 460b4bdf4c1e..5d546dcdbc80 100644 +--- a/tools/testing/selftests/x86/protection_keys.c ++++ b/tools/testing/selftests/x86/protection_keys.c +@@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey) + pkey_assert(err); + 
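The rt5514-spi and sst-mfld hunks above both add the same guard: check a fallible allocation before touching the memory. A trivial userspace rendering, with malloc standing in for devm_kzalloc()/snd_pcm_lib_malloc_pages():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

static int probe_like(size_t bytes)
{
	unsigned char *buf = malloc(bytes); /* may fail, like devm_kzalloc */

	if (!buf)
		return -ENOMEM;  /* post-patch: bail out instead of oopsing */
	memset(buf, 0, bytes);   /* only safe after the NULL check          */
	free(buf);
	return 0;
}

int main(void)
{
	printf("ret=%d\n", probe_like(4096));
	return 0;
}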
} + ++void become_child(void) ++{ ++ pid_t forkret; ++ ++ forkret = fork(); ++ pkey_assert(forkret >= 0); ++ dprintf3("[%d] fork() ret: %d\n", getpid(), forkret); ++ ++ if (!forkret) { ++ /* in the child */ ++ return; ++ } ++ exit(0); ++} ++ + /* Assumes that all pkeys other than 'pkey' are unallocated */ + void test_pkey_alloc_exhaust(int *ptr, u16 pkey) + { +@@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey) + int nr_allocated_pkeys = 0; + int i; + +- for (i = 0; i < NR_PKEYS*2; i++) { ++ for (i = 0; i < NR_PKEYS*3; i++) { + int new_pkey; + dprintf1("%s() alloc loop: %d\n", __func__, i); + new_pkey = alloc_pkey(); +@@ -1152,20 +1167,26 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey) + if ((new_pkey == -1) && (errno == ENOSPC)) { + dprintf2("%s() failed to allocate pkey after %d tries\n", + __func__, nr_allocated_pkeys); +- break; ++ } else { ++ /* ++ * Ensure the number of successes never ++ * exceeds the number of keys supported ++ * in the hardware. ++ */ ++ pkey_assert(nr_allocated_pkeys < NR_PKEYS); ++ allocated_pkeys[nr_allocated_pkeys++] = new_pkey; + } +- pkey_assert(nr_allocated_pkeys < NR_PKEYS); +- allocated_pkeys[nr_allocated_pkeys++] = new_pkey; ++ ++ /* ++ * Make sure that allocation state is properly ++ * preserved across fork(). ++ */ ++ if (i == NR_PKEYS*2) ++ become_child(); + } + + dprintf3("%s()::%d\n", __func__, __LINE__); + +- /* +- * ensure it did not reach the end of the loop without +- * failure: +- */ +- pkey_assert(i < NR_PKEYS*2); +- + /* + * There are 16 pkeys supported in hardware. Three are + * allocated by the time we get here: |