author    Alice Ferrazzi <alicef@gentoo.org>  2017-12-25 14:34:27 +0000
committer Alice Ferrazzi <alicef@gentoo.org>  2017-12-25 14:34:27 +0000
commit    4d2989733f91cbdaee7aa1f45101fbe8757b89a3 (patch)
tree      7654cde2f8decae6bc1c435b7eb47156b39d2255
parent    Linux patch 4.4.107 (diff)
download  linux-patches-4d2989733f91cbdaee7aa1f45101fbe8757b89a3.tar.gz
          linux-patches-4d2989733f91cbdaee7aa1f45101fbe8757b89a3.tar.bz2
          linux-patches-4d2989733f91cbdaee7aa1f45101fbe8757b89a3.zip
linux kernel 4.4.108 (tag: 4.4-111)
-rw-r--r--  0000_README              |    4
-rw-r--r--  1107_linux-4.4.108.patch | 2635
2 files changed, 2639 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index 66f2735d..832fff6b 100644
--- a/0000_README
+++ b/0000_README
@@ -471,6 +471,10 @@ Patch: 1106_linux-4.4.107.patch
From: http://www.kernel.org
Desc: Linux 4.4.107
+Patch: 1107_linux-4.4.108.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.108
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1107_linux-4.4.108.patch b/1107_linux-4.4.108.patch
new file mode 100644
index 00000000..1c8113b7
--- /dev/null
+++ b/1107_linux-4.4.108.patch
@@ -0,0 +1,2635 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 7c77d7edb851..5d593ecadb90 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2519,6 +2519,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+
+ nointroute [IA-64]
+
++ noinvpcid [X86] Disable the INVPCID cpu feature.
++
+ nojitter [IA-64] Disables jitter checking for ITC timers.
+
+ no-kvmclock [X86,KVM] Disable paravirtualized KVM clock driver
+diff --git a/Makefile b/Makefile
+index f7997b15d055..99f9834c4ba6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 107
++SUBLEVEL = 108
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h
+index 4c51c05333c6..4cafffa80e2c 100644
+--- a/arch/alpha/include/asm/mmu_context.h
++++ b/arch/alpha/include/asm/mmu_context.h
+@@ -7,6 +7,7 @@
+ * Copyright (C) 1996, Linus Torvalds
+ */
+
++#include <linux/sched.h>
+ #include <asm/machvec.h>
+ #include <asm/compiler.h>
+ #include <asm-generic/mm_hooks.h>
+diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
+index 89442e98a837..3af570517903 100644
+--- a/arch/arm/boot/dts/am335x-evmsk.dts
++++ b/arch/arm/boot/dts/am335x-evmsk.dts
+@@ -668,6 +668,7 @@
+ ti,non-removable;
+ bus-width = <4>;
+ cap-power-off-card;
++ keep-power-in-suspend;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_pins>;
+
+diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
+index c2a03c740e79..02bd6312d1d9 100644
+--- a/arch/arm/boot/dts/dra7.dtsi
++++ b/arch/arm/boot/dts/dra7.dtsi
+@@ -227,6 +227,7 @@
+ device_type = "pci";
+ ranges = <0x81000000 0 0 0x03000 0 0x00010000
+ 0x82000000 0 0x20013000 0x13000 0 0xffed000>;
++ bus-range = <0x00 0xff>;
+ #interrupt-cells = <1>;
+ num-lanes = <1>;
+ ti,hwmods = "pcie1";
+@@ -262,6 +263,7 @@
+ device_type = "pci";
+ ranges = <0x81000000 0 0 0x03000 0 0x00010000
+ 0x82000000 0 0x30013000 0x13000 0 0xffed000>;
++ bus-range = <0x00 0xff>;
+ #interrupt-cells = <1>;
+ num-lanes = <1>;
+ ti,hwmods = "pcie2";
+diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
+index 9b32f76bb0dd..10f662498eb7 100644
+--- a/arch/arm/include/asm/mmu_context.h
++++ b/arch/arm/include/asm/mmu_context.h
+@@ -61,6 +61,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
+ cpu_switch_mm(mm->pgd, mm);
+ }
+
++#ifndef MODULE
+ #define finish_arch_post_lock_switch \
+ finish_arch_post_lock_switch
+ static inline void finish_arch_post_lock_switch(void)
+@@ -82,6 +83,7 @@ static inline void finish_arch_post_lock_switch(void)
+ preempt_enable_no_resched();
+ }
+ }
++#endif /* !MODULE */
+
+ #endif /* CONFIG_MMU */
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 534a60ae282e..613c1d06316a 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -774,13 +774,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
+ __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
+ }
+
++/*
++ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
++ * that the intention is to allow exporting memory allocated via the
++ * coherent DMA APIs through the dma_buf API, which only accepts a
++ * scattertable. This presents a couple of problems:
++ * 1. Not all memory allocated via the coherent DMA APIs is backed by
++ * a struct page
++ * 2. Passing coherent DMA memory into the streaming APIs is not allowed
++ * as we will try to flush the memory through a different alias to that
++ * actually being used (and the flushes are redundant.)
++ */
+ int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ struct dma_attrs *attrs)
+ {
+- struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
++ unsigned long pfn = dma_to_pfn(dev, handle);
++ struct page *page;
+ int ret;
+
++ /* If the PFN is not valid, we do not have a struct page */
++ if (!pfn_valid(pfn))
++ return -ENXIO;
++
++ page = pfn_to_page(pfn);
++
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (unlikely(ret))
+ return ret;
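The comment block above explains why this hunk also adds a pfn_valid() guard: coherent DMA memory is not always backed by a struct page, so calling pfn_to_page() on an arbitrary handle was never safe. A hedged caller-side sketch of how the failure now surfaces (the surrounding names and error handling are illustrative, not from this patch):

struct sg_table sgt;
int err;

/* dma_get_sgtable() dispatches to arm_dma_get_sgtable() above; with
 * this patch a non-page-backed coherent buffer fails cleanly with
 * -ENXIO instead of dereferencing a bogus struct page. */
err = dma_get_sgtable(dev, &sgt, cpu_addr, handle, size);
if (err == -ENXIO)
        dev_warn(dev, "coherent buffer cannot be exported as an sg_table\n");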
+diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
+index a4ec240ee7ba..3eb018fa1a1f 100644
+--- a/arch/arm/probes/kprobes/core.c
++++ b/arch/arm/probes/kprobes/core.c
+@@ -433,6 +433,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
+ struct hlist_node *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
++ kprobe_opcode_t *correct_ret_addr = NULL;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+@@ -455,14 +456,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
+ /* another task is sharing our hash bucket */
+ continue;
+
++ orig_ret_address = (unsigned long)ri->ret_addr;
++
++ if (orig_ret_address != trampoline_address)
++ /*
++ * This is the real return address. Any other
++ * instances associated with this task are for
++ * other calls deeper on the call stack
++ */
++ break;
++ }
++
++ kretprobe_assert(ri, orig_ret_address, trampoline_address);
++
++ correct_ret_addr = ri->ret_addr;
++ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
++ if (ri->task != current)
++ /* another task is sharing our hash bucket */
++ continue;
++
++ orig_ret_address = (unsigned long)ri->ret_addr;
+ if (ri->rp && ri->rp->handler) {
+ __this_cpu_write(current_kprobe, &ri->rp->kp);
+ get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
++ ri->ret_addr = correct_ret_addr;
+ ri->rp->handler(ri, regs);
+ __this_cpu_write(current_kprobe, NULL);
+ }
+
+- orig_ret_address = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address)
+@@ -474,7 +495,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
+ break;
+ }
+
+- kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ kretprobe_hash_unlock(current, &flags);
+
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
+index 9775de22e2ff..a48354de1aa1 100644
+--- a/arch/arm/probes/kprobes/test-core.c
++++ b/arch/arm/probes/kprobes/test-core.c
+@@ -976,7 +976,10 @@ static void coverage_end(void)
+ void __naked __kprobes_test_case_start(void)
+ {
+ __asm__ __volatile__ (
+- "stmdb sp!, {r4-r11} \n\t"
++ "mov r2, sp \n\t"
++ "bic r3, r2, #7 \n\t"
++ "mov sp, r3 \n\t"
++ "stmdb sp!, {r2-r11} \n\t"
+ "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
+ "bic r0, lr, #1 @ r0 = inline data \n\t"
+ "mov r1, sp \n\t"
+@@ -996,7 +999,8 @@ void __naked __kprobes_test_case_end_32(void)
+ "movne pc, r0 \n\t"
+ "mov r0, r4 \n\t"
+ "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
+- "ldmia sp!, {r4-r11} \n\t"
++ "ldmia sp!, {r2-r11} \n\t"
++ "mov sp, r2 \n\t"
+ "mov pc, r0 \n\t"
+ );
+ }
+@@ -1012,7 +1016,8 @@ void __naked __kprobes_test_case_end_16(void)
+ "bxne r0 \n\t"
+ "mov r0, r4 \n\t"
+ "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
+- "ldmia sp!, {r4-r11} \n\t"
++ "ldmia sp!, {r2-r11} \n\t"
++ "mov sp, r2 \n\t"
+ "bx r0 \n\t"
+ );
+ }
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 4cb98aa8c27b..efd89ce4533d 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -178,6 +178,7 @@ void __init arm64_memblock_init(void)
+ arm64_dma_phys_limit = max_zone_dma_phys();
+ else
+ arm64_dma_phys_limit = PHYS_MASK + 1;
++ high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
+ dma_contiguous_reserve(arm64_dma_phys_limit);
+
+ memblock_allow_resize();
+@@ -202,7 +203,6 @@ void __init bootmem_init(void)
+ sparse_init();
+ zone_sizes_init(min, max);
+
+- high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
+ max_pfn = max_low_pfn = max;
+ }
+
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index dd058aa8a3b5..89d05de8040a 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -1777,7 +1777,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(fd, MIPSInst_FD(ir));
+ rv.s = ieee754sp_maddf(fd, fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmsubf_op: {
+@@ -1790,7 +1790,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(fd, MIPSInst_FD(ir));
+ rv.s = ieee754sp_msubf(fd, fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case frint_op: {
+@@ -1814,7 +1814,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.w = ieee754sp_2008class(fs);
+ rfmt = w_fmt;
+- break;
++ goto copcsr;
+ }
+
+ case fmin_op: {
+@@ -1826,7 +1826,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = ieee754sp_fmin(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmina_op: {
+@@ -1838,7 +1838,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = ieee754sp_fmina(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmax_op: {
+@@ -1850,7 +1850,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = ieee754sp_fmax(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmaxa_op: {
+@@ -1862,7 +1862,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = ieee754sp_fmaxa(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fabs_op:
+@@ -2095,7 +2095,7 @@ copcsr:
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(fd, MIPSInst_FD(ir));
+ rv.d = ieee754dp_maddf(fd, fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmsubf_op: {
+@@ -2108,7 +2108,7 @@ copcsr:
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(fd, MIPSInst_FD(ir));
+ rv.d = ieee754dp_msubf(fd, fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case frint_op: {
+@@ -2132,7 +2132,7 @@ copcsr:
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.w = ieee754dp_2008class(fs);
+ rfmt = w_fmt;
+- break;
++ goto copcsr;
+ }
+
+ case fmin_op: {
+@@ -2144,7 +2144,7 @@ copcsr:
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = ieee754dp_fmin(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmina_op: {
+@@ -2156,7 +2156,7 @@ copcsr:
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = ieee754dp_fmina(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmax_op: {
+@@ -2168,7 +2168,7 @@ copcsr:
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = ieee754dp_fmax(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmaxa_op: {
+@@ -2180,7 +2180,7 @@ copcsr:
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = ieee754dp_fmaxa(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fabs_op:
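Each of the hunks above replaces a break with goto copcsr for the same reason: break skipped the common copcsr path that copies the IEEE exception flags raised by the emulated operation (inexact, overflow, invalid, ...) into the guest's FCSR, so these operations could never signal a floating-point exception. A minimal compilable sketch of the control-flow difference, using hypothetical names rather than the real emulator:

/* Hedged sketch (hypothetical names): with "break", control exits the
 * switch and returns before the flag copy; "goto copcsr" routes the
 * result through the common status handling. */
static int emulate_one(int op, unsigned int *fcsr)
{
        unsigned int flags = 0;

        switch (op) {
        case 1:
                flags = 0x04;   /* pretend the op raised IEEE "inexact" */
                goto copcsr;
        default:
                return -1;
        }
        return 0;               /* "break" would exit here, dropping flags */
copcsr:
        *fcsr |= flags;         /* latch sticky flags into the status reg */
        return 0;
}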
+diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
+index 7178043b0e1d..59405a248fc2 100644
+--- a/arch/x86/include/asm/hardirq.h
++++ b/arch/x86/include/asm/hardirq.h
+@@ -22,10 +22,6 @@ typedef struct {
+ #ifdef CONFIG_SMP
+ unsigned int irq_resched_count;
+ unsigned int irq_call_count;
+- /*
+- * irq_tlb_count is double-counted in irq_call_count, so it must be
+- * subtracted from irq_call_count when displaying irq_call_count
+- */
+ unsigned int irq_tlb_count;
+ #endif
+ #ifdef CONFIG_X86_THERMAL_VECTOR
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index bfd9b2a35a0b..44fc93987869 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -104,103 +104,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+ #endif
+ }
+
+-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+- struct task_struct *tsk)
+-{
+- unsigned cpu = smp_processor_id();
+-
+- if (likely(prev != next)) {
+-#ifdef CONFIG_SMP
+- this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+- this_cpu_write(cpu_tlbstate.active_mm, next);
+-#endif
+- cpumask_set_cpu(cpu, mm_cpumask(next));
+-
+- /*
+- * Re-load page tables.
+- *
+- * This logic has an ordering constraint:
+- *
+- * CPU 0: Write to a PTE for 'next'
+- * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
+- * CPU 1: set bit 1 in next's mm_cpumask
+- * CPU 1: load from the PTE that CPU 0 writes (implicit)
+- *
+- * We need to prevent an outcome in which CPU 1 observes
+- * the new PTE value and CPU 0 observes bit 1 clear in
+- * mm_cpumask. (If that occurs, then the IPI will never
+- * be sent, and CPU 0's TLB will contain a stale entry.)
+- *
+- * The bad outcome can occur if either CPU's load is
+- * reordered before that CPU's store, so both CPUs must
+- * execute full barriers to prevent this from happening.
+- *
+- * Thus, switch_mm needs a full barrier between the
+- * store to mm_cpumask and any operation that could load
+- * from next->pgd. TLB fills are special and can happen
+- * due to instruction fetches or for no reason at all,
+- * and neither LOCK nor MFENCE orders them.
+- * Fortunately, load_cr3() is serializing and gives the
+- * ordering guarantee we need.
+- *
+- */
+- load_cr3(next->pgd);
+-
+- trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+-
+- /* Stop flush ipis for the previous mm */
+- cpumask_clear_cpu(cpu, mm_cpumask(prev));
+-
+- /* Load per-mm CR4 state */
+- load_mm_cr4(next);
++extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
++ struct task_struct *tsk);
+
+-#ifdef CONFIG_MODIFY_LDT_SYSCALL
+- /*
+- * Load the LDT, if the LDT is different.
+- *
+- * It's possible that prev->context.ldt doesn't match
+- * the LDT register. This can happen if leave_mm(prev)
+- * was called and then modify_ldt changed
+- * prev->context.ldt but suppressed an IPI to this CPU.
+- * In this case, prev->context.ldt != NULL, because we
+- * never set context.ldt to NULL while the mm still
+- * exists. That means that next->context.ldt !=
+- * prev->context.ldt, because mms never share an LDT.
+- */
+- if (unlikely(prev->context.ldt != next->context.ldt))
+- load_mm_ldt(next);
+-#endif
+- }
+-#ifdef CONFIG_SMP
+- else {
+- this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+- BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
+-
+- if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
+- /*
+- * On established mms, the mm_cpumask is only changed
+- * from irq context, from ptep_clear_flush() while in
+- * lazy tlb mode, and here. Irqs are blocked during
+- * schedule, protecting us from simultaneous changes.
+- */
+- cpumask_set_cpu(cpu, mm_cpumask(next));
+-
+- /*
+- * We were in lazy tlb mode and leave_mm disabled
+- * tlb flush IPI delivery. We must reload CR3
+- * to make sure to use no freed page tables.
+- *
+- * As above, load_cr3() is serializing and orders TLB
+- * fills with respect to the mm_cpumask write.
+- */
+- load_cr3(next->pgd);
+- trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+- load_mm_cr4(next);
+- load_mm_ldt(next);
+- }
+- }
+-#endif
+-}
++extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
++ struct task_struct *tsk);
++#define switch_mm_irqs_off switch_mm_irqs_off
+
+ #define activate_mm(prev, next) \
+ do { \
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index 6433e28dc9c8..4dc534175b5e 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -7,6 +7,54 @@
+ #include <asm/processor.h>
+ #include <asm/special_insns.h>
+
++static inline void __invpcid(unsigned long pcid, unsigned long addr,
++ unsigned long type)
++{
++ struct { u64 d[2]; } desc = { { pcid, addr } };
++
++ /*
++ * The memory clobber is because the whole point is to invalidate
++ * stale TLB entries and, especially if we're flushing global
++ * mappings, we don't want the compiler to reorder any subsequent
++ * memory accesses before the TLB flush.
++ *
++ * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
++ * invpcid (%rcx), %rax in long mode.
++ */
++ asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
++ : : "m" (desc), "a" (type), "c" (&desc) : "memory");
++}
++
++#define INVPCID_TYPE_INDIV_ADDR 0
++#define INVPCID_TYPE_SINGLE_CTXT 1
++#define INVPCID_TYPE_ALL_INCL_GLOBAL 2
++#define INVPCID_TYPE_ALL_NON_GLOBAL 3
++
++/* Flush all mappings for a given pcid and addr, not including globals. */
++static inline void invpcid_flush_one(unsigned long pcid,
++ unsigned long addr)
++{
++ __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
++}
++
++/* Flush all mappings for a given PCID, not including globals. */
++static inline void invpcid_flush_single_context(unsigned long pcid)
++{
++ __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
++}
++
++/* Flush all mappings, including globals, for all PCIDs. */
++static inline void invpcid_flush_all(void)
++{
++ __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
++}
++
++/* Flush all mappings for all PCIDs except globals. */
++static inline void invpcid_flush_all_nonglobals(void)
++{
++ __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
++}
++
+ #ifdef CONFIG_PARAVIRT
+ #include <asm/paravirt.h>
+ #else
+@@ -111,6 +159,15 @@ static inline void __native_flush_tlb_global(void)
+ {
+ unsigned long flags;
+
++ if (static_cpu_has(X86_FEATURE_INVPCID)) {
++ /*
++ * Using INVPCID is considerably faster than a pair of writes
++ * to CR4 sandwiched inside an IRQ flag save/restore.
++ */
++ invpcid_flush_all();
++ return;
++ }
++
+ /*
+ * Read-modify-write to CR4 - protect it from preemption and
+ * from interrupts. (Use the raw variant because this code can
+@@ -268,12 +325,6 @@ static inline void reset_lazy_tlbstate(void)
+
+ #endif /* SMP */
+
+-/* Not inlined due to inc_irq_stat not being defined yet */
+-#define flush_tlb_local() { \
+- inc_irq_stat(irq_tlb_count); \
+- local_flush_tlb(); \
+-}
+-
+ #ifndef CONFIG_PARAVIRT
+ #define flush_tlb_others(mask, mm, start, end) \
+ native_flush_tlb_others(mask, mm, start, end)
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 637ca414d431..c84b62956e8d 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -162,6 +162,22 @@ static int __init x86_mpx_setup(char *s)
+ }
+ __setup("nompx", x86_mpx_setup);
+
++static int __init x86_noinvpcid_setup(char *s)
++{
++ /* noinvpcid doesn't accept parameters */
++ if (s)
++ return -EINVAL;
++
++ /* do not emit a message if the feature is not present */
++ if (!boot_cpu_has(X86_FEATURE_INVPCID))
++ return 0;
++
++ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
++ pr_info("noinvpcid: INVPCID feature disabled\n");
++ return 0;
++}
++early_param("noinvpcid", x86_noinvpcid_setup);
++
+ #ifdef CONFIG_X86_32
+ static int cachesize_override = -1;
+ static int disable_x86_serial_nr = 1;
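This early_param handler backs the new "noinvpcid" entry added to Documentation/kernel-parameters.txt at the top of this patch: clearing X86_FEATURE_INVPCID at boot makes every static_cpu_has(X86_FEATURE_INVPCID) test fail, steering callers onto their non-INVPCID paths. A sketch of that dispatch, condensed from the __native_flush_tlb_global() hunk above:

/* Condensed sketch of the resulting runtime behaviour: after booting
 * with "noinvpcid" the capability bit is clear, so the CR4-toggle
 * fallback runs instead of the single INVPCID instruction. */
static void flush_tlb_global_sketch(void)
{
        if (static_cpu_has(X86_FEATURE_INVPCID)) {
                invpcid_flush_all();    /* one instruction, no IRQ save/restore */
                return;
        }
        /* fallback: toggle CR4.PGE with IRQs disabled (pre-patch path) */
}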
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 61521dc19c10..9f669fdd2010 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -102,8 +102,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
+ seq_puts(p, " Rescheduling interrupts\n");
+ seq_printf(p, "%*s: ", prec, "CAL");
+ for_each_online_cpu(j)
+- seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
+- irq_stats(j)->irq_tlb_count);
++ seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
+ seq_puts(p, " Function call interrupts\n");
+ seq_printf(p, "%*s: ", prec, "TLB");
+ for_each_online_cpu(j)
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index dcbafe53e2d4..d915185ada05 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1107,6 +1107,11 @@ static inline bool cpu_has_vmx_invvpid_global(void)
+ return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
+ }
+
++static inline bool cpu_has_vmx_invvpid(void)
++{
++ return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
++}
++
+ static inline bool cpu_has_vmx_ept(void)
+ {
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+@@ -6199,8 +6204,10 @@ static __init int hardware_setup(void)
+ if (boot_cpu_has(X86_FEATURE_NX))
+ kvm_enable_efer_bits(EFER_NX);
+
+- if (!cpu_has_vmx_vpid())
++ if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
++ !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
+ enable_vpid = 0;
++
+ if (!cpu_has_vmx_shadow_vmcs())
+ enable_shadow_vmcs = 0;
+ if (enable_shadow_vmcs)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index df81717a92f3..e5f44f33de89 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8230,11 +8230,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+ {
+ struct x86_exception fault;
+
+- trace_kvm_async_pf_ready(work->arch.token, work->gva);
+ if (work->wakeup_all)
+ work->arch.token = ~0; /* broadcast wakeup */
+ else
+ kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
++ trace_kvm_async_pf_ready(work->arch.token, work->gva);
+
+ if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
+ !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
+index 65c47fda26fc..1ae7c141f778 100644
+--- a/arch/x86/mm/Makefile
++++ b/arch/x86/mm/Makefile
+@@ -1,5 +1,5 @@
+ obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
+- pat.o pgtable.o physaddr.o gup.o setup_nx.o
++ pat.o pgtable.o physaddr.o gup.o setup_nx.o tlb.o
+
+ # Make sure __phys_addr has no stackprotector
+ nostackp := $(call cc-option, -fno-stack-protector)
+@@ -9,7 +9,6 @@ CFLAGS_setup_nx.o := $(nostackp)
+ CFLAGS_fault.o := -I$(src)/../include/asm/trace
+
+ obj-$(CONFIG_X86_PAT) += pat_rbtree.o
+-obj-$(CONFIG_SMP) += tlb.o
+
+ obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o
+
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 5a760fd66bec..45ba87466e6a 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -28,6 +28,8 @@
+ * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
+ */
+
++#ifdef CONFIG_SMP
++
+ struct flush_tlb_info {
+ struct mm_struct *flush_mm;
+ unsigned long flush_start;
+@@ -57,6 +59,118 @@ void leave_mm(int cpu)
+ }
+ EXPORT_SYMBOL_GPL(leave_mm);
+
++#endif /* CONFIG_SMP */
++
++void switch_mm(struct mm_struct *prev, struct mm_struct *next,
++ struct task_struct *tsk)
++{
++ unsigned long flags;
++
++ local_irq_save(flags);
++ switch_mm_irqs_off(prev, next, tsk);
++ local_irq_restore(flags);
++}
++
++void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
++ struct task_struct *tsk)
++{
++ unsigned cpu = smp_processor_id();
++
++ if (likely(prev != next)) {
++#ifdef CONFIG_SMP
++ this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
++ this_cpu_write(cpu_tlbstate.active_mm, next);
++#endif
++ cpumask_set_cpu(cpu, mm_cpumask(next));
++
++ /*
++ * Re-load page tables.
++ *
++ * This logic has an ordering constraint:
++ *
++ * CPU 0: Write to a PTE for 'next'
++ * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
++ * CPU 1: set bit 1 in next's mm_cpumask
++ * CPU 1: load from the PTE that CPU 0 writes (implicit)
++ *
++ * We need to prevent an outcome in which CPU 1 observes
++ * the new PTE value and CPU 0 observes bit 1 clear in
++ * mm_cpumask. (If that occurs, then the IPI will never
++ * be sent, and CPU 0's TLB will contain a stale entry.)
++ *
++ * The bad outcome can occur if either CPU's load is
++ * reordered before that CPU's store, so both CPUs must
++ * execute full barriers to prevent this from happening.
++ *
++ * Thus, switch_mm needs a full barrier between the
++ * store to mm_cpumask and any operation that could load
++ * from next->pgd. TLB fills are special and can happen
++ * due to instruction fetches or for no reason at all,
++ * and neither LOCK nor MFENCE orders them.
++ * Fortunately, load_cr3() is serializing and gives the
++ * ordering guarantee we need.
++ *
++ */
++ load_cr3(next->pgd);
++
++ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
++
++ /* Stop flush ipis for the previous mm */
++ cpumask_clear_cpu(cpu, mm_cpumask(prev));
++
++ /* Load per-mm CR4 state */
++ load_mm_cr4(next);
++
++#ifdef CONFIG_MODIFY_LDT_SYSCALL
++ /*
++ * Load the LDT, if the LDT is different.
++ *
++ * It's possible that prev->context.ldt doesn't match
++ * the LDT register. This can happen if leave_mm(prev)
++ * was called and then modify_ldt changed
++ * prev->context.ldt but suppressed an IPI to this CPU.
++ * In this case, prev->context.ldt != NULL, because we
++ * never set context.ldt to NULL while the mm still
++ * exists. That means that next->context.ldt !=
++ * prev->context.ldt, because mms never share an LDT.
++ */
++ if (unlikely(prev->context.ldt != next->context.ldt))
++ load_mm_ldt(next);
++#endif
++ }
++#ifdef CONFIG_SMP
++ else {
++ this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
++ BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
++
++ if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
++ /*
++ * On established mms, the mm_cpumask is only changed
++ * from irq context, from ptep_clear_flush() while in
++ * lazy tlb mode, and here. Irqs are blocked during
++ * schedule, protecting us from simultaneous changes.
++ */
++ cpumask_set_cpu(cpu, mm_cpumask(next));
++
++ /*
++ * We were in lazy tlb mode and leave_mm disabled
++ * tlb flush IPI delivery. We must reload CR3
++ * to make sure to use no freed page tables.
++ *
++ * As above, load_cr3() is serializing and orders TLB
++ * fills with respect to the mm_cpumask write.
++ */
++ load_cr3(next->pgd);
++ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
++ load_mm_cr4(next);
++ load_mm_ldt(next);
++ }
++ }
++#endif
++}
++
++#ifdef CONFIG_SMP
++
+ /*
+ * The flush IPI assumes that a thread switch happens in this order:
+ * [cpu0: the cpu that switches]
+@@ -104,7 +218,7 @@ static void flush_tlb_func(void *info)
+
+ inc_irq_stat(irq_tlb_count);
+
+- if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
++ if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+ return;
+
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+@@ -351,3 +465,5 @@ static int __init create_tlb_single_page_flush_ceiling(void)
+ return 0;
+ }
+ late_initcall(create_tlb_single_page_flush_ceiling);
++
++#endif /* CONFIG_SMP */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index ce120fbe229e..1ccad79ce77c 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -1050,10 +1050,6 @@ static int btusb_open(struct hci_dev *hdev)
+ return err;
+
+ data->intf->needs_remote_wakeup = 1;
+- /* device specific wakeup source enabled and required for USB
+- * remote wakeup while host is suspended
+- */
+- device_wakeup_enable(&data->udev->dev);
+
+ if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
+ goto done;
+@@ -1117,7 +1113,6 @@ static int btusb_close(struct hci_dev *hdev)
+ goto failed;
+
+ data->intf->needs_remote_wakeup = 0;
+- device_wakeup_disable(&data->udev->dev);
+ usb_autopm_put_interface(data->intf);
+
+ failed:
+diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
+index 845bafcfa792..d5c5a476360f 100644
+--- a/drivers/cpuidle/cpuidle-powernv.c
++++ b/drivers/cpuidle/cpuidle-powernv.c
+@@ -160,6 +160,24 @@ static int powernv_cpuidle_driver_init(void)
+ drv->state_count += 1;
+ }
+
++ /*
++ * On the PowerNV platform cpu_present may be less than cpu_possible in
++ * cases when firmware detects the CPU, but it is not available to the
++ * OS. If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotplugable at
++ * run time and hence cpu_devices are not created for those CPUs by the
++ * generic topology_init().
++ *
++ * drv->cpumask defaults to cpu_possible_mask in
++ * __cpuidle_driver_init(). This breaks cpuidle on PowerNV where
++ * cpu_devices are not created for CPUs in cpu_possible_mask that
++ * cannot be hot-added later at run time.
++ *
++ * Trying cpuidle_register_device() on a CPU without a cpu_device is
++ * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
++ */
++
++ drv->cpumask = (struct cpumask *)cpu_present_mask;
++
+ return 0;
+ }
+
+diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
+index d40b2c077746..f1dd0f73820d 100644
+--- a/drivers/cpuidle/cpuidle.c
++++ b/drivers/cpuidle/cpuidle.c
+@@ -189,6 +189,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+ return -EBUSY;
+ }
+ target_state = &drv->states[index];
++ broadcast = false;
+ }
+
+ /* Take note of the planned idle state. */
+diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
+index 832a2c3f01ff..9e98a5fbbc1d 100644
+--- a/drivers/cpuidle/sysfs.c
++++ b/drivers/cpuidle/sysfs.c
+@@ -613,6 +613,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
+ struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
+ int error;
+
++ /*
++ * Return if cpu_device is not setup for this CPU.
++ *
++ * This could happen if the arch did not set up cpu_device
++ * since this CPU is not in cpu_present mask and the
++ * driver did not send a correct CPU mask during registration.
++ * Without this check we would end up passing bogus
++ * value for &cpu_dev->kobj in kobject_init_and_add()
++ */
++ if (!cpu_dev)
++ return -ENODEV;
++
+ kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+ if (!kdev)
+ return -ENOMEM;
+diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
+index bac0bdeb4b5f..b6529b9fcbe2 100644
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -32,12 +32,12 @@
+ #define PPC405EX_CE_RESET 0x00000008
+
+ #define CRYPTO4XX_CRYPTO_PRIORITY 300
+-#define PPC4XX_LAST_PD 63
+-#define PPC4XX_NUM_PD 64
+-#define PPC4XX_LAST_GD 1023
++#define PPC4XX_NUM_PD 256
++#define PPC4XX_LAST_PD (PPC4XX_NUM_PD - 1)
+ #define PPC4XX_NUM_GD 1024
+-#define PPC4XX_LAST_SD 63
+-#define PPC4XX_NUM_SD 64
++#define PPC4XX_LAST_GD (PPC4XX_NUM_GD - 1)
++#define PPC4XX_NUM_SD 256
++#define PPC4XX_LAST_SD (PPC4XX_NUM_SD - 1)
+ #define PPC4XX_SD_BUFFER_SIZE 2048
+
+ #define PD_ENTRY_INUSE 1
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 1a1fc8351289..3ba486d0ec6f 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -2053,6 +2053,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 6937086060a6..b554d17c9156 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -1021,6 +1021,7 @@
+
+ #define USB_VENDOR_ID_XIN_MO 0x16c0
+ #define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1
++#define USB_DEVICE_ID_THT_2P_ARCADE 0x75e1
+
+ #define USB_VENDOR_ID_XIROKU 0x1477
+ #define USB_DEVICE_ID_XIROKU_SPX 0x1006
+diff --git a/drivers/hid/hid-xinmo.c b/drivers/hid/hid-xinmo.c
+index 7df5227a7e61..9ad7731d2e10 100644
+--- a/drivers/hid/hid-xinmo.c
++++ b/drivers/hid/hid-xinmo.c
+@@ -46,6 +46,7 @@ static int xinmo_event(struct hid_device *hdev, struct hid_field *field,
+
+ static const struct hid_device_id xinmo_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
+ { }
+ };
+
+diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
+index cccef87963e0..975c43d446f8 100644
+--- a/drivers/hwmon/asus_atk0110.c
++++ b/drivers/hwmon/asus_atk0110.c
+@@ -646,6 +646,9 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value)
+ else
+ err = atk_read_value_new(sensor, value);
+
++ if (err)
++ return err;
++
+ sensor->is_valid = true;
+ sensor->last_updated = jiffies;
+ sensor->cached_value = *value;
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 8a5998e6a407..88f97ea6b366 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -450,6 +450,7 @@ struct iser_fr_desc {
+ struct list_head list;
+ struct iser_reg_resources rsc;
+ struct iser_pi_context *pi_ctx;
++ struct list_head all_list;
+ };
+
+ /**
+@@ -463,6 +464,7 @@ struct iser_fr_pool {
+ struct list_head list;
+ spinlock_t lock;
+ int size;
++ struct list_head all_list;
+ };
+
+ /**
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 42f4da620f2e..0cbc7ceb9a55 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -405,6 +405,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
+ int i, ret;
+
+ INIT_LIST_HEAD(&fr_pool->list);
++ INIT_LIST_HEAD(&fr_pool->all_list);
+ spin_lock_init(&fr_pool->lock);
+ fr_pool->size = 0;
+ for (i = 0; i < cmds_max; i++) {
+@@ -416,6 +417,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
+ }
+
+ list_add_tail(&desc->list, &fr_pool->list);
++ list_add_tail(&desc->all_list, &fr_pool->all_list);
+ fr_pool->size++;
+ }
+
+@@ -435,13 +437,13 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
+ struct iser_fr_desc *desc, *tmp;
+ int i = 0;
+
+- if (list_empty(&fr_pool->list))
++ if (list_empty(&fr_pool->all_list))
+ return;
+
+ iser_info("freeing conn %p fr pool\n", ib_conn);
+
+- list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
+- list_del(&desc->list);
++ list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
++ list_del(&desc->all_list);
+ iser_free_reg_res(&desc->rsc);
+ if (desc->pi_ctx)
+ iser_free_pi_ctx(desc->pi_ctx);
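The leak this fixes: fastreg descriptors are removed from fr_pool->list while a command owns them, so a teardown that walks only the free list misses every in-flight descriptor. The new all_list tracks ownership rather than availability. The general pattern, with hypothetical names:

/* Pattern sketch (hypothetical names): keep one list for availability
 * and one for ownership, so teardown can free busy entries too. */
struct pooled_desc {
        struct list_head list;          /* on the free list only while idle */
        struct list_head all_list;      /* on the pool's all_list for life */
};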
+diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
+index 823f6985b260..dd7e38ac29bd 100644
+--- a/drivers/isdn/capi/kcapi.c
++++ b/drivers/isdn/capi/kcapi.c
+@@ -1032,6 +1032,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
+ sizeof(avmb1_carddef))))
+ return -EFAULT;
+ cdef.cardtype = AVM_CARDTYPE_B1;
++ cdef.cardnr = 0;
+ } else {
+ if ((retval = copy_from_user(&cdef, data,
+ sizeof(avmb1_extcarddef))))
+diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
+index 0c6c17a1c59e..ba2f6d1d7db7 100644
+--- a/drivers/misc/cxl/pci.c
++++ b/drivers/misc/cxl/pci.c
+@@ -1329,6 +1329,9 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
+ /* There should only be one entry, but go through the list
+ * anyway
+ */
++ if (afu->phb == NULL)
++ return result;
++
+ list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+ if (!afu_dev->driver)
+ continue;
+@@ -1369,6 +1372,10 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
+ */
+ for (i = 0; i < adapter->slices; i++) {
+ afu = adapter->afu[i];
++ /*
++ * Tell the AFU drivers; but we don't care what they
++ * say, we're going away.
++ */
+ cxl_vphb_error_detected(afu, state);
+ }
+ return PCI_ERS_RESULT_DISCONNECT;
+@@ -1492,6 +1499,9 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
+ if (cxl_afu_select_best_mode(afu))
+ goto err;
+
++ if (afu->phb == NULL)
++ continue;
++
+ cxl_pci_vphb_reconfigure(afu);
+
+ list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+@@ -1556,6 +1566,9 @@ static void cxl_pci_resume(struct pci_dev *pdev)
+ for (i = 0; i < adapter->slices; i++) {
+ afu = adapter->afu[i];
+
++ if (afu->phb == NULL)
++ continue;
++
+ list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+ if (afu_dev->driver && afu_dev->driver->err_handler &&
+ afu_dev->driver->err_handler->resume)
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 4744919440e0..a38a9cb3d544 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2014,6 +2014,18 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+ return 0;
+ }
+
++static void bnxt_init_cp_rings(struct bnxt *bp)
++{
++ int i;
++
++ for (i = 0; i < bp->cp_nr_rings; i++) {
++ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
++ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
++
++ ring->fw_ring_id = INVALID_HW_RING_ID;
++ }
++}
++
+ static int bnxt_init_rx_rings(struct bnxt *bp)
+ {
+ int i, rc = 0;
+@@ -3977,6 +3989,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
+
+ static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
+ {
++ bnxt_init_cp_rings(bp);
+ bnxt_init_rx_rings(bp);
+ bnxt_init_tx_rings(bp);
+ bnxt_init_ring_grps(bp, irq_re_init);
+diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+index 9e59663a6ead..0f6811860ad5 100644
+--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
++++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+@@ -1930,13 +1930,13 @@ static void
+ bfa_ioc_send_enable(struct bfa_ioc *ioc)
+ {
+ struct bfi_ioc_ctrl_req enable_req;
+- struct timeval tv;
+
+ bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
+ bfa_ioc_portid(ioc));
+ enable_req.clscode = htons(ioc->clscode);
+- do_gettimeofday(&tv);
+- enable_req.tv_sec = ntohl(tv.tv_sec);
++ enable_req.rsvd = htons(0);
++ /* overflow in 2106 */
++ enable_req.tv_sec = ntohl(ktime_get_real_seconds());
+ bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
+ }
+
+@@ -1947,6 +1947,10 @@ bfa_ioc_send_disable(struct bfa_ioc *ioc)
+
+ bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
+ bfa_ioc_portid(ioc));
++ disable_req.clscode = htons(ioc->clscode);
++ disable_req.rsvd = htons(0);
++ /* overflow in 2106 */
++ disable_req.tv_sec = ntohl(ktime_get_real_seconds());
+ bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
+ }
+
+diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+index 8fc246ea1fb8..a4ad782007ce 100644
+--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
++++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+@@ -324,7 +324,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
+ return PTR_ERR(kern_buf);
+
+ rc = sscanf(kern_buf, "%x:%x", &addr, &len);
+- if (rc < 2) {
++ if (rc < 2 || len > UINT_MAX >> 2) {
+ netdev_warn(bnad->netdev, "failed to read user buffer\n");
+ kfree(kern_buf);
+ return -EINVAL;
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+index acfb8b1f88a7..a8f9d0012d82 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+@@ -126,6 +126,9 @@ process_mbx:
+ struct fm10k_mbx_info *mbx = &vf_info->mbx;
+ u16 glort = vf_info->glort;
+
++ /* process the SM mailbox first to drain outgoing messages */
++ hw->mbx.ops.process(hw, &hw->mbx);
++
+ /* verify port mapping is valid, if not reset port */
+ if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
+ hw->iov.ops.reset_lport(hw, vf_info);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index b5b228c9a030..06b38f50980c 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -4201,8 +4201,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
+ if (!vsi->netdev)
+ return;
+
+- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+- napi_enable(&vsi->q_vectors[q_idx]->napi);
++ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
++ struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
++
++ if (q_vector->rx.ring || q_vector->tx.ring)
++ napi_enable(&q_vector->napi);
++ }
+ }
+
+ /**
+@@ -4216,8 +4220,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
+ if (!vsi->netdev)
+ return;
+
+- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+- napi_disable(&vsi->q_vectors[q_idx]->napi);
++ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
++ struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
++
++ if (q_vector->rx.ring || q_vector->tx.ring)
++ napi_disable(&q_vector->napi);
++ }
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index c55552c3d2f9..53803fd6350c 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -3005,6 +3005,8 @@ static int igb_sw_init(struct igb_adapter *adapter)
+ /* Setup and initialize a copy of the hw vlan table array */
+ adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
+ GFP_ATOMIC);
++ if (!adapter->shadow_vfta)
++ return -ENOMEM;
+
+ /* This call may decrease the number of queues */
+ if (igb_init_interrupt_scheme(adapter, true)) {
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+index ce61b36b94f1..105dd00ddc1a 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+@@ -3620,10 +3620,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.hdr.checksum = 0;
+- fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+- (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+ fw_cmd.pad = 0;
+ fw_cmd.pad2 = 0;
++ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
++ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+index 31f864fb30c1..a75f2e3ce86f 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+@@ -564,6 +564,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ /* convert offset from words to bytes */
+ buffer.address = cpu_to_be32((offset + current_word) * 2);
+ buffer.length = cpu_to_be16(words_to_read * 2);
++ buffer.pad2 = 0;
++ buffer.pad3 = 0;
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
+index a0849f49bbec..c0192f97ecc8 100644
+--- a/drivers/net/irda/vlsi_ir.c
++++ b/drivers/net/irda/vlsi_ir.c
+@@ -418,8 +418,9 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
+ memset(rd, 0, sizeof(*rd));
+ rd->hw = hwmap + i;
+ rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
+- if (rd->buf == NULL ||
+- !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
++ if (rd->buf)
++ busaddr = pci_map_single(pdev, rd->buf, len, dir);
++ if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
+ if (rd->buf) {
+ net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
+ __func__, rd->buf);
+@@ -430,8 +431,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
+ rd = r->rd + j;
+ busaddr = rd_get_addr(rd);
+ rd_set_addr_status(rd, 0, 0);
+- if (busaddr)
+- pci_unmap_single(pdev, busaddr, len, dir);
++ pci_unmap_single(pdev, busaddr, len, dir);
+ kfree(rd->buf);
+ rd->buf = NULL;
+ }
+diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
+index 2d020a3ec0b5..37333d38b576 100644
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -105,7 +105,7 @@ static int at803x_set_wol(struct phy_device *phydev,
+ mac = (const u8 *) ndev->dev_addr;
+
+ if (!is_valid_ether_addr(mac))
+- return -EFAULT;
++ return -EINVAL;
+
+ for (i = 0; i < 3; i++) {
+ phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index e325ca3ad565..2cbecbda1ae3 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -410,6 +410,10 @@ static const struct usb_device_id products[] = {
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
++ { /* Motorola Mapphone devices with MDM6600 */
++ USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff),
++ .driver_info = (unsigned long)&qmi_wwan_info,
++ },
+
+ /* 2. Combined interface devices matching on class+protocol */
+ { /* Huawei E367 and possibly others in "Windows mode" */
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 1c27e6fb99f9..304ec25eaf95 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -1207,6 +1207,7 @@ static void intr_callback(struct urb *urb)
+ }
+ } else {
+ if (netif_carrier_ok(tp->netdev)) {
++ netif_stop_queue(tp->netdev);
+ set_bit(RTL8152_LINK_CHG, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+ }
+@@ -1277,6 +1278,7 @@ static int alloc_all_mem(struct r8152 *tp)
+ spin_lock_init(&tp->rx_lock);
+ spin_lock_init(&tp->tx_lock);
+ INIT_LIST_HEAD(&tp->tx_free);
++ INIT_LIST_HEAD(&tp->rx_done);
+ skb_queue_head_init(&tp->tx_queue);
+ skb_queue_head_init(&tp->rx_queue);
+
+@@ -3000,6 +3002,9 @@ static void set_carrier(struct r8152 *tp)
+ napi_enable(&tp->napi);
+ netif_wake_queue(netdev);
+ netif_info(tp, link, netdev, "carrier on\n");
++ } else if (netif_queue_stopped(netdev) &&
++ skb_queue_len(&tp->tx_queue) < tp->tx_qlen) {
++ netif_wake_queue(netdev);
+ }
+ } else {
+ if (netif_carrier_ok(netdev)) {
+@@ -3560,8 +3565,18 @@ static int rtl8152_resume(struct usb_interface *intf)
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ napi_disable(&tp->napi);
+ set_bit(WORK_ENABLE, &tp->flags);
+- if (netif_carrier_ok(tp->netdev))
+- rtl_start_rx(tp);
++
++ if (netif_carrier_ok(tp->netdev)) {
++ if (rtl8152_get_speed(tp) & LINK_STATUS) {
++ rtl_start_rx(tp);
++ } else {
++ netif_carrier_off(tp->netdev);
++ tp->rtl_ops.disable(tp);
++ netif_info(tp, link, tp->netdev,
++ "linking down\n");
++ }
++ }
++
+ napi_enable(&tp->napi);
+ } else {
+ tp->rtl_ops.up(tp);
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
+index 357527712539..7680fc0349fc 100644
+--- a/drivers/pci/iov.c
++++ b/drivers/pci/iov.c
+@@ -161,7 +161,6 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
+ pci_device_add(virtfn, virtfn->bus);
+ mutex_unlock(&iov->dev->sriov->lock);
+
+- pci_bus_add_device(virtfn);
+ sprintf(buf, "virtfn%u", id);
+ rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
+ if (rc)
+@@ -172,6 +171,8 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
+
+ kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
+
++ pci_bus_add_device(virtfn);
++
+ return 0;
+
+ failed2:
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 1a14ca8965e6..295bf1472d02 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -3850,6 +3850,10 @@ static bool pci_bus_resetable(struct pci_bus *bus)
+ {
+ struct pci_dev *dev;
+
++
++ if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
++ return false;
++
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
+index 4e14de0f0f98..ca5dbf03e388 100644
+--- a/drivers/pci/pcie/aer/aerdrv_core.c
++++ b/drivers/pci/pcie/aer/aerdrv_core.c
+@@ -388,7 +388,14 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
+ * If the error is reported by an end point, we think this
+ * error is related to the upstream link of the end point.
+ */
+- pci_walk_bus(dev->bus, cb, &result_data);
++ if (state == pci_channel_io_normal)
++ /*
++ * the error is non fatal so the bus is ok, just invoke
++ * the callback for the function that logged the error.
++ */
++ cb(dev, &result_data);
++ else
++ pci_walk_bus(dev->bus, cb, &result_data);
+ }
+
+ return result_data.result;
+diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
+index b58d3f29148a..6908b6ce2074 100644
+--- a/drivers/pinctrl/pinctrl-st.c
++++ b/drivers/pinctrl/pinctrl-st.c
+@@ -1338,6 +1338,22 @@ static void st_gpio_irq_unmask(struct irq_data *d)
+ writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
+ }
+
++static int st_gpio_irq_request_resources(struct irq_data *d)
++{
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++
++ st_gpio_direction_input(gc, d->hwirq);
++
++ return gpiochip_lock_as_irq(gc, d->hwirq);
++}
++
++static void st_gpio_irq_release_resources(struct irq_data *d)
++{
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++
++ gpiochip_unlock_as_irq(gc, d->hwirq);
++}
++
+ static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+@@ -1493,12 +1509,14 @@ static struct gpio_chip st_gpio_template = {
+ };
+
+ static struct irq_chip st_gpio_irqchip = {
+- .name = "GPIO",
+- .irq_disable = st_gpio_irq_mask,
+- .irq_mask = st_gpio_irq_mask,
+- .irq_unmask = st_gpio_irq_unmask,
+- .irq_set_type = st_gpio_irq_set_type,
+- .flags = IRQCHIP_SKIP_SET_WAKE,
++ .name = "GPIO",
++ .irq_request_resources = st_gpio_irq_request_resources,
++ .irq_release_resources = st_gpio_irq_release_resources,
++ .irq_disable = st_gpio_irq_mask,
++ .irq_mask = st_gpio_irq_mask,
++ .irq_unmask = st_gpio_irq_unmask,
++ .irq_set_type = st_gpio_irq_set_type,
++ .flags = IRQCHIP_SKIP_SET_WAKE,
+ };
+
+ static int st_gpiolib_register_bank(struct st_pinctrl *info,
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index 9bb934ed2a7a..dcfd3655ef0a 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -764,7 +764,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+ }
+
+ timerqueue_add(&rtc->timerqueue, &timer->node);
+- if (!next) {
++ if (!next || ktime_before(timer->node.expires, next->expires)) {
+ struct rtc_wkalrm alarm;
+ int err;
+ alarm.time = rtc_ktime_to_tm(timer->node.expires);
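Before this change the hardware alarm was reprogrammed only when the queue had been empty, so a timer enqueued ahead of the current head fired late: the interrupt stayed armed for the old, later expiry. A worked timeline as comments, plus the corrected condition (program_alarm() is a hypothetical stand-in for the alarm-setting sequence that follows in the real function):

/* Worked example: alarm armed for the current head at T=100; a new
 * timer for T=60 is enqueued. The old "if (!next)" does nothing, so
 * the IRQ arrives at T=100 and the T=60 timer runs 40 units late.
 * ktime_before(60, 100) is true, so the alarm is re-armed for T=60. */
next = timerqueue_getnext(&rtc->timerqueue);    /* head before insert */
timerqueue_add(&rtc->timerqueue, &timer->node);
if (!next || ktime_before(timer->node.expires, next->expires))
        program_alarm(rtc, timer->node.expires);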
+diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
+index e1687e19c59f..a30f24cb6c83 100644
+--- a/drivers/rtc/rtc-pl031.c
++++ b/drivers/rtc/rtc-pl031.c
+@@ -308,7 +308,8 @@ static int pl031_remove(struct amba_device *adev)
+
+ dev_pm_clear_wake_irq(&adev->dev);
+ device_init_wakeup(&adev->dev, false);
+- free_irq(adev->irq[0], ldata);
++ if (adev->irq[0])
++ free_irq(adev->irq[0], ldata);
+ rtc_device_unregister(ldata->rtc);
+ iounmap(ldata->base);
+ kfree(ldata);
+@@ -381,12 +382,13 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
+ goto out_no_rtc;
+ }
+
+- if (request_irq(adev->irq[0], pl031_interrupt,
+- vendor->irqflags, "rtc-pl031", ldata)) {
+- ret = -EIO;
+- goto out_no_irq;
++ if (adev->irq[0]) {
++ ret = request_irq(adev->irq[0], pl031_interrupt,
++ vendor->irqflags, "rtc-pl031", ldata);
++ if (ret)
++ goto out_no_irq;
++ dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
+ }
+- dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
+ return 0;
+
+ out_no_irq:
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index bf3c1b2301db..0d6888cbd96e 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -2680,17 +2680,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
+ char daddr[16];
+ struct af_iucv_trans_hdr *iucv_hdr;
+
+- skb_pull(skb, 14);
+- card->dev->header_ops->create(skb, card->dev, 0,
+- card->dev->dev_addr, card->dev->dev_addr,
+- card->dev->addr_len);
+- skb_pull(skb, 14);
+- iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
+ memset(hdr, 0, sizeof(struct qeth_hdr));
+ hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
+ hdr->hdr.l3.ext_flags = 0;
+- hdr->hdr.l3.length = skb->len;
++ hdr->hdr.l3.length = skb->len - ETH_HLEN;
+ hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
++
++ iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
+ memset(daddr, 0, sizeof(daddr));
+ daddr[0] = 0xfe;
+ daddr[1] = 0x80;
+@@ -2873,10 +2869,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
+ (skb_shinfo(skb)->nr_frags == 0)) {
+ new_skb = skb;
+- if (new_skb->protocol == ETH_P_AF_IUCV)
+- data_offset = 0;
+- else
+- data_offset = ETH_HLEN;
++ data_offset = ETH_HLEN;
+ hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+ if (!hdr)
+ goto tx_drop;
+diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+index 804806e1cbb4..7a48905b8195 100644
+--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
++++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+@@ -1339,6 +1339,7 @@ static void release_offload_resources(struct cxgbi_sock *csk)
+ csk, csk->state, csk->flags, csk->tid);
+
+ cxgbi_sock_free_cpl_skbs(csk);
++ cxgbi_sock_purge_write_queue(csk);
+ if (csk->wr_cred != csk->wr_max_cred) {
+ cxgbi_sock_purge_wr_queue(csk);
+ cxgbi_sock_reset_wr_list(csk);
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index fc8f9b446556..fd8fe1202dbe 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -7491,7 +7491,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvPRLI++;
+- if (vport->port_state < LPFC_DISC_AUTH) {
++ if ((vport->port_state < LPFC_DISC_AUTH) &&
++ (vport->fc_flag & FC_FABRIC)) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
+ break;
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index d3668aa555d5..be901f6db6d3 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -4777,7 +4777,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
+ !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
+- !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
++ !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
++ phba->sli_rev != LPFC_SLI_REV4) {
+ /* For this case we need to cleanup the default rpi
+ * allocated by the firmware.
+ */
+diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
+index f224cdb2fce4..507869bc0673 100644
+--- a/drivers/scsi/lpfc/lpfc_hw4.h
++++ b/drivers/scsi/lpfc/lpfc_hw4.h
+@@ -3180,7 +3180,7 @@ struct lpfc_mbx_get_port_name {
+ #define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
+ #define MB_CQE_STATUS_DMA_FAILED 0x5
+
+-#define LPFC_MBX_WR_CONFIG_MAX_BDE 8
++#define LPFC_MBX_WR_CONFIG_MAX_BDE 1
+ struct lpfc_mbx_wr_object {
+ struct mbox_header header;
+ union {
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index e333029e4b6c..e111c3d8c5d6 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -4588,6 +4588,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+ } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
+ scmd->result = DID_RESET << 16;
+ break;
++ } else if ((scmd->device->channel == RAID_CHANNEL) &&
++ (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
++ MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
++ scmd->result = DID_RESET << 16;
++ break;
+ }
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
+index 36d07295f8e3..a56f6cac6fc5 100644
+--- a/drivers/thermal/hisi_thermal.c
++++ b/drivers/thermal/hisi_thermal.c
+@@ -389,8 +389,11 @@ static int hisi_thermal_suspend(struct device *dev)
+ static int hisi_thermal_resume(struct device *dev)
+ {
+ struct hisi_thermal_data *data = dev_get_drvdata(dev);
++ int ret;
+
+- clk_prepare_enable(data->clk);
++ ret = clk_prepare_enable(data->clk);
++ if (ret)
++ return ret;
+
+ data->irq_enabled = true;
+ hisi_thermal_enable_bind_irq_sensor(data);
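
The hisi_thermal hunk above stops ignoring the return value of clk_prepare_enable() on resume. A standalone C sketch of the check-and-propagate idiom it adopts (the stub and its error value are illustrative, not from the driver):

    #include <stdio.h>

    /* stand-in for clk_prepare_enable(); pretend the clock failed to start */
    static int clk_prepare_enable_stub(void) { return -5; }

    static int resume(void)
    {
        int ret = clk_prepare_enable_stub();
        if (ret)
            return ret;    /* propagate instead of silently continuing */
        puts("sensor re-armed");
        return 0;
    }

    int main(void) { return resume() ? 1 : 0; }
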
+diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
+index c7689d05356c..f8a1881609a2 100644
+--- a/drivers/usb/gadget/function/f_uvc.c
++++ b/drivers/usb/gadget/function/f_uvc.c
+@@ -594,6 +594,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
+ opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U);
+ opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
+
++ /* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */
++ if (opts->streaming_maxburst &&
++ (opts->streaming_maxpacket % 1024) != 0) {
++ opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024);
++ INFO(cdev, "overriding streaming_maxpacket to %d\n",
++ opts->streaming_maxpacket);
++ }
++
+ /* Fill in the FS/HS/SS Video Streaming specific descriptors from the
+ * module parameters.
+ *
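
The f_uvc hunk enforces the SuperSpeed rule its new comment states: when bMaxBurst is non-zero, wMaxPacketSize must be a multiple of 1024, so the option is rounded up. A standalone sketch of that rounding (values illustrative, not from the gadget code):

    #include <stdio.h>

    /* round x up to the next multiple of 'to', like the kernel's roundup() */
    static unsigned int roundup_u(unsigned int x, unsigned int to)
    {
        return ((x + to - 1) / to) * to;
    }

    int main(void)
    {
        unsigned int maxpacket = 3000, maxburst = 2;

        if (maxburst && maxpacket % 1024)
            maxpacket = roundup_u(maxpacket, 1024);
        printf("streaming_maxpacket -> %u\n", maxpacket); /* prints 3072 */
        return 0;
    }
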
+diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
+index 7a04157ff579..2806457b4748 100644
+--- a/drivers/usb/gadget/udc/pch_udc.c
++++ b/drivers/usb/gadget/udc/pch_udc.c
+@@ -1534,7 +1534,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
+ td = phys_to_virt(addr);
+ addr2 = (dma_addr_t)td->next;
+ pci_pool_free(dev->data_requests, td, addr);
+- td->next = 0x00;
+ addr = addr2;
+ }
+ req->chain_len = 1;
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 062cf8a84a59..7afd607ea60f 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -284,6 +284,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
+ static struct platform_driver usb_xhci_driver = {
+ .probe = xhci_plat_probe,
+ .remove = xhci_plat_remove,
++ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "xhci-hcd",
+ .pm = DEV_PM_OPS,
+diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
+index ae3c6b6fd5db..d0c79153081d 100644
+--- a/drivers/video/backlight/pwm_bl.c
++++ b/drivers/video/backlight/pwm_bl.c
+@@ -79,14 +79,17 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
+ static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
+ {
+ unsigned int lth = pb->lth_brightness;
+- int duty_cycle;
++ u64 duty_cycle;
+
+ if (pb->levels)
+ duty_cycle = pb->levels[brightness];
+ else
+ duty_cycle = brightness;
+
+- return (duty_cycle * (pb->period - lth) / pb->scale) + lth;
++ duty_cycle *= pb->period - lth;
++ do_div(duty_cycle, pb->scale);
++
++ return duty_cycle + lth;
+ }
+
+ static int pwm_backlight_update_status(struct backlight_device *bl)
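
The pwm_bl hunk widens duty_cycle to u64 and divides with do_div() because duty_cycle * (pb->period - lth) can overflow 32 bits for long PWM periods. A standalone sketch of the overflow and the widened arithmetic (period and level values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t level = 255, scale = 255;
        uint32_t period_ns = 40000000;  /* long PWM period */

        /* 32-bit intermediate wraps: 255 * 40000000 > UINT32_MAX */
        uint32_t bad = level * period_ns / scale;

        /* 64-bit intermediate, as the patched code does via u64 + do_div() */
        uint64_t wide = (uint64_t)level * period_ns;
        uint32_t good = (uint32_t)(wide / scale);

        printf("32-bit: %u ns, 64-bit: %u ns\n", bad, good);
        return 0;
    }
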
+diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
+index 70fffeba7495..a4441784503b 100644
+--- a/include/linux/mmu_context.h
++++ b/include/linux/mmu_context.h
+@@ -1,9 +1,16 @@
+ #ifndef _LINUX_MMU_CONTEXT_H
+ #define _LINUX_MMU_CONTEXT_H
+
++#include <asm/mmu_context.h>
++
+ struct mm_struct;
+
+ void use_mm(struct mm_struct *mm);
+ void unuse_mm(struct mm_struct *mm);
+
++/* Architectures that care about IRQ state in switch_mm can override this. */
++#ifndef switch_mm_irqs_off
++# define switch_mm_irqs_off switch_mm
++#endif
++
+ #endif
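
The mmu_context.h hunk uses the common override-with-default pattern: an architecture that defines switch_mm_irqs_off gets its optimized variant, and everyone else transparently falls back to switch_mm. A minimal standalone illustration of the pattern (functions are stand-ins):

    #include <stdio.h>

    static void switch_mm(void) { puts("generic switch_mm"); }

    /* an arch header could define this to a faster variant before this point */
    #ifndef switch_mm_irqs_off
    # define switch_mm_irqs_off switch_mm   /* safe fallback */
    #endif

    int main(void)
    {
        switch_mm_irqs_off();   /* callers never care which one they got */
        return 0;
    }
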
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 15874a85ebcf..9d6b3d869592 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -32,7 +32,7 @@
+ #include <linux/init.h>
+ #include <linux/uaccess.h>
+ #include <linux/highmem.h>
+-#include <asm/mmu_context.h>
++#include <linux/mmu_context.h>
+ #include <linux/interrupt.h>
+ #include <linux/capability.h>
+ #include <linux/completion.h>
+@@ -2708,7 +2708,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
+ atomic_inc(&oldmm->mm_count);
+ enter_lazy_tlb(oldmm, next);
+ } else
+- switch_mm(oldmm, mm, next);
++ switch_mm_irqs_off(oldmm, mm, next);
+
+ if (!prev->mm) {
+ prev->active_mm = NULL;
+diff --git a/mm/mmu_context.c b/mm/mmu_context.c
+index f802c2d216a7..6f4d27c5bb32 100644
+--- a/mm/mmu_context.c
++++ b/mm/mmu_context.c
+@@ -4,9 +4,9 @@
+ */
+
+ #include <linux/mm.h>
++#include <linux/sched.h>
+ #include <linux/mmu_context.h>
+ #include <linux/export.h>
+-#include <linux/sched.h>
+
+ #include <asm/mmu_context.h>
+
+diff --git a/mm/rmap.c b/mm/rmap.c
+index ede183c32f45..1bceb49aa214 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -587,19 +587,6 @@ vma_address(struct page *page, struct vm_area_struct *vma)
+ }
+
+ #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+-static void percpu_flush_tlb_batch_pages(void *data)
+-{
+- /*
+- * All TLB entries are flushed on the assumption that it is
+- * cheaper to flush all TLBs and let them be refilled than
+- * flushing individual PFNs. Note that we do not track mm's
+- * to flush as that might simply be multiple full TLB flushes
+- * for no gain.
+- */
+- count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+- flush_tlb_local();
+-}
+-
+ /*
+ * Flush TLB entries for recently unmapped pages from remote CPUs. It is
+ * important if a PTE was dirty when it was unmapped that it's flushed
+@@ -616,15 +603,14 @@ void try_to_unmap_flush(void)
+
+ cpu = get_cpu();
+
+- trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
+-
+- if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
+- percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
+-
+- if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
+- smp_call_function_many(&tlb_ubc->cpumask,
+- percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
++ if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
++ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
++ local_flush_tlb();
++ trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
+ }
++
++ if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
++ flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
+ cpumask_clear(&tlb_ubc->cpumask);
+ tlb_ubc->flush_required = false;
+ tlb_ubc->writable = false;
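
The rmap.c hunk replaces the IPI broadcast with a split: flush the local TLB directly when this CPU is in the batch cpumask, and hand the remaining CPUs to the architecture's flush_tlb_others(). A toy model of that local/remote split using a plain bitmask (CPU numbers illustrative):

    #include <stdio.h>

    #define NR_CPUS 8

    static void flush_local(int cpu) { printf("cpu%d: local flush\n", cpu); }

    static void flush_others(unsigned int mask, int self)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            if ((mask & (1u << cpu)) && cpu != self)
                printf("cpu%d: remote flush\n", cpu);
    }

    int main(void)
    {
        unsigned int batch = 0x0b;      /* CPUs 0, 1, 3 saw the mappings */
        int self = 1;

        if (batch & (1u << self))
            flush_local(self);
        flush_others(batch, self);
        return 0;
    }
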
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index a6beb7b6ae55..f5ef2115871f 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -360,14 +360,16 @@ static struct ctl_table net_core_table[] = {
+ .data = &sysctl_net_busy_poll,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &zero,
+ },
+ {
+ .procname = "busy_read",
+ .data = &sysctl_net_busy_read,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &zero,
+ },
+ #endif
+ #ifdef CONFIG_NET_SCHED
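
The sysctl hunk above switches busy_poll/busy_read to proc_dointvec_minmax with a zero floor because the stored int is consumed as an unsigned quantity; a negative write would otherwise become a huge busy-poll budget. A standalone demonstration of that reinterpretation:

    #include <stdio.h>

    int main(void)
    {
        int written = -1;              /* what plain proc_dointvec would store */
        unsigned int usecs = written;  /* how the busy-poll code reads it back */

        printf("budget becomes %u usecs\n", usecs); /* about 4.29e9 */
        return 0;
    }
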
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index e2e162432aa3..7057a1b09b5e 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -200,6 +200,7 @@ static void ip_expire(unsigned long arg)
+ qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
+ net = container_of(qp->q.net, struct net, ipv4.frags);
+
++ rcu_read_lock();
+ spin_lock(&qp->q.lock);
+
+ if (qp->q.flags & INET_FRAG_COMPLETE)
+@@ -209,7 +210,7 @@ static void ip_expire(unsigned long arg)
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+
+ if (!inet_frag_evicting(&qp->q)) {
+- struct sk_buff *head = qp->q.fragments;
++ struct sk_buff *clone, *head = qp->q.fragments;
+ const struct iphdr *iph;
+ int err;
+
+@@ -218,32 +219,40 @@ static void ip_expire(unsigned long arg)
+ if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
+ goto out;
+
+- rcu_read_lock();
+ head->dev = dev_get_by_index_rcu(net, qp->iif);
+ if (!head->dev)
+- goto out_rcu_unlock;
++ goto out;
++
+
+ /* skb has no dst, perform route lookup again */
+ iph = ip_hdr(head);
+ err = ip_route_input_noref(head, iph->daddr, iph->saddr,
+ iph->tos, head->dev);
+ if (err)
+- goto out_rcu_unlock;
++ goto out;
+
+ /* Only an end host needs to send an ICMP
+ * "Fragment Reassembly Timeout" message, per RFC792.
+ */
+ if (frag_expire_skip_icmp(qp->user) &&
+ (skb_rtable(head)->rt_type != RTN_LOCAL))
+- goto out_rcu_unlock;
++ goto out;
++
++ clone = skb_clone(head, GFP_ATOMIC);
+
+ /* Send an ICMP "Fragment Reassembly Timeout" message. */
+- icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
+-out_rcu_unlock:
+- rcu_read_unlock();
++ if (clone) {
++ spin_unlock(&qp->q.lock);
++ icmp_send(clone, ICMP_TIME_EXCEEDED,
++ ICMP_EXC_FRAGTIME, 0);
++ consume_skb(clone);
++ goto out_rcu_unlock;
++ }
+ }
+ out:
+ spin_unlock(&qp->q.lock);
++out_rcu_unlock:
++ rcu_read_unlock();
+ ipq_put(qp);
+ }
+
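
The ip_expire() hunk clones the queued head skb so icmp_send() can run after qp->q.lock is dropped, with only rcu_read_lock still held. A standalone sketch of the copy-under-lock, work-outside-lock shape, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

    int main(void)
    {
        const char head[] = "fragment head";
        char *clone;

        pthread_mutex_lock(&q_lock);
        clone = strdup(head);           /* like skb_clone(head, GFP_ATOMIC) */
        pthread_mutex_unlock(&q_lock);  /* don't send ICMP under the lock */

        if (clone) {
            printf("icmp_send(%s)\n", clone);
            free(clone);                /* like consume_skb(clone) */
        }
        return 0;
    }
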
+diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
+index 2689c9c4f1a0..182eb878633d 100644
+--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
++++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
+@@ -1260,16 +1260,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = {
+ .timeout = 180,
+ };
+
+-static struct nf_conntrack_helper snmp_helper __read_mostly = {
+- .me = THIS_MODULE,
+- .help = help,
+- .expect_policy = &snmp_exp_policy,
+- .name = "snmp",
+- .tuple.src.l3num = AF_INET,
+- .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
+- .tuple.dst.protonum = IPPROTO_UDP,
+-};
+-
+ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
+ .me = THIS_MODULE,
+ .help = help,
+@@ -1288,17 +1278,10 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
+
+ static int __init nf_nat_snmp_basic_init(void)
+ {
+- int ret = 0;
+-
+ BUG_ON(nf_nat_snmp_hook != NULL);
+ RCU_INIT_POINTER(nf_nat_snmp_hook, help);
+
+- ret = nf_conntrack_helper_register(&snmp_trap_helper);
+- if (ret < 0) {
+- nf_conntrack_helper_unregister(&snmp_helper);
+- return ret;
+- }
+- return ret;
++ return nf_conntrack_helper_register(&snmp_trap_helper);
+ }
+
+ static void __exit nf_nat_snmp_basic_fini(void)
+diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
+index 13951c4087d4..b9fac0522be6 100644
+--- a/net/ipv4/tcp_vegas.c
++++ b/net/ipv4/tcp_vegas.c
+@@ -158,7 +158,7 @@ EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
+
+ static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
+ {
+- return min(tp->snd_ssthresh, tp->snd_cwnd-1);
++ return min(tp->snd_ssthresh, tp->snd_cwnd);
+ }
+
+ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
+index 6d10002d23f8..8d34a488efc0 100644
+--- a/net/netfilter/nfnetlink_cthelper.c
++++ b/net/netfilter/nfnetlink_cthelper.c
+@@ -32,6 +32,13 @@ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+ MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
+
++struct nfnl_cthelper {
++ struct list_head list;
++ struct nf_conntrack_helper helper;
++};
++
++static LIST_HEAD(nfnl_cthelper_list);
++
+ static int
+ nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+@@ -205,18 +212,20 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
+ struct nf_conntrack_tuple *tuple)
+ {
+ struct nf_conntrack_helper *helper;
++ struct nfnl_cthelper *nfcth;
+ int ret;
+
+ if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
+ return -EINVAL;
+
+- helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL);
+- if (helper == NULL)
++ nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL);
++ if (nfcth == NULL)
+ return -ENOMEM;
++ helper = &nfcth->helper;
+
+ ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
+ if (ret < 0)
+- goto err;
++ goto err1;
+
+ strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
+ helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
+@@ -247,14 +256,100 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
+
+ ret = nf_conntrack_helper_register(helper);
+ if (ret < 0)
+- goto err;
++ goto err2;
+
++ list_add_tail(&nfcth->list, &nfnl_cthelper_list);
+ return 0;
+-err:
+- kfree(helper);
++err2:
++ kfree(helper->expect_policy);
++err1:
++ kfree(nfcth);
+ return ret;
+ }
+
++static int
++nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy,
++ struct nf_conntrack_expect_policy *new_policy,
++ const struct nlattr *attr)
++{
++ struct nlattr *tb[NFCTH_POLICY_MAX + 1];
++ int err;
++
++ err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr,
++ nfnl_cthelper_expect_pol);
++ if (err < 0)
++ return err;
++
++ if (!tb[NFCTH_POLICY_NAME] ||
++ !tb[NFCTH_POLICY_EXPECT_MAX] ||
++ !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
++ return -EINVAL;
++
++ if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
++ return -EBUSY;
++
++ new_policy->max_expected =
++ ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
++ new_policy->timeout =
++ ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
++
++ return 0;
++}
++
++static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
++ struct nf_conntrack_helper *helper)
++{
++ struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1];
++ struct nf_conntrack_expect_policy *policy;
++ int i, err;
++
++ /* Check first that all policy attributes are well-formed, so we don't
++ * leave things in inconsistent state on errors.
++ */
++ for (i = 0; i < helper->expect_class_max + 1; i++) {
++
++ if (!tb[NFCTH_POLICY_SET + i])
++ return -EINVAL;
++
++ err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i],
++ &new_policy[i],
++ tb[NFCTH_POLICY_SET + i]);
++ if (err < 0)
++ return err;
++ }
++ /* Now we can safely update them. */
++ for (i = 0; i < helper->expect_class_max + 1; i++) {
++ policy = (struct nf_conntrack_expect_policy *)
++ &helper->expect_policy[i];
++ policy->max_expected = new_policy->max_expected;
++ policy->timeout = new_policy->timeout;
++ }
++
++ return 0;
++}
++
++static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
++ const struct nlattr *attr)
++{
++ struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
++ unsigned int class_max;
++ int err;
++
++ err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
++ nfnl_cthelper_expect_policy_set);
++ if (err < 0)
++ return err;
++
++ if (!tb[NFCTH_POLICY_SET_NUM])
++ return -EINVAL;
++
++ class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
++ if (helper->expect_class_max + 1 != class_max)
++ return -EBUSY;
++
++ return nfnl_cthelper_update_policy_all(tb, helper);
++}
++
+ static int
+ nfnl_cthelper_update(const struct nlattr * const tb[],
+ struct nf_conntrack_helper *helper)
+@@ -265,8 +360,7 @@ nfnl_cthelper_update(const struct nlattr * const tb[],
+ return -EBUSY;
+
+ if (tb[NFCTH_POLICY]) {
+- ret = nfnl_cthelper_parse_expect_policy(helper,
+- tb[NFCTH_POLICY]);
++ ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
+ if (ret < 0)
+ return ret;
+ }
+@@ -295,7 +389,8 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
+ const char *helper_name;
+ struct nf_conntrack_helper *cur, *helper = NULL;
+ struct nf_conntrack_tuple tuple;
+- int ret = 0, i;
++ struct nfnl_cthelper *nlcth;
++ int ret = 0;
+
+ if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
+ return -EINVAL;
+@@ -306,31 +401,22 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
+ if (ret < 0)
+ return ret;
+
+- rcu_read_lock();
+- for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
+- hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
++ list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
++ cur = &nlcth->helper;
+
+- /* skip non-userspace conntrack helpers. */
+- if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+- continue;
++ if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
++ continue;
+
+- if (strncmp(cur->name, helper_name,
+- NF_CT_HELPER_NAME_LEN) != 0)
+- continue;
++ if ((tuple.src.l3num != cur->tuple.src.l3num ||
++ tuple.dst.protonum != cur->tuple.dst.protonum))
++ continue;
+
+- if ((tuple.src.l3num != cur->tuple.src.l3num ||
+- tuple.dst.protonum != cur->tuple.dst.protonum))
+- continue;
++ if (nlh->nlmsg_flags & NLM_F_EXCL)
++ return -EEXIST;
+
+- if (nlh->nlmsg_flags & NLM_F_EXCL) {
+- ret = -EEXIST;
+- goto err;
+- }
+- helper = cur;
+- break;
+- }
++ helper = cur;
++ break;
+ }
+- rcu_read_unlock();
+
+ if (helper == NULL)
+ ret = nfnl_cthelper_create(tb, &tuple);
+@@ -338,9 +424,6 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
+ ret = nfnl_cthelper_update(tb, helper);
+
+ return ret;
+-err:
+- rcu_read_unlock();
+- return ret;
+ }
+
+ static int
+@@ -504,11 +587,12 @@ static int
+ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+ {
+- int ret = -ENOENT, i;
++ int ret = -ENOENT;
+ struct nf_conntrack_helper *cur;
+ struct sk_buff *skb2;
+ char *helper_name = NULL;
+ struct nf_conntrack_tuple tuple;
++ struct nfnl_cthelper *nlcth;
+ bool tuple_set = false;
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+@@ -529,45 +613,39 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
+ tuple_set = true;
+ }
+
+- for (i = 0; i < nf_ct_helper_hsize; i++) {
+- hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
++ list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
++ cur = &nlcth->helper;
++ if (helper_name &&
++ strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
++ continue;
+
+- /* skip non-userspace conntrack helpers. */
+- if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+- continue;
++ if (tuple_set &&
++ (tuple.src.l3num != cur->tuple.src.l3num ||
++ tuple.dst.protonum != cur->tuple.dst.protonum))
++ continue;
+
+- if (helper_name && strncmp(cur->name, helper_name,
+- NF_CT_HELPER_NAME_LEN) != 0) {
+- continue;
+- }
+- if (tuple_set &&
+- (tuple.src.l3num != cur->tuple.src.l3num ||
+- tuple.dst.protonum != cur->tuple.dst.protonum))
+- continue;
+-
+- skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+- if (skb2 == NULL) {
+- ret = -ENOMEM;
+- break;
+- }
++ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++ if (skb2 == NULL) {
++ ret = -ENOMEM;
++ break;
++ }
+
+- ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
+- nlh->nlmsg_seq,
+- NFNL_MSG_TYPE(nlh->nlmsg_type),
+- NFNL_MSG_CTHELPER_NEW, cur);
+- if (ret <= 0) {
+- kfree_skb(skb2);
+- break;
+- }
++ ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
++ nlh->nlmsg_seq,
++ NFNL_MSG_TYPE(nlh->nlmsg_type),
++ NFNL_MSG_CTHELPER_NEW, cur);
++ if (ret <= 0) {
++ kfree_skb(skb2);
++ break;
++ }
+
+- ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
+- MSG_DONTWAIT);
+- if (ret > 0)
+- ret = 0;
++ ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
++ MSG_DONTWAIT);
++ if (ret > 0)
++ ret = 0;
+
+- /* this avoids a loop in nfnetlink. */
+- return ret == -EAGAIN ? -ENOBUFS : ret;
+- }
++ /* this avoids a loop in nfnetlink. */
++ return ret == -EAGAIN ? -ENOBUFS : ret;
+ }
+ return ret;
+ }
+@@ -578,10 +656,10 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
+ {
+ char *helper_name = NULL;
+ struct nf_conntrack_helper *cur;
+- struct hlist_node *tmp;
+ struct nf_conntrack_tuple tuple;
+ bool tuple_set = false, found = false;
+- int i, j = 0, ret;
++ struct nfnl_cthelper *nlcth, *n;
++ int j = 0, ret;
+
+ if (tb[NFCTH_NAME])
+ helper_name = nla_data(tb[NFCTH_NAME]);
+@@ -594,28 +672,27 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
+ tuple_set = true;
+ }
+
+- for (i = 0; i < nf_ct_helper_hsize; i++) {
+- hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
+- hnode) {
+- /* skip non-userspace conntrack helpers. */
+- if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+- continue;
++ list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
++ cur = &nlcth->helper;
++ j++;
+
+- j++;
++ if (helper_name &&
++ strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
++ continue;
+
+- if (helper_name && strncmp(cur->name, helper_name,
+- NF_CT_HELPER_NAME_LEN) != 0) {
+- continue;
+- }
+- if (tuple_set &&
+- (tuple.src.l3num != cur->tuple.src.l3num ||
+- tuple.dst.protonum != cur->tuple.dst.protonum))
+- continue;
++ if (tuple_set &&
++ (tuple.src.l3num != cur->tuple.src.l3num ||
++ tuple.dst.protonum != cur->tuple.dst.protonum))
++ continue;
+
+- found = true;
+- nf_conntrack_helper_unregister(cur);
+- }
++ found = true;
++ nf_conntrack_helper_unregister(cur);
++ kfree(cur->expect_policy);
++
++ list_del(&nlcth->list);
++ kfree(nlcth);
+ }
++
+ /* Make sure we return success if we flush and there is no helpers */
+ return (found || j == 0) ? 0 : -ENOENT;
+ }
+@@ -664,20 +741,16 @@ err_out:
+ static void __exit nfnl_cthelper_exit(void)
+ {
+ struct nf_conntrack_helper *cur;
+- struct hlist_node *tmp;
+- int i;
++ struct nfnl_cthelper *nlcth, *n;
+
+ nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
+
+- for (i=0; i<nf_ct_helper_hsize; i++) {
+- hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
+- hnode) {
+- /* skip non-userspace conntrack helpers. */
+- if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+- continue;
++ list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
++ cur = &nlcth->helper;
+
+- nf_conntrack_helper_unregister(cur);
+- }
++ nf_conntrack_helper_unregister(cur);
++ kfree(cur->expect_policy);
++ kfree(nlcth);
+ }
+ }
+
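
The nfnl_cthelper rework embeds each userspace helper in a wrapper that also carries a list node, so new/get/del/exit walk a private list instead of scanning the global helper hash and filtering on NF_CT_HELPER_F_USERSPACE. A standalone sketch of that wrapper-plus-own-list pattern (types heavily simplified):

    #include <stdio.h>
    #include <stdlib.h>

    struct helper { char name[16]; };

    struct entry {
        struct entry *next;     /* stand-in for struct list_head */
        struct helper helper;   /* object handed to the conntrack core */
    };

    static struct entry *owned; /* only this module's registrations */

    static struct helper *register_helper(const char *name)
    {
        struct entry *e = calloc(1, sizeof(*e));

        if (!e)
            return NULL;
        snprintf(e->helper.name, sizeof(e->helper.name), "%s", name);
        e->next = owned;
        owned = e;
        return &e->helper;
    }

    int main(void)
    {
        register_helper("ftp-user");
        register_helper("sip-user");
        for (struct entry *e = owned; e; e = e->next)
            printf("owned helper: %s\n", e->helper.name);
        return 0;
    }
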
+diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
+index 861c6615253b..f6837f9b6d6c 100644
+--- a/net/netfilter/nfnetlink_queue.c
++++ b/net/netfilter/nfnetlink_queue.c
+@@ -390,7 +390,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
+ GFP_ATOMIC);
+ if (!skb) {
+ skb_tx_error(entskb);
+- return NULL;
++ goto nlmsg_failure;
+ }
+
+ nlh = nlmsg_put(skb, 0, 0,
+@@ -399,7 +399,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
+ if (!nlh) {
+ skb_tx_error(entskb);
+ kfree_skb(skb);
+- return NULL;
++ goto nlmsg_failure;
+ }
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = entry->state.pf;
+@@ -542,12 +542,17 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
+ }
+
+ nlh->nlmsg_len = skb->len;
++ if (seclen)
++ security_release_secctx(secdata, seclen);
+ return skb;
+
+ nla_put_failure:
+ skb_tx_error(entskb);
+ kfree_skb(skb);
+ net_err_ratelimited("nf_queue: error creating packet message\n");
++nlmsg_failure:
++ if (seclen)
++ security_release_secctx(secdata, seclen);
+ return NULL;
+ }
+
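
The nfnetlink_queue hunk adds an nlmsg_failure label so security_release_secctx() runs on every exit path, not only on success. A standalone sketch of that single-release-point goto shape (allocations are illustrative stand-ins):

    #include <stdio.h>
    #include <stdlib.h>

    static int build_message(void)
    {
        int ret = -1;
        char *secdata = malloc(32);   /* like the secctx lookup */
        char *msg;

        if (!secdata)
            return ret;

        msg = malloc(128);            /* like nlmsg_new()/nlmsg_put() */
        if (!msg)
            goto out_secctx;          /* failure path still releases */

        puts("message built");
        free(msg);
        ret = 0;
    out_secctx:
        free(secdata);                /* one release point, all paths */
        return ret;
    }

    int main(void) { return build_message() ? 1 : 0; }
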
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index a87afc4f3c91..5fabe68e20dd 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -96,6 +96,44 @@ EXPORT_SYMBOL_GPL(nl_table);
+
+ static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
+
++static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
++
++static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
++ "nlk_cb_mutex-ROUTE",
++ "nlk_cb_mutex-1",
++ "nlk_cb_mutex-USERSOCK",
++ "nlk_cb_mutex-FIREWALL",
++ "nlk_cb_mutex-SOCK_DIAG",
++ "nlk_cb_mutex-NFLOG",
++ "nlk_cb_mutex-XFRM",
++ "nlk_cb_mutex-SELINUX",
++ "nlk_cb_mutex-ISCSI",
++ "nlk_cb_mutex-AUDIT",
++ "nlk_cb_mutex-FIB_LOOKUP",
++ "nlk_cb_mutex-CONNECTOR",
++ "nlk_cb_mutex-NETFILTER",
++ "nlk_cb_mutex-IP6_FW",
++ "nlk_cb_mutex-DNRTMSG",
++ "nlk_cb_mutex-KOBJECT_UEVENT",
++ "nlk_cb_mutex-GENERIC",
++ "nlk_cb_mutex-17",
++ "nlk_cb_mutex-SCSITRANSPORT",
++ "nlk_cb_mutex-ECRYPTFS",
++ "nlk_cb_mutex-RDMA",
++ "nlk_cb_mutex-CRYPTO",
++ "nlk_cb_mutex-SMC",
++ "nlk_cb_mutex-23",
++ "nlk_cb_mutex-24",
++ "nlk_cb_mutex-25",
++ "nlk_cb_mutex-26",
++ "nlk_cb_mutex-27",
++ "nlk_cb_mutex-28",
++ "nlk_cb_mutex-29",
++ "nlk_cb_mutex-30",
++ "nlk_cb_mutex-31",
++ "nlk_cb_mutex-MAX_LINKS"
++};
++
+ static int netlink_dump(struct sock *sk);
+ static void netlink_skb_destructor(struct sk_buff *skb);
+
+@@ -585,6 +623,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
+ } else {
+ nlk->cb_mutex = &nlk->cb_def_mutex;
+ mutex_init(nlk->cb_mutex);
++ lockdep_set_class_and_name(nlk->cb_mutex,
++ nlk_cb_mutex_keys + protocol,
++ nlk_cb_mutex_key_strings[protocol]);
+ }
+ init_waitqueue_head(&nlk->wait);
+
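
The af_netlink hunk gives each netlink protocol its own lockdep class and a human-readable name for cb_mutex, both indexed by protocol number, so lockdep stops lumping every protocol's dump mutex into one class. A standalone sketch of only the per-protocol table lookup, since lockdep itself is kernel-only (entries shown are a subset):

    #include <stdio.h>

    #define MAX_LINKS 32

    static const char *const mutex_names[MAX_LINKS] = {
        [0]  = "nlk_cb_mutex-ROUTE",
        [16] = "nlk_cb_mutex-GENERIC",
    };

    int main(void)
    {
        int protocol = 16;
        const char *name = mutex_names[protocol];

        printf("cb_mutex class: %s\n", name ? name : "nlk_cb_mutex-?");
        return 0;
    }
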
+diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
+index d0dff0cd8186..cce4e6ada7fa 100644
+--- a/net/sched/sch_dsmark.c
++++ b/net/sched/sch_dsmark.c
+@@ -199,9 +199,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
+
+ if (p->set_tc_index) {
++ int wlen = skb_network_offset(skb);
++
+ switch (tc_skb_protocol(skb)) {
+ case htons(ETH_P_IP):
+- if (skb_cow_head(skb, sizeof(struct iphdr)))
++ wlen += sizeof(struct iphdr);
++ if (!pskb_may_pull(skb, wlen) ||
++ skb_try_make_writable(skb, wlen))
+ goto drop;
+
+ skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
+@@ -209,7 +213,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ break;
+
+ case htons(ETH_P_IPV6):
+- if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
++ wlen += sizeof(struct ipv6hdr);
++ if (!pskb_may_pull(skb, wlen) ||
++ skb_try_make_writable(skb, wlen))
+ goto drop;
+
+ skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
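
The sch_dsmark hunk computes the full header length (network offset plus IP/IPv6 header) and checks pskb_may_pull() plus skb_try_make_writable() before touching the DS field, rather than only reserving headroom with skb_cow_head(). A standalone sketch of validating length before rewriting a header byte (offsets illustrative):

    #include <stdio.h>
    #include <string.h>

    /* are 'need' bytes actually present? (cf. pskb_may_pull()) */
    static int may_pull(size_t have, size_t need) { return need <= have; }

    int main(void)
    {
        unsigned char pkt[64] = {0};
        size_t net_off = 14;            /* Ethernet header */
        size_t need = net_off + 20;     /* plus minimal IPv4 header */

        if (!may_pull(sizeof(pkt), need)) {
            puts("drop: truncated header");
            return 1;
        }
        pkt[net_off + 1] = 0x2e;        /* safe DS-field rewrite */
        printf("tos byte: 0x%02x\n", pkt[net_off + 1]);
        return 0;
    }
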
+diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
+index 8fef1b8d1fd8..cce9ae5ec93b 100644
+--- a/sound/hda/hdac_i915.c
++++ b/sound/hda/hdac_i915.c
+@@ -240,7 +240,8 @@ out_master_del:
+ out_err:
+ kfree(acomp);
+ bus->audio_component = NULL;
+- dev_err(dev, "failed to add i915 component master (%d)\n", ret);
++ hdac_acomp = NULL;
++ dev_info(dev, "failed to add i915 component master (%d)\n", ret);
+
+ return ret;
+ }
+@@ -273,6 +274,7 @@ int snd_hdac_i915_exit(struct hdac_bus *bus)
+
+ kfree(acomp);
+ bus->audio_component = NULL;
++ hdac_acomp = NULL;
+
+ return 0;
+ }
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index e2e08fc73b50..20512fe32a97 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2088,9 +2088,11 @@ static int azx_probe_continue(struct azx *chip)
+ * for other chips, still continue probing as other
+ * codecs can be on the same link.
+ */
+- if (CONTROLLER_IN_GPU(pci))
++ if (CONTROLLER_IN_GPU(pci)) {
++ dev_err(chip->card->dev,
++ "HSW/BDW HD-audio HDMI/DP requires binding with gfx driver\n");
+ goto out_free;
+- else
++ } else
+ goto skip_i915;
+ }
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index ac5de4365e15..c92b7ba344ef 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -261,6 +261,7 @@ enum {
+ CXT_FIXUP_HP_530,
+ CXT_FIXUP_CAP_MIX_AMP_5047,
+ CXT_FIXUP_MUTE_LED_EAPD,
++ CXT_FIXUP_HP_DOCK,
+ CXT_FIXUP_HP_SPECTRE,
+ CXT_FIXUP_HP_GATE_MIC,
+ };
+@@ -778,6 +779,14 @@ static const struct hda_fixup cxt_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cxt_fixup_mute_led_eapd,
+ },
++ [CXT_FIXUP_HP_DOCK] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x16, 0x21011020 }, /* line-out */
++ { 0x18, 0x2181103f }, /* line-in */
++ { }
++ }
++ },
+ [CXT_FIXUP_HP_SPECTRE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -839,6 +848,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
++ SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
+ SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
+@@ -872,6 +882,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
+ { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
+ { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
+ { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" },
++ { .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" },
+ {}
+ };
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e5730a7d0480..2159b18f76bf 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4839,6 +4839,7 @@ enum {
+ ALC286_FIXUP_HP_GPIO_LED,
+ ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
+ ALC280_FIXUP_HP_DOCK_PINS,
++ ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED,
+ ALC280_FIXUP_HP_9480M,
+ ALC288_FIXUP_DELL_HEADSET_MODE,
+ ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
+@@ -5377,6 +5378,16 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC280_FIXUP_HP_GPIO4
+ },
++ [ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x1b, 0x21011020 }, /* line-out */
++ { 0x18, 0x2181103f }, /* line-in */
++ { },
++ },
++ .chained = true,
++ .chain_id = ALC269_FIXUP_HP_GPIO_MIC1_LED
++ },
+ [ALC280_FIXUP_HP_9480M] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc280_fixup_hp_9480m,
+@@ -5629,7 +5640,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+- SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
++ SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+@@ -5794,6 +5805,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
+ {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
+ {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
++ {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
+ {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
+ {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
+ {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index cb092bd9965b..d080f06fd8d9 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -986,7 +986,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ * changes) is disallowed above, so any other attribute changes getting
+ * here can be skipped.
+ */
+- if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
++ if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) {
+ r = kvm_iommu_map_pages(kvm, &new);
+ return r;
+ }