author    | Mike Pagano <mpagano@gentoo.org> | 2015-10-23 15:40:50 -0400
committer | Mike Pagano <mpagano@gentoo.org> | 2015-10-23 15:40:50 -0400
commit    | 1f68c4b7ed0ede1122e5f9c00c617c7cfee659f8 (patch)
tree      | 3e75c374da8cf0207f4112cce2c670a845f47c4e
parent    | Linux patch 3.14.54 (diff)
download  | linux-patches-1f68c4b7ed0ede1122e5f9c00c617c7cfee659f8.tar.gz
          | linux-patches-1f68c4b7ed0ede1122e5f9c00c617c7cfee659f8.tar.bz2
          | linux-patches-1f68c4b7ed0ede1122e5f9c00c617c7cfee659f8.zip
Linux patch 3.14.55 (tag: 3.14-61)
-rw-r--r-- | 0000_README              |    4
-rw-r--r-- | 1054_linux-3.14.55.patch | 2917
2 files changed, 2921 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index 12856a35..a1429953 100644
--- a/0000_README
+++ b/0000_README
@@ -258,6 +258,10 @@ Patch:  1053_linux-3.14.54.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.14.54
 
+Patch:  1054_linux-3.14.55.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.14.55
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
diff --git a/1054_linux-3.14.55.patch b/1054_linux-3.14.55.patch
new file mode 100644
index 00000000..f5b8d2d3
--- /dev/null
+++ b/1054_linux-3.14.55.patch
@@ -0,0 +1,2917 @@
+diff --git a/Makefile b/Makefile
+index 22c91fa0411e..97d18c1d27f2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,8 +1,8 @@
+ VERSION = 3
+ PATCHLEVEL = 14
+-SUBLEVEL = 54
++SUBLEVEL = 55
+ EXTRAVERSION =
+-NAME = Kernel Recipes 2015
++NAME = Remembering Coco
+
+ # *DOCUMENTATION*
+ # To see a list of typical targets execute "make help"
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index 08a9ef58d9c3..6ca3f2ebaa9c 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -52,6 +52,14 @@ endif
+
+ comma = ,
+
++#
++# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
++# later may result in code being generated that handles signed short and signed
++# char struct members incorrectly. So disable it.
++# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
++#
++KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
++
+ # This selects which instruction set is used.
+ # Note that GCC does not numerically define an architecture version
+ # macro, but instead defines a whole series of macros which makes
+diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
+index 002fa70180a5..1186a50fc1de 100644
+--- a/arch/arm/boot/dts/omap5-uevm.dts
++++ b/arch/arm/boot/dts/omap5-uevm.dts
+@@ -111,8 +111,8 @@
+
+ 	i2c5_pins: pinmux_i2c5_pins {
+ 		pinctrl-single,pins = <
+-			0x184 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
+-			0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
++			0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
++			0x188 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
+ 		>;
+ 	};
+
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index 04d63880037f..1e5a4fd25a50 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -353,12 +353,17 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
+ 	 */
+ 	thumb = handler & 1;
+
+-#if __LINUX_ARM_ARCH__ >= 7
++#if __LINUX_ARM_ARCH__ >= 6
+ 	/*
+-	 * Clear the If-Then Thumb-2 execution state
+-	 * ARM spec requires this to be all 000s in ARM mode
+-	 * Snapdragon S4/Krait misbehaves on a Thumb=>ARM
+-	 * signal transition without this.
++	 * Clear the If-Then Thumb-2 execution state. ARM spec
++	 * requires this to be all 000s in ARM mode. Snapdragon
++	 * S4/Krait misbehaves on a Thumb=>ARM signal transition
++	 * without this.
++	 *
++	 * We must do this whenever we are running on a Thumb-2
++	 * capable CPU, which includes ARMv6T2. However, we elect
++	 * to do this whenever we're on an ARMv6 or later CPU for
++	 * simplicity.
+ 	 */
+ 	cpsr &= ~PSR_IT_MASK;
+ #endif
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index c23751b06120..cc083b6e4ce7 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -278,6 +278,7 @@ retry:
+ 			 * starvation.
+ */ + mm_flags &= ~FAULT_FLAG_ALLOW_RETRY; ++ mm_flags |= FAULT_FLAG_TRIED; + goto retry; + } + } +diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h +deleted file mode 100644 +index 4e863daea25b..000000000000 +--- a/arch/hexagon/include/asm/barrier.h ++++ /dev/null +@@ -1,37 +0,0 @@ +-/* +- * Memory barrier definitions for the Hexagon architecture +- * +- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License version 2 and +- * only version 2 as published by the Free Software Foundation. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. +- * +- * You should have received a copy of the GNU General Public License +- * along with this program; if not, write to the Free Software +- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +- * 02110-1301, USA. +- */ +- +-#ifndef _ASM_BARRIER_H +-#define _ASM_BARRIER_H +- +-#define rmb() barrier() +-#define read_barrier_depends() barrier() +-#define wmb() barrier() +-#define mb() barrier() +-#define smp_rmb() barrier() +-#define smp_read_barrier_depends() barrier() +-#define smp_wmb() barrier() +-#define smp_mb() barrier() +- +-/* Set a value and use a memory barrier. Used by the scheduler somewhere. */ +-#define set_mb(var, value) \ +- do { var = value; mb(); } while (0) +- +-#endif /* _ASM_BARRIER_H */ +diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h +index 5a822bb790f7..066e74f666ae 100644 +--- a/arch/m68k/include/asm/linkage.h ++++ b/arch/m68k/include/asm/linkage.h +@@ -4,4 +4,34 @@ + #define __ALIGN .align 4 + #define __ALIGN_STR ".align 4" + ++/* ++ * Make sure the compiler doesn't do anything stupid with the ++ * arguments on the stack - they are owned by the *caller*, not ++ * the callee. This just fools gcc into not spilling into them, ++ * and keeps it from doing tailcall recursion and/or using the ++ * stack slots for temporaries, since they are live and "used" ++ * all the way to the end of the function. ++ */ ++#define asmlinkage_protect(n, ret, args...) \ ++ __asmlinkage_protect##n(ret, ##args) ++#define __asmlinkage_protect_n(ret, args...) 
\ ++ __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args) ++#define __asmlinkage_protect0(ret) \ ++ __asmlinkage_protect_n(ret) ++#define __asmlinkage_protect1(ret, arg1) \ ++ __asmlinkage_protect_n(ret, "m" (arg1)) ++#define __asmlinkage_protect2(ret, arg1, arg2) \ ++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2)) ++#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \ ++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3)) ++#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \ ++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ ++ "m" (arg4)) ++#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \ ++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ ++ "m" (arg4), "m" (arg5)) ++#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \ ++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ ++ "m" (arg4), "m" (arg5), "m" (arg6)) ++ + #endif +diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c +index 44b6dff5aba2..a1087593b3c2 100644 +--- a/arch/mips/mm/dma-default.c ++++ b/arch/mips/mm/dma-default.c +@@ -94,7 +94,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) + else + #endif + #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) +- if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) ++ if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8)) + dma_flag = __GFP_DMA; + else + #endif +diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c +index 52c1162bcee3..4ca00128ae34 100644 +--- a/arch/powerpc/platforms/powernv/pci.c ++++ b/arch/powerpc/platforms/powernv/pci.c +@@ -109,6 +109,7 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev) + struct pci_controller *hose = pci_bus_to_host(pdev->bus); + struct pnv_phb *phb = hose->private_data; + struct msi_desc *entry; ++ irq_hw_number_t hwirq; + + if (WARN_ON(!phb)) + return; +@@ -116,10 +117,10 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev) + list_for_each_entry(entry, &pdev->msi_list, list) { + if (entry->irq == NO_IRQ) + continue; ++ hwirq = virq_to_hw(entry->irq); + irq_set_msi_desc(entry->irq, NULL); +- msi_bitmap_free_hwirqs(&phb->msi_bmp, +- virq_to_hw(entry->irq) - phb->msi_base, 1); + irq_dispose_mapping(entry->irq); ++ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1); + } + } + #endif /* CONFIG_PCI_MSI */ +diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c +index 77efbaec7b9c..4a9b36777775 100644 +--- a/arch/powerpc/sysdev/fsl_msi.c ++++ b/arch/powerpc/sysdev/fsl_msi.c +@@ -121,15 +121,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev) + { + struct msi_desc *entry; + struct fsl_msi *msi_data; ++ irq_hw_number_t hwirq; + + list_for_each_entry(entry, &pdev->msi_list, list) { + if (entry->irq == NO_IRQ) + continue; ++ hwirq = virq_to_hw(entry->irq); + msi_data = irq_get_chip_data(entry->irq); + irq_set_msi_desc(entry->irq, NULL); +- msi_bitmap_free_hwirqs(&msi_data->bitmap, +- virq_to_hw(entry->irq), 1); + irq_dispose_mapping(entry->irq); ++ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1); + } + + return; +diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c +index 38e62382070c..9e14d82287a1 100644 +--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c ++++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c +@@ -74,6 +74,7 @@ static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type) + static void pasemi_msi_teardown_msi_irqs(struct pci_dev 
*pdev) + { + struct msi_desc *entry; ++ irq_hw_number_t hwirq; + + pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev); + +@@ -81,10 +82,11 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev) + if (entry->irq == NO_IRQ) + continue; + ++ hwirq = virq_to_hw(entry->irq); + irq_set_msi_desc(entry->irq, NULL); +- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, +- virq_to_hw(entry->irq), ALLOC_CHUNK); + irq_dispose_mapping(entry->irq); ++ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, ++ hwirq, ALLOC_CHUNK); + } + + return; +diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c +index 9a7aa0ed9c1c..dfc3486bf802 100644 +--- a/arch/powerpc/sysdev/mpic_u3msi.c ++++ b/arch/powerpc/sysdev/mpic_u3msi.c +@@ -124,15 +124,16 @@ static int u3msi_msi_check_device(struct pci_dev *pdev, int nvec, int type) + static void u3msi_teardown_msi_irqs(struct pci_dev *pdev) + { + struct msi_desc *entry; ++ irq_hw_number_t hwirq; + + list_for_each_entry(entry, &pdev->msi_list, list) { + if (entry->irq == NO_IRQ) + continue; + ++ hwirq = virq_to_hw(entry->irq); + irq_set_msi_desc(entry->irq, NULL); +- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, +- virq_to_hw(entry->irq), 1); + irq_dispose_mapping(entry->irq); ++ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1); + } + + return; +diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c +index 43948da837a7..c3e65129940b 100644 +--- a/arch/powerpc/sysdev/ppc4xx_msi.c ++++ b/arch/powerpc/sysdev/ppc4xx_msi.c +@@ -121,16 +121,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev) + { + struct msi_desc *entry; + struct ppc4xx_msi *msi_data = &ppc4xx_msi; ++ irq_hw_number_t hwirq; + + dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n"); + + list_for_each_entry(entry, &dev->msi_list, list) { + if (entry->irq == NO_IRQ) + continue; ++ hwirq = virq_to_hw(entry->irq); + irq_set_msi_desc(entry->irq, NULL); +- msi_bitmap_free_hwirqs(&msi_data->bitmap, +- virq_to_hw(entry->irq), 1); + irq_dispose_mapping(entry->irq); ++ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1); + } + } + +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index 523f147b2470..b6ee63a69122 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -359,6 +359,13 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) + apic_write(APIC_LVTT, lvtt_value); + + if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) { ++ /* ++ * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode, ++ * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized. ++ * According to Intel, MFENCE can do the serialization here. ++ */ ++ asm volatile("mfence" : : : "memory"); ++ + printk_once(KERN_DEBUG "TSC deadline timer enabled\n"); + return; + } +diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S +index 6d6ab2b0bdfa..f2f2b97b7cc4 100644 +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -1684,7 +1684,18 @@ END(error_exit) + /* runs on exception stack */ + ENTRY(nmi) + INTR_FRAME ++ /* ++ * Fix up the exception frame if we're on Xen. ++ * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most ++ * one value to the stack on native, so it may clobber the rdx ++ * scratch slot, but it won't clobber any of the important ++ * slots past it. ++ * ++ * Xen is a different story, because the Xen frame itself overlaps ++ * the "NMI executing" variable. ++ */ + PARAVIRT_ADJUST_EXCEPTION_FRAME ++ + /* + * We allow breakpoints in NMIs. 
If a breakpoint occurs, then + * the iretq it performs will take us out of NMI context. +@@ -1736,8 +1747,11 @@ ENTRY(nmi) + * we don't want to enable interrupts, because then we'll end + * up in an awkward situation in which IRQs are on but NMIs + * are off. ++ * ++ * We also must not push anything to the stack before switching ++ * stacks lest we corrupt the "NMI executing" variable. + */ +- SWAPGS ++ SWAPGS_UNSAFE_STACK + cld + movq %rsp, %rdx + movq PER_CPU_VAR(kernel_stack), %rsp +diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c +index 1b10af835c31..45c2045692bd 100644 +--- a/arch/x86/kernel/paravirt.c ++++ b/arch/x86/kernel/paravirt.c +@@ -40,10 +40,18 @@ + #include <asm/timer.h> + #include <asm/special_insns.h> + +-/* nop stub */ +-void _paravirt_nop(void) +-{ +-} ++/* ++ * nop stub, which must not clobber anything *including the stack* to ++ * avoid confusing the entry prologues. ++ */ ++extern void _paravirt_nop(void); ++asm (".pushsection .entry.text, \"ax\"\n" ++ ".global _paravirt_nop\n" ++ "_paravirt_nop:\n\t" ++ "ret\n\t" ++ ".size _paravirt_nop, . - _paravirt_nop\n\t" ++ ".type _paravirt_nop, @function\n\t" ++ ".popsection"); + + /* identity function, which can be inlined */ + u32 _paravirt_ident_32(u32 x) +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c +index b20bced0090f..8bc924ff88ee 100644 +--- a/arch/x86/kernel/tsc.c ++++ b/arch/x86/kernel/tsc.c +@@ -21,6 +21,7 @@ + #include <asm/hypervisor.h> + #include <asm/nmi.h> + #include <asm/x86_init.h> ++#include <asm/geode.h> + + unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ + EXPORT_SYMBOL(cpu_khz); +@@ -1011,15 +1012,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable); + + static void __init check_system_tsc_reliable(void) + { +-#ifdef CONFIG_MGEODE_LX +- /* RTSC counts during suspend */ ++#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC) ++ if (is_geode_lx()) { ++ /* RTSC counts during suspend */ + #define RTSC_SUSP 0x100 +- unsigned long res_low, res_high; ++ unsigned long res_low, res_high; + +- rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high); +- /* Geode_LX - the OLPC CPU has a very reliable TSC */ +- if (res_low & RTSC_SUSP) +- tsc_clocksource_reliable = 1; ++ rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high); ++ /* Geode_LX - the OLPC CPU has a very reliable TSC */ ++ if (res_low & RTSC_SUSP) ++ tsc_clocksource_reliable = 1; ++ } + #endif + if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) + tsc_clocksource_reliable = 1; +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 074633411ea8..e23a539e2077 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -496,7 +496,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) + struct vcpu_svm *svm = to_svm(vcpu); + + if (svm->vmcb->control.next_rip != 0) { +- WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS)); ++ WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS)); + svm->next_rip = svm->vmcb->control.next_rip; + } + +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c +index 2308a401a1c5..0029f59588bd 100644 +--- a/arch/x86/mm/init_64.c ++++ b/arch/x86/mm/init_64.c +@@ -1131,7 +1131,7 @@ void mark_rodata_ro(void) + * has been zapped already via cleanup_highmem(). 
+ */ + all_end = roundup((unsigned long)_brk_end, PMD_SIZE); +- set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); ++ set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT); + + rodata_test(); + +diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c +index abb81b0ad83f..ae7d543f23ed 100644 +--- a/arch/x86/platform/efi/efi.c ++++ b/arch/x86/platform/efi/efi.c +@@ -961,6 +961,70 @@ out: + } + + /* ++ * Iterate the EFI memory map in reverse order because the regions ++ * will be mapped top-down. The end result is the same as if we had ++ * mapped things forward, but doesn't require us to change the ++ * existing implementation of efi_map_region(). ++ */ ++static inline void *efi_map_next_entry_reverse(void *entry) ++{ ++ /* Initial call */ ++ if (!entry) ++ return memmap.map_end - memmap.desc_size; ++ ++ entry -= memmap.desc_size; ++ if (entry < memmap.map) ++ return NULL; ++ ++ return entry; ++} ++ ++/* ++ * efi_map_next_entry - Return the next EFI memory map descriptor ++ * @entry: Previous EFI memory map descriptor ++ * ++ * This is a helper function to iterate over the EFI memory map, which ++ * we do in different orders depending on the current configuration. ++ * ++ * To begin traversing the memory map @entry must be %NULL. ++ * ++ * Returns %NULL when we reach the end of the memory map. ++ */ ++static void *efi_map_next_entry(void *entry) ++{ ++ if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) { ++ /* ++ * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE ++ * config table feature requires us to map all entries ++ * in the same order as they appear in the EFI memory ++ * map. That is to say, entry N must have a lower ++ * virtual address than entry N+1. This is because the ++ * firmware toolchain leaves relative references in ++ * the code/data sections, which are split and become ++ * separate EFI memory regions. Mapping things ++ * out-of-order leads to the firmware accessing ++ * unmapped addresses. ++ * ++ * Since we need to map things this way whether or not ++ * the kernel actually makes use of ++ * EFI_PROPERTIES_TABLE, let's just switch to this ++ * scheme by default for 64-bit. ++ */ ++ return efi_map_next_entry_reverse(entry); ++ } ++ ++ /* Initial call */ ++ if (!entry) ++ return memmap.map; ++ ++ entry += memmap.desc_size; ++ if (entry >= memmap.map_end) ++ return NULL; ++ ++ return entry; ++} ++ ++/* + * Map the efi memory ranges of the runtime services and update new_mmap with + * virtual addresses. 
+ */ +@@ -970,7 +1034,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift) + unsigned long left = 0; + efi_memory_desc_t *md; + +- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { ++ p = NULL; ++ while ((p = efi_map_next_entry(p))) { + md = p; + if (!(md->attribute & EFI_MEMORY_RUNTIME)) { + #ifdef CONFIG_X86_64 +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c +index 2302f10b1be6..4dca0d50762e 100644 +--- a/arch/x86/xen/enlighten.c ++++ b/arch/x86/xen/enlighten.c +@@ -33,6 +33,10 @@ + #include <linux/memblock.h> + #include <linux/edd.h> + ++#ifdef CONFIG_KEXEC_CORE ++#include <linux/kexec.h> ++#endif ++ + #include <xen/xen.h> + #include <xen/events.h> + #include <xen/interface/xen.h> +@@ -1844,6 +1848,21 @@ static struct notifier_block xen_hvm_cpu_notifier = { + .notifier_call = xen_hvm_cpu_notify, + }; + ++#ifdef CONFIG_KEXEC_CORE ++static void xen_hvm_shutdown(void) ++{ ++ native_machine_shutdown(); ++ if (kexec_in_progress) ++ xen_reboot(SHUTDOWN_soft_reset); ++} ++ ++static void xen_hvm_crash_shutdown(struct pt_regs *regs) ++{ ++ native_machine_crash_shutdown(regs); ++ xen_reboot(SHUTDOWN_soft_reset); ++} ++#endif ++ + static void __init xen_hvm_guest_init(void) + { + init_hvm_pv_info(); +@@ -1860,6 +1879,10 @@ static void __init xen_hvm_guest_init(void) + x86_init.irqs.intr_init = xen_init_IRQ; + xen_hvm_init_time_ops(); + xen_hvm_init_mmu_ops(); ++#ifdef CONFIG_KEXEC_CORE ++ machine_ops.shutdown = xen_hvm_shutdown; ++ machine_ops.crash_shutdown = xen_hvm_crash_shutdown; ++#endif + } + + static uint32_t __init xen_hvm_platform(void) +diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c +index d39fd610aa3b..1dca2a516bfd 100644 +--- a/drivers/base/regmap/regmap-debugfs.c ++++ b/drivers/base/regmap/regmap-debugfs.c +@@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock); + /* Calculate the length of a fixed format */ + static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size) + { +- snprintf(buf, buf_size, "%x", max_val); +- return strlen(buf); ++ return snprintf(NULL, 0, "%x", max_val); + } + + static ssize_t regmap_name_read_file(struct file *file, +@@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file, + /* If we're in the region the user is trying to read */ + if (p >= *ppos) { + /* ...but not beyond it */ +- if (buf_pos >= count - 1 - tot_len) ++ if (buf_pos + tot_len + 1 >= count) + break; + + /* Format the register */ +diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c +index d3230234f07b..8c7b048bd0ab 100644 +--- a/drivers/clk/ti/clk-3xxx.c ++++ b/drivers/clk/ti/clk-3xxx.c +@@ -174,7 +174,6 @@ static struct ti_dt_clk omap3xxx_clks[] = { + DT_CLK(NULL, "gpio2_ick", "gpio2_ick"), + DT_CLK(NULL, "wdt3_ick", "wdt3_ick"), + DT_CLK(NULL, "uart3_ick", "uart3_ick"), +- DT_CLK(NULL, "uart4_ick", "uart4_ick"), + DT_CLK(NULL, "gpt9_ick", "gpt9_ick"), + DT_CLK(NULL, "gpt8_ick", "gpt8_ick"), + DT_CLK(NULL, "gpt7_ick", "gpt7_ick"), +@@ -317,6 +316,7 @@ static struct ti_dt_clk am35xx_clks[] = { + static struct ti_dt_clk omap36xx_clks[] = { + DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"), + DT_CLK(NULL, "uart4_fck", "uart4_fck"), ++ DT_CLK(NULL, "uart4_ick", "uart4_ick"), + { .node_name = NULL }, + }; + +diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c +index b0972b3869c7..3ae48ee2f488 100644 +--- a/drivers/dma/dw/core.c ++++ b/drivers/dma/dw/core.c +@@ -1561,7 +1561,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct 
dw_dma_platform_data *pdata) + INIT_LIST_HEAD(&dw->dma.channels); + for (i = 0; i < nr_channels; i++) { + struct dw_dma_chan *dwc = &dw->chan[i]; +- int r = nr_channels - i - 1; + + dwc->chan.device = &dw->dma; + dma_cookie_init(&dwc->chan); +@@ -1573,7 +1572,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) + + /* 7 is highest priority & 0 is lowest. */ + if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) +- dwc->priority = r; ++ dwc->priority = nr_channels - i - 1; + else + dwc->priority = i; + +@@ -1593,6 +1592,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) + /* Hardware configuration */ + if (autocfg) { + unsigned int dwc_params; ++ unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; + void __iomem *addr = chip->regs + r * sizeof(u32); + + dwc_params = dma_read_byaddr(addr, DWC_PARAMS); +diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c +index f6452682141b..8b8b0e3fc2a8 100644 +--- a/drivers/gpu/drm/drm_lock.c ++++ b/drivers/gpu/drm/drm_lock.c +@@ -58,6 +58,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) + struct drm_master *master = file_priv->master; + int ret = 0; + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + ++file_priv->lock_count; + + if (lock->context == DRM_KERNEL_CONTEXT) { +@@ -150,6 +153,9 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) + struct drm_lock *lock = data; + struct drm_master *master = file_priv->master; + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + if (lock->context == DRM_KERNEL_CONTEXT) { + DRM_ERROR("Process %d using kernel context %d\n", + task_pid_nr(current), lock->context); +diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c +index 8e3267a8bd4f..11f401ac6bdc 100644 +--- a/drivers/gpu/drm/qxl/qxl_display.c ++++ b/drivers/gpu/drm/qxl/qxl_display.c +@@ -552,7 +552,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, + adjusted_mode->hdisplay, + adjusted_mode->vdisplay); + +- if (qcrtc->index == 0) ++ if (bo->is_primary == false) + recreate_primary = true; + + if (bo->surf.stride * bo->surf.height > qdev->vram_size) { +@@ -816,13 +816,15 @@ static enum drm_connector_status qxl_conn_detect( + drm_connector_to_qxl_output(connector); + struct drm_device *ddev = connector->dev; + struct qxl_device *qdev = ddev->dev_private; +- int connected; ++ bool connected = false; + + /* The first monitor is always connected */ +- connected = (output->index == 0) || +- (qdev->client_monitors_config && +- qdev->client_monitors_config->count > output->index && +- qxl_head_enabled(&qdev->client_monitors_config->heads[output->index])); ++ if (!qdev->client_monitors_config) { ++ if (output->index == 0) ++ connected = true; ++ } else ++ connected = qdev->client_monitors_config->count > output->index && ++ qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]); + + DRM_DEBUG("#%d connected: %d\n", output->index, connected); + if (!connected) +diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c +index 20b69bff5b34..7393b76b6b63 100644 +--- a/drivers/hwmon/nct6775.c ++++ b/drivers/hwmon/nct6775.c +@@ -350,6 +350,10 @@ static const u16 NCT6775_REG_TEMP_CRIT[ARRAY_SIZE(nct6775_temp_label) - 1] + + /* NCT6776 specific data */ + ++/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */ ++#define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME ++#define 
NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME ++ + static const s8 NCT6776_ALARM_BITS[] = { + 0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */ + 17, -1, -1, -1, -1, -1, -1, /* in8..in14 */ +@@ -3476,8 +3480,8 @@ static int nct6775_probe(struct platform_device *pdev) + data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES; + data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT; + data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME; +- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME; +- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME; ++ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME; ++ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME; + data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H; + data->REG_PWM[0] = NCT6775_REG_PWM; + data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; +@@ -3548,8 +3552,8 @@ static int nct6775_probe(struct platform_device *pdev) + data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES; + data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT; + data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME; +- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME; +- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME; ++ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME; ++ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME; + data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H; + data->REG_PWM[0] = NCT6775_REG_PWM; + data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; +@@ -3624,8 +3628,8 @@ static int nct6775_probe(struct platform_device *pdev) + data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES; + data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT; + data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME; +- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME; +- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME; ++ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME; ++ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME; + data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H; + data->REG_PWM[0] = NCT6775_REG_PWM; + data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c +index dd2b610552d5..a49ce4a6e72f 100644 +--- a/drivers/infiniband/ulp/isert/ib_isert.c ++++ b/drivers/infiniband/ulp/isert/ib_isert.c +@@ -2634,9 +2634,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) + static int + isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) + { +- int ret; ++ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); ++ int ret = 0; + + switch (state) { ++ case ISTATE_REMOVE: ++ spin_lock_bh(&conn->cmd_lock); ++ list_del_init(&cmd->i_conn_node); ++ spin_unlock_bh(&conn->cmd_lock); ++ isert_put_cmd(isert_cmd, true); ++ break; + case ISTATE_SEND_NOPIN_WANT_RESPONSE: + ret = isert_put_nopin(cmd, conn, false); + break; +diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c +index 3ee198b65843..cc7ece1712b5 100644 +--- a/drivers/macintosh/windfarm_core.c ++++ b/drivers/macintosh/windfarm_core.c +@@ -435,7 +435,7 @@ int wf_unregister_client(struct notifier_block *nb) + { + mutex_lock(&wf_lock); + blocking_notifier_chain_unregister(&wf_client_list, nb); +- wf_client_count++; ++ wf_client_count--; + if (wf_client_count == 0) + wf_stop_thread(); + mutex_unlock(&wf_lock); +diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c +index b04d1f904d07..2eca9084defe 100644 +--- a/drivers/md/dm-cache-policy-cleaner.c ++++ b/drivers/md/dm-cache-policy-cleaner.c +@@ -434,7 +434,7 @@ static struct 
dm_cache_policy *wb_create(dm_cblock_t cache_size, + static struct dm_cache_policy_type wb_policy_type = { + .name = "cleaner", + .version = {1, 0, 0}, +- .hint_size = 0, ++ .hint_size = 4, + .owner = THIS_MODULE, + .create = wb_create + }; +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c +index 59715389b3cf..19cfd7affebe 100644 +--- a/drivers/md/dm-raid.c ++++ b/drivers/md/dm-raid.c +@@ -325,8 +325,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) + */ + if (min_region_size > (1 << 13)) { + /* If not a power of 2, make it the next power of 2 */ +- if (min_region_size & (min_region_size - 1)) +- region_size = 1 << fls(region_size); ++ region_size = roundup_pow_of_two(min_region_size); + DMINFO("Choosing default region size of %lu sectors", + region_size); + } else { +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 2ffd277eb311..31d14d88205b 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -5285,6 +5285,8 @@ EXPORT_SYMBOL_GPL(md_stop_writes); + static void __md_stop(struct mddev *mddev) + { + mddev->ready = 0; ++ /* Ensure ->event_work is done */ ++ flush_workqueue(md_misc_wq); + mddev->pers->stop(mddev); + if (mddev->pers->sync_request && mddev->to_remove == NULL) + mddev->to_remove = &md_redundancy_group; +diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h +index bf2b80d5c470..8731b6ea026b 100644 +--- a/drivers/md/persistent-data/dm-btree-internal.h ++++ b/drivers/md/persistent-data/dm-btree-internal.h +@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key); + + extern struct dm_block_validator btree_node_validator; + ++/* ++ * Value type for upper levels of multi-level btrees. ++ */ ++extern void init_le64_type(struct dm_transaction_manager *tm, ++ struct dm_btree_value_type *vt); ++ + #endif /* DM_BTREE_INTERNAL_H */ +diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c +index a03178e91a79..7c0d75547ccf 100644 +--- a/drivers/md/persistent-data/dm-btree-remove.c ++++ b/drivers/md/persistent-data/dm-btree-remove.c +@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info, + return r; + } + +-static struct dm_btree_value_type le64_type = { +- .context = NULL, +- .size = sizeof(__le64), +- .inc = NULL, +- .dec = NULL, +- .equal = NULL +-}; +- + int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, + uint64_t *keys, dm_block_t *new_root) + { +@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, + int index = 0, r = 0; + struct shadow_spine spine; + struct btree_node *n; ++ struct dm_btree_value_type le64_vt; + ++ init_le64_type(info->tm, &le64_vt); + init_shadow_spine(&spine, info); + for (level = 0; level < info->levels; level++) { + r = remove_raw(&spine, info, + (level == last_level ? 
+- &info->value_type : &le64_type), ++ &info->value_type : &le64_vt), + root, keys[level], (unsigned *)&index); + if (r < 0) + break; +diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c +index 1b5e13ec7f96..0dee514ba4c5 100644 +--- a/drivers/md/persistent-data/dm-btree-spine.c ++++ b/drivers/md/persistent-data/dm-btree-spine.c +@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s) + { + return s->root; + } ++ ++static void le64_inc(void *context, const void *value_le) ++{ ++ struct dm_transaction_manager *tm = context; ++ __le64 v_le; ++ ++ memcpy(&v_le, value_le, sizeof(v_le)); ++ dm_tm_inc(tm, le64_to_cpu(v_le)); ++} ++ ++static void le64_dec(void *context, const void *value_le) ++{ ++ struct dm_transaction_manager *tm = context; ++ __le64 v_le; ++ ++ memcpy(&v_le, value_le, sizeof(v_le)); ++ dm_tm_dec(tm, le64_to_cpu(v_le)); ++} ++ ++static int le64_equal(void *context, const void *value1_le, const void *value2_le) ++{ ++ __le64 v1_le, v2_le; ++ ++ memcpy(&v1_le, value1_le, sizeof(v1_le)); ++ memcpy(&v2_le, value2_le, sizeof(v2_le)); ++ return v1_le == v2_le; ++} ++ ++void init_le64_type(struct dm_transaction_manager *tm, ++ struct dm_btree_value_type *vt) ++{ ++ vt->context = tm; ++ vt->size = sizeof(__le64); ++ vt->inc = le64_inc; ++ vt->dec = le64_dec; ++ vt->equal = le64_equal; ++} +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c +index fdd3793e22f9..c7726cebc495 100644 +--- a/drivers/md/persistent-data/dm-btree.c ++++ b/drivers/md/persistent-data/dm-btree.c +@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root, + struct btree_node *n; + struct dm_btree_value_type le64_type; + +- le64_type.context = NULL; +- le64_type.size = sizeof(__le64); +- le64_type.inc = NULL; +- le64_type.dec = NULL; +- le64_type.equal = NULL; +- ++ init_le64_type(info->tm, &le64_type); + init_shadow_spine(&spine, info); + + for (level = 0; level < (info->levels - 1); level++) { +diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c +index d058b00ba218..eb3823e25638 100644 +--- a/drivers/mtd/nand/pxa3xx_nand.c ++++ b/drivers/mtd/nand/pxa3xx_nand.c +@@ -1463,6 +1463,9 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) + if (pdata->keep_config && !pxa3xx_nand_detect_config(info)) + goto KEEP_CONFIG; + ++ /* Set a default chunk size */ ++ info->chunk_size = 512; ++ + ret = pxa3xx_nand_sensing(info); + if (ret) { + dev_info(&info->pdev->dev, "There is no chip on cs %d!\n", +diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c +index d36134925d31..db657f2168d7 100644 +--- a/drivers/mtd/ubi/io.c ++++ b/drivers/mtd/ubi/io.c +@@ -921,6 +921,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi, + goto bad; + } + ++ if (data_size > ubi->leb_size) { ++ ubi_err("bad data_size"); ++ goto bad; ++ } ++ + if (vol_type == UBI_VID_STATIC) { + /* + * Although from high-level point of view static volumes may +diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c +index d77b1c1d7c72..bebf49e0dbe9 100644 +--- a/drivers/mtd/ubi/vtbl.c ++++ b/drivers/mtd/ubi/vtbl.c +@@ -651,6 +651,7 @@ static int init_volumes(struct ubi_device *ubi, + if (ubi->corr_peb_count) + ubi_err("%d PEBs are corrupted and not used", + ubi->corr_peb_count); ++ return -ENOSPC; + } + ubi->rsvd_pebs += reserved_pebs; + ubi->avail_pebs -= reserved_pebs; +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c +index c6b0b078ab99..2060fef7f2d2 100644 +--- a/drivers/mtd/ubi/wl.c ++++ 
b/drivers/mtd/ubi/wl.c +@@ -1974,6 +1974,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) + if (ubi->corr_peb_count) + ubi_err("%d PEBs are corrupted and not used", + ubi->corr_peb_count); ++ err = -ENOSPC; + goto out_free; + } + ubi->avail_pebs -= reserved_pebs; +diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c +index 5f57e3d35e26..6adf9abdf955 100644 +--- a/drivers/scsi/3w-9xxx.c ++++ b/drivers/scsi/3w-9xxx.c +@@ -225,6 +225,17 @@ static const struct file_operations twa_fops = { + .llseek = noop_llseek, + }; + ++/* ++ * The controllers use an inline buffer instead of a mapped SGL for small, ++ * single entry buffers. Note that we treat a zero-length transfer like ++ * a mapped SGL. ++ */ ++static bool twa_command_mapped(struct scsi_cmnd *cmd) ++{ ++ return scsi_sg_count(cmd) != 1 || ++ scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH; ++} ++ + /* This function will complete an aen request from the isr */ + static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id) + { +@@ -1351,7 +1362,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance) + } + + /* Now complete the io */ +- scsi_dma_unmap(cmd); ++ if (twa_command_mapped(cmd)) ++ scsi_dma_unmap(cmd); + cmd->scsi_done(cmd); + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); +@@ -1594,7 +1606,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev) + struct scsi_cmnd *cmd = tw_dev->srb[i]; + + cmd->result = (DID_RESET << 16); +- scsi_dma_unmap(cmd); ++ if (twa_command_mapped(cmd)) ++ scsi_dma_unmap(cmd); + cmd->scsi_done(cmd); + } + } +@@ -1777,12 +1790,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ + retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); + switch (retval) { + case SCSI_MLQUEUE_HOST_BUSY: +- scsi_dma_unmap(SCpnt); ++ if (twa_command_mapped(SCpnt)) ++ scsi_dma_unmap(SCpnt); + twa_free_request_id(tw_dev, request_id); + break; + case 1: + SCpnt->result = (DID_ERROR << 16); +- scsi_dma_unmap(SCpnt); ++ if (twa_command_mapped(SCpnt)) ++ scsi_dma_unmap(SCpnt); + done(SCpnt); + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); +@@ -1843,8 +1858,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, + /* Map sglist from scsi layer to cmd packet */ + + if (scsi_sg_count(srb)) { +- if ((scsi_sg_count(srb) == 1) && +- (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) { ++ if (!twa_command_mapped(srb)) { + if (srb->sc_data_direction == DMA_TO_DEVICE || + srb->sc_data_direction == DMA_BIDIRECTIONAL) + scsi_sg_copy_to_buffer(srb, +@@ -1917,7 +1931,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re + { + struct scsi_cmnd *cmd = tw_dev->srb[request_id]; + +- if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH && ++ if (!twa_command_mapped(cmd) && + (cmd->sc_data_direction == DMA_FROM_DEVICE || + cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { + if (scsi_sg_count(cmd) == 1) { +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c +index 96b6664bb1cf..787c8a883c3c 100644 +--- a/drivers/scsi/scsi_error.c ++++ b/drivers/scsi/scsi_error.c +@@ -2149,8 +2149,17 @@ int scsi_error_handler(void *data) + * We never actually get interrupted because kthread_run + * disables signal delivery for the created thread. + */ +- while (!kthread_should_stop()) { ++ while (true) { ++ /* ++ * The sequence in kthread_stop() sets the stop flag first ++ * then wakes the process. 
To avoid missed wakeups, the task ++ * should always be in a non running state before the stop ++ * flag is checked ++ */ + set_current_state(TASK_INTERRUPTIBLE); ++ if (kthread_should_stop()) ++ break; ++ + if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) || + shost->host_failed != shost->host_busy) { + SCSI_LOG_ERROR_RECOVERY(1, +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c +index 458a1480dc07..ebd32379b178 100644 +--- a/drivers/spi/spi-pxa2xx.c ++++ b/drivers/spi/spi-pxa2xx.c +@@ -562,6 +562,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id) + if (!(sccr1_reg & SSCR1_TIE)) + mask &= ~SSSR_TFS; + ++ /* Ignore RX timeout interrupt if it is disabled */ ++ if (!(sccr1_reg & SSCR1_TINTE)) ++ mask &= ~SSSR_TINT; ++ + if (!(status & mask)) + return IRQ_NONE; + +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c +index f3e3ae8af709..d88492152be1 100644 +--- a/drivers/spi/spi.c ++++ b/drivers/spi/spi.c +@@ -1251,8 +1251,7 @@ static struct class spi_master_class = { + * + * The caller is responsible for assigning the bus number and initializing + * the master's methods before calling spi_register_master(); and (after errors +- * adding the device) calling spi_master_put() and kfree() to prevent a memory +- * leak. ++ * adding the device) calling spi_master_put() to prevent a memory leak. + */ + struct spi_master *spi_alloc_master(struct device *dev, unsigned size) + { +diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c +index 574066ff73f8..c771b37967b2 100644 +--- a/drivers/staging/android/ion/ion.c ++++ b/drivers/staging/android/ion/ion.c +@@ -1119,13 +1119,13 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) + mutex_unlock(&client->lock); + goto end; + } +- mutex_unlock(&client->lock); + + handle = ion_handle_create(client, buffer); +- if (IS_ERR(handle)) ++ if (IS_ERR(handle)) { ++ mutex_unlock(&client->lock); + goto end; ++ } + +- mutex_lock(&client->lock); + ret = ion_handle_add(client, handle); + mutex_unlock(&client->lock); + if (ret) { +diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c +index 88c60b6020c4..c4ee9fa1dc91 100644 +--- a/drivers/staging/comedi/drivers/usbduxsigma.c ++++ b/drivers/staging/comedi/drivers/usbduxsigma.c +@@ -575,37 +575,6 @@ static int usbduxsigma_ai_cmdtest(struct comedi_device *dev, + if (err) + return 3; + +- /* Step 4: fix up any arguments */ +- +- if (high_speed) { +- /* +- * every 2 channels get a time window of 125us. Thus, if we +- * sample all 16 channels we need 1ms. If we sample only one +- * channel we need only 125us +- */ +- devpriv->ai_interval = interval; +- devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval); +- } else { +- /* interval always 1ms */ +- devpriv->ai_interval = 1; +- devpriv->ai_timer = cmd->scan_begin_arg / 1000000; +- } +- if (devpriv->ai_timer < 1) +- err |= -EINVAL; +- +- if (cmd->stop_src == TRIG_COUNT) { +- /* data arrives as one packet */ +- devpriv->ai_sample_count = cmd->stop_arg; +- devpriv->ai_continuous = 0; +- } else { +- /* continuous acquisition */ +- devpriv->ai_continuous = 1; +- devpriv->ai_sample_count = 0; +- } +- +- if (err) +- return 4; +- + return 0; + } + +@@ -704,6 +673,33 @@ static int usbduxsigma_ai_cmd(struct comedi_device *dev, + + /* set current channel of the running acquisition to zero */ + s->async->cur_chan = 0; ++ ++ if (devpriv->high_speed) { ++ /* ++ * every 2 channels get a time window of 125us. 
Thus, if we ++ * sample all 16 channels we need 1ms. If we sample only one ++ * channel we need only 125us ++ */ ++ unsigned int interval = usbduxsigma_chans_to_interval(len); ++ ++ devpriv->ai_interval = interval; ++ devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval); ++ } else { ++ /* interval always 1ms */ ++ devpriv->ai_interval = 1; ++ devpriv->ai_timer = cmd->scan_begin_arg / 1000000; ++ } ++ ++ if (cmd->stop_src == TRIG_COUNT) { ++ /* data arrives as one packet */ ++ devpriv->ai_sample_count = cmd->stop_arg; ++ devpriv->ai_continuous = 0; ++ } else { ++ /* continuous acquisition */ ++ devpriv->ai_continuous = 1; ++ devpriv->ai_sample_count = 0; ++ } ++ + for (i = 0; i < len; i++) { + unsigned int chan = CR_CHAN(cmd->chanlist[i]); + +@@ -955,10 +951,24 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device *dev, + if (err) + return 3; + +- /* Step 4: fix up any arguments */ ++ return 0; ++} ++ ++static int usbduxsigma_ao_cmd(struct comedi_device *dev, ++ struct comedi_subdevice *s) ++{ ++ struct usbduxsigma_private *devpriv = dev->private; ++ struct comedi_cmd *cmd = &s->async->cmd; ++ int ret; ++ int i; ++ ++ down(&devpriv->sem); ++ ++ /* set current channel of the running acquisition to zero */ ++ s->async->cur_chan = 0; + + /* we count in timer steps */ +- if (high_speed) { ++ if (cmd->convert_src == TRIG_TIMER) { + /* timing of the conversion itself: every 125 us */ + devpriv->ao_timer = cmd->convert_arg / 125000; + } else { +@@ -968,12 +978,9 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device *dev, + */ + devpriv->ao_timer = cmd->scan_begin_arg / 1000000; + } +- if (devpriv->ao_timer < 1) +- err |= -EINVAL; +- + if (cmd->stop_src == TRIG_COUNT) { + /* not continuous, use counter */ +- if (high_speed) { ++ if (cmd->convert_src == TRIG_TIMER) { + /* high speed also scans everything at once */ + devpriv->ao_sample_count = cmd->stop_arg * + cmd->scan_end_arg; +@@ -992,24 +999,6 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device *dev, + devpriv->ao_sample_count = 0; + } + +- if (err) +- return 4; +- +- return 0; +-} +- +-static int usbduxsigma_ao_cmd(struct comedi_device *dev, +- struct comedi_subdevice *s) +-{ +- struct usbduxsigma_private *devpriv = dev->private; +- struct comedi_cmd *cmd = &s->async->cmd; +- int ret; +- int i; +- +- down(&devpriv->sem); +- +- /* set current channel of the running acquisition to zero */ +- s->async->cur_chan = 0; + for (i = 0; i < cmd->chanlist_len; ++i) + devpriv->ao_chanlist[i] = CR_CHAN(cmd->chanlist[i]); + +diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c +index 4299cf45f947..5e1f16c36b49 100644 +--- a/drivers/staging/speakup/fakekey.c ++++ b/drivers/staging/speakup/fakekey.c +@@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void) + __this_cpu_write(reporting_keystroke, true); + input_report_key(virt_keyboard, KEY_DOWN, PRESSED); + input_report_key(virt_keyboard, KEY_DOWN, RELEASED); ++ input_sync(virt_keyboard); + __this_cpu_write(reporting_keystroke, false); + + /* reenable preemption */ +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c +index 062967c90b2a..3ecc887eea27 100644 +--- a/drivers/usb/core/config.c ++++ b/drivers/usb/core/config.c +@@ -113,7 +113,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, + cfgno, inum, asnum, ep->desc.bEndpointAddress); + ep->ss_ep_comp.bmAttributes = 16; + } else if (usb_endpoint_xfer_isoc(&ep->desc) && +- desc->bmAttributes > 2) { ++ USB_SS_MULT(desc->bmAttributes) > 3) { + dev_warn(ddev, "Isoc 
endpoint has Mult of %d in " + "config %d interface %d altsetting %d ep %d: " + "setting to 3\n", desc->bmAttributes + 1, +@@ -122,7 +122,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, + } + + if (usb_endpoint_xfer_isoc(&ep->desc)) +- max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) * ++ max_tx = (desc->bMaxBurst + 1) * ++ (USB_SS_MULT(desc->bmAttributes)) * + usb_endpoint_maxp(&ep->desc); + else if (usb_endpoint_xfer_int(&ep->desc)) + max_tx = usb_endpoint_maxp(&ep->desc) * +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index b195fdb1effc..804acc700327 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -54,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, + ++ /* Logitech ConferenceCam CC3000e */ ++ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, ++ { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT }, ++ ++ /* Logitech PTZ Pro Camera */ ++ { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT }, ++ + /* Logitech Quickcam Fusion */ + { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, + +@@ -78,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = { + /* Philips PSC805 audio device */ + { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, + ++ /* Plantronic Audio 655 DSP */ ++ { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME }, ++ ++ /* Plantronic Audio 648 USB */ ++ { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME }, ++ + /* Artisman Watchdog Dongle */ + { USB_DEVICE(0x04b4, 0x0526), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index f8893b32bbb6..86bfaf904ab5 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -1402,10 +1402,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, + * use Event Data TRBs, and we don't chain in a link TRB on short + * transfers, we're basically dividing by 1. + * +- * xHCI 1.0 specification indicates that the Average TRB Length should +- * be set to 8 for control endpoints. ++ * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length ++ * should be set to 8 for control endpoints. 
+ */ +- if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100) ++ if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100) + ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8)); + else + ep_ctx->tx_info |= +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index a365e9769fcc..86a0ddd8efb7 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -3223,9 +3223,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + struct xhci_td *td; + struct scatterlist *sg; + int num_sgs; +- int trb_buff_len, this_sg_len, running_total; ++ int trb_buff_len, this_sg_len, running_total, ret; + unsigned int total_packet_count; ++ bool zero_length_needed; + bool first_trb; ++ int last_trb_num; + u64 addr; + bool more_trbs_coming; + +@@ -3241,13 +3243,27 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length, + usb_endpoint_maxp(&urb->ep->desc)); + +- trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], ++ ret = prepare_transfer(xhci, xhci->devs[slot_id], + ep_index, urb->stream_id, + num_trbs, urb, 0, mem_flags); +- if (trb_buff_len < 0) +- return trb_buff_len; ++ if (ret < 0) ++ return ret; + + urb_priv = urb->hcpriv; ++ ++ /* Deal with URB_ZERO_PACKET - need one more td/trb */ ++ zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET && ++ urb_priv->length == 2; ++ if (zero_length_needed) { ++ num_trbs++; ++ xhci_dbg(xhci, "Creating zero length td.\n"); ++ ret = prepare_transfer(xhci, xhci->devs[slot_id], ++ ep_index, urb->stream_id, ++ 1, urb, 1, mem_flags); ++ if (ret < 0) ++ return ret; ++ } ++ + td = urb_priv->td[0]; + + /* +@@ -3277,6 +3293,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + trb_buff_len = urb->transfer_buffer_length; + + first_trb = true; ++ last_trb_num = zero_length_needed ? 2 : 1; + /* Queue the first TRB, even if it's zero-length */ + do { + u32 field = 0; +@@ -3294,12 +3311,15 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + /* Chain all the TRBs together; clear the chain bit in the last + * TRB to indicate it's the last TRB in the chain. 
+ */ +- if (num_trbs > 1) { ++ if (num_trbs > last_trb_num) { + field |= TRB_CHAIN; +- } else { +- /* FIXME - add check for ZERO_PACKET flag before this */ ++ } else if (num_trbs == last_trb_num) { + td->last_trb = ep_ring->enqueue; + field |= TRB_IOC; ++ } else if (zero_length_needed && num_trbs == 1) { ++ trb_buff_len = 0; ++ urb_priv->td[1]->last_trb = ep_ring->enqueue; ++ field |= TRB_IOC; + } + + /* Only set interrupt on short packet for IN endpoints */ +@@ -3361,7 +3381,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + if (running_total + trb_buff_len > urb->transfer_buffer_length) + trb_buff_len = + urb->transfer_buffer_length - running_total; +- } while (running_total < urb->transfer_buffer_length); ++ } while (num_trbs > 0); + + check_trb_math(urb, num_trbs, running_total); + giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, +@@ -3379,7 +3399,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + int num_trbs; + struct xhci_generic_trb *start_trb; + bool first_trb; ++ int last_trb_num; + bool more_trbs_coming; ++ bool zero_length_needed; + int start_cycle; + u32 field, length_field; + +@@ -3410,7 +3432,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + num_trbs++; + running_total += TRB_MAX_BUFF_SIZE; + } +- /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */ + + ret = prepare_transfer(xhci, xhci->devs[slot_id], + ep_index, urb->stream_id, +@@ -3419,6 +3440,20 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + return ret; + + urb_priv = urb->hcpriv; ++ ++ /* Deal with URB_ZERO_PACKET - need one more td/trb */ ++ zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET && ++ urb_priv->length == 2; ++ if (zero_length_needed) { ++ num_trbs++; ++ xhci_dbg(xhci, "Creating zero length td.\n"); ++ ret = prepare_transfer(xhci, xhci->devs[slot_id], ++ ep_index, urb->stream_id, ++ 1, urb, 1, mem_flags); ++ if (ret < 0) ++ return ret; ++ } ++ + td = urb_priv->td[0]; + + /* +@@ -3440,7 +3475,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + trb_buff_len = urb->transfer_buffer_length; + + first_trb = true; +- ++ last_trb_num = zero_length_needed ? 2 : 1; + /* Queue the first TRB, even if it's zero-length */ + do { + u32 remainder = 0; +@@ -3457,12 +3492,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + /* Chain all the TRBs together; clear the chain bit in the last + * TRB to indicate it's the last TRB in the chain. 
+ */ +- if (num_trbs > 1) { ++ if (num_trbs > last_trb_num) { + field |= TRB_CHAIN; +- } else { +- /* FIXME - add check for ZERO_PACKET flag before this */ ++ } else if (num_trbs == last_trb_num) { + td->last_trb = ep_ring->enqueue; + field |= TRB_IOC; ++ } else if (zero_length_needed && num_trbs == 1) { ++ trb_buff_len = 0; ++ urb_priv->td[1]->last_trb = ep_ring->enqueue; ++ field |= TRB_IOC; + } + + /* Only set interrupt on short packet for IN endpoints */ +@@ -3500,7 +3538,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + trb_buff_len = urb->transfer_buffer_length - running_total; + if (trb_buff_len > TRB_MAX_BUFF_SIZE) + trb_buff_len = TRB_MAX_BUFF_SIZE; +- } while (running_total < urb->transfer_buffer_length); ++ } while (num_trbs > 0); + + check_trb_math(urb, num_trbs, running_total); + giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, +@@ -3567,8 +3605,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + if (start_cycle == 0) + field |= 0x1; + +- /* xHCI 1.0 6.4.1.2.1: Transfer Type field */ +- if (xhci->hci_version == 0x100) { ++ /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */ ++ if (xhci->hci_version >= 0x100) { + if (urb->transfer_buffer_length > 0) { + if (setup->bRequestType & USB_DIR_IN) + field |= TRB_TX_TYPE(TRB_DATA_IN); +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index fc61e663b00a..79c7b255e60a 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci) + "waited %u microseconds.\n", + XHCI_MAX_HALT_USEC); + if (!ret) +- xhci->xhc_state &= ~XHCI_STATE_HALTED; ++ xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING); ++ + return ret; + } + +@@ -1319,6 +1320,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) + + if (usb_endpoint_xfer_isoc(&urb->ep->desc)) + size = urb->number_of_packets; ++ else if (usb_endpoint_is_bulk_out(&urb->ep->desc) && ++ urb->transfer_buffer_length > 0 && ++ urb->transfer_flags & URB_ZERO_PACKET && ++ !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc))) ++ size = 2; + else + size = 1; + +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 096438e4fb0c..c918075e5eae 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -276,6 +276,10 @@ static void option_instat_callback(struct urb *urb); + #define ZTE_PRODUCT_MF622 0x0001 + #define ZTE_PRODUCT_MF628 0x0015 + #define ZTE_PRODUCT_MF626 0x0031 ++#define ZTE_PRODUCT_ZM8620_X 0x0396 ++#define ZTE_PRODUCT_ME3620_MBIM 0x0426 ++#define ZTE_PRODUCT_ME3620_X 0x1432 ++#define ZTE_PRODUCT_ME3620_L 0x1433 + #define ZTE_PRODUCT_AC2726 0xfff1 + #define ZTE_PRODUCT_CDMA_TECH 0xfffe + #define ZTE_PRODUCT_AC8710T 0xffff +@@ -549,6 +553,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = { + .sendsetup = BIT(1) | BIT(2) | BIT(3), + }; + ++static const struct option_blacklist_info zte_me3620_mbim_blacklist = { ++ .reserved = BIT(2) | BIT(3) | BIT(4), ++}; ++ ++static const struct option_blacklist_info zte_me3620_xl_blacklist = { ++ .reserved = BIT(3) | BIT(4) | BIT(5), ++}; ++ ++static const struct option_blacklist_info zte_zm8620_x_blacklist = { ++ .reserved = BIT(3) | BIT(4) | BIT(5), ++}; ++ + static const struct option_blacklist_info huawei_cdc12_blacklist = { + .reserved = BIT(1) | BIT(2), + }; +@@ -1579,6 +1595,14 @@ static const struct usb_device_id option_ids[] = { + .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, + { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, ++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L), ++ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist }, ++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM), ++ .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist }, ++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X), ++ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist }, ++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X), ++ .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist }, + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, +diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c +index 6c3734d2b45a..d3ea90bef84d 100644 +--- a/drivers/usb/serial/whiteheat.c ++++ b/drivers/usb/serial/whiteheat.c +@@ -80,6 +80,8 @@ static int whiteheat_firmware_download(struct usb_serial *serial, + static int whiteheat_firmware_attach(struct usb_serial *serial); + + /* function prototypes for the Connect Tech WhiteHEAT serial converter */ ++static int whiteheat_probe(struct usb_serial *serial, ++ const struct usb_device_id *id); + static int whiteheat_attach(struct usb_serial *serial); + static void whiteheat_release(struct usb_serial *serial); + static int whiteheat_port_probe(struct usb_serial_port *port); +@@ -116,6 +118,7 @@ static struct usb_serial_driver whiteheat_device = { + .description = "Connect Tech - WhiteHEAT", + .id_table = id_table_std, + .num_ports = 4, ++ .probe = whiteheat_probe, + .attach = whiteheat_attach, + .release = whiteheat_release, + .port_probe = whiteheat_port_probe, +@@ -217,6 +220,34 @@ static int whiteheat_firmware_attach(struct usb_serial *serial) + /***************************************************************************** + * Connect Tech's White Heat serial driver functions + *****************************************************************************/ ++ ++static int whiteheat_probe(struct usb_serial *serial, ++ const struct usb_device_id *id) ++{ ++ struct usb_host_interface *iface_desc; ++ struct usb_endpoint_descriptor *endpoint; ++ size_t num_bulk_in = 0; ++ size_t num_bulk_out = 0; ++ size_t min_num_bulk; ++ unsigned int i; ++ ++ iface_desc = serial->interface->cur_altsetting; ++ ++ for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { ++ endpoint = &iface_desc->endpoint[i].desc; ++ if (usb_endpoint_is_bulk_in(endpoint)) ++ ++num_bulk_in; ++ if (usb_endpoint_is_bulk_out(endpoint)) ++ ++num_bulk_out; ++ } ++ ++ min_num_bulk = COMMAND_PORT + 1; ++ if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk) ++ return -ENODEV; ++ ++ return 0; ++} ++ + static int whiteheat_attach(struct usb_serial *serial) + { + struct usb_serial_port *command_port; +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 8adfc65b37dd..332999288b51 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -2681,7 +2681,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, + bio_end_io_t end_io_func, + int mirror_num, + unsigned long prev_bio_flags, +- unsigned long bio_flags) ++ unsigned long bio_flags, ++ bool force_bio_submit) + { + int ret = 0; + struct bio *bio; +@@ -2699,6 +2700,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, + contig = bio_end_sector(bio) == sector; + + if (prev_bio_flags != bio_flags || 
!contig || ++ force_bio_submit || + merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) || + bio_add_page(bio, page, page_size, offset) < page_size) { + ret = submit_one_bio(rw, bio, mirror_num, +@@ -2790,7 +2792,8 @@ static int __do_readpage(struct extent_io_tree *tree, + get_extent_t *get_extent, + struct extent_map **em_cached, + struct bio **bio, int mirror_num, +- unsigned long *bio_flags, int rw) ++ unsigned long *bio_flags, int rw, ++ u64 *prev_em_start) + { + struct inode *inode = page->mapping->host; + u64 start = page_offset(page); +@@ -2838,6 +2841,7 @@ static int __do_readpage(struct extent_io_tree *tree, + } + while (cur <= end) { + unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; ++ bool force_bio_submit = false; + + if (cur >= last_byte) { + char *userpage; +@@ -2888,6 +2892,49 @@ static int __do_readpage(struct extent_io_tree *tree, + block_start = em->block_start; + if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) + block_start = EXTENT_MAP_HOLE; ++ ++ /* ++ * If we have a file range that points to a compressed extent ++ * and it's followed by a consecutive file range that points ++ * to the same compressed extent (possibly with a different ++ * offset and/or length, so it either points to the whole extent ++ * or only part of it), we must make sure we do not submit a ++ * single bio to populate the pages for the 2 ranges because ++ * this makes the compressed extent read zero out the pages ++ * belonging to the 2nd range. Imagine the following scenario: ++ * ++ * File layout ++ * [0 - 8K] [8K - 24K] ++ * | | ++ * | | ++ * points to extent X, points to extent X, ++ * offset 4K, length of 8K offset 0, length 16K ++ * ++ * [extent X, compressed length = 4K uncompressed length = 16K] ++ * ++ * If the bio to read the compressed extent covers both ranges, ++ * it will decompress extent X into the pages belonging to the ++ * first range and then it will stop, zeroing out the remaining ++ * pages that belong to the other range that points to extent X. ++ * So here we make sure we submit 2 bios, one for the first ++ * range and another one for the second range. Both will target ++ * the same physical extent from disk, but we can't currently ++ * make the compressed bio endio callback populate the pages ++ * for both ranges because each compressed bio is tightly ++ * coupled with a single extent map, and each range can have ++ * an extent map with a different offset value relative to the ++ * uncompressed data of our extent and different lengths. This ++ * is a corner case so we prioritize correctness over ++ * non-optimal behavior (submitting 2 bios for the same extent).
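
A self-contained model of the guard this comment motivates: remember the previous range's extent map origin and force a fresh bio whenever a compressed range's origin changes. Only the condition mirrors the patch; the struct and the sample origins are invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct em { uint64_t orig_start; bool compressed; };

int main(void)
{
	/* three page-sized reads: the first two share one extent map, the
	 * third maps the same physical extent through a different one */
	struct em maps[] = {
		{ 0,    true },
		{ 0,    true },
		{ 8192, true },
	};
	uint64_t prev_em_start = (uint64_t)-1;

	for (int i = 0; i < 3; i++) {
		bool force_bio_submit = maps[i].compressed &&
					prev_em_start != (uint64_t)-1 &&
					prev_em_start != maps[i].orig_start;
		printf("read %d: force_bio_submit=%d\n", i, force_bio_submit);
		prev_em_start = maps[i].orig_start;
	}
	return 0;
}
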
++ */ ++ if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) && ++ prev_em_start && *prev_em_start != (u64)-1 && ++ *prev_em_start != em->orig_start) ++ force_bio_submit = true; ++ ++ if (prev_em_start) ++ *prev_em_start = em->orig_start; ++ + free_extent_map(em); + em = NULL; + +@@ -2937,7 +2984,8 @@ static int __do_readpage(struct extent_io_tree *tree, + bdev, bio, pnr, + end_bio_extent_readpage, mirror_num, + *bio_flags, +- this_bio_flag); ++ this_bio_flag, ++ force_bio_submit); + if (!ret) { + nr++; + *bio_flags = this_bio_flag; +@@ -2964,7 +3012,8 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, + get_extent_t *get_extent, + struct extent_map **em_cached, + struct bio **bio, int mirror_num, +- unsigned long *bio_flags, int rw) ++ unsigned long *bio_flags, int rw, ++ u64 *prev_em_start) + { + struct inode *inode; + struct btrfs_ordered_extent *ordered; +@@ -2984,7 +3033,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, + + for (index = 0; index < nr_pages; index++) { + __do_readpage(tree, pages[index], get_extent, em_cached, bio, +- mirror_num, bio_flags, rw); ++ mirror_num, bio_flags, rw, prev_em_start); + page_cache_release(pages[index]); + } + } +@@ -2994,7 +3043,8 @@ static void __extent_readpages(struct extent_io_tree *tree, + int nr_pages, get_extent_t *get_extent, + struct extent_map **em_cached, + struct bio **bio, int mirror_num, +- unsigned long *bio_flags, int rw) ++ unsigned long *bio_flags, int rw, ++ u64 *prev_em_start) + { + u64 start = 0; + u64 end = 0; +@@ -3015,7 +3065,7 @@ static void __extent_readpages(struct extent_io_tree *tree, + index - first_index, start, + end, get_extent, em_cached, + bio, mirror_num, bio_flags, +- rw); ++ rw, prev_em_start); + start = page_start; + end = start + PAGE_CACHE_SIZE - 1; + first_index = index; +@@ -3026,7 +3076,8 @@ static void __extent_readpages(struct extent_io_tree *tree, + __do_contiguous_readpages(tree, &pages[first_index], + index - first_index, start, + end, get_extent, em_cached, bio, +- mirror_num, bio_flags, rw); ++ mirror_num, bio_flags, rw, ++ prev_em_start); + } + + static int __extent_read_full_page(struct extent_io_tree *tree, +@@ -3052,7 +3103,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, + } + + ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num, +- bio_flags, rw); ++ bio_flags, rw, NULL); + return ret; + } + +@@ -3078,7 +3129,7 @@ int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page, + int ret; + + ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num, +- &bio_flags, READ); ++ &bio_flags, READ, NULL); + if (bio) + ret = submit_one_bio(READ, bio, mirror_num, bio_flags); + return ret; +@@ -3347,7 +3398,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, + sector, iosize, pg_offset, + bdev, &epd->bio, max_nr, + end_bio_extent_writepage, +- 0, 0, 0); ++ 0, 0, 0, false); + if (ret) + SetPageError(page); + } +@@ -3516,7 +3567,7 @@ static int write_one_eb(struct extent_buffer *eb, + ret = submit_extent_page(rw, tree, p, offset >> 9, + PAGE_CACHE_SIZE, 0, bdev, &epd->bio, + -1, end_bio_extent_buffer_writepage, +- 0, epd->bio_flags, bio_flags); ++ 0, epd->bio_flags, bio_flags, false); + epd->bio_flags = bio_flags; + if (ret) { + set_bit(EXTENT_BUFFER_IOERR, &eb->bflags); +@@ -3918,6 +3969,7 @@ int extent_readpages(struct extent_io_tree *tree, + struct page *page; + struct extent_map *em_cached = NULL; + int nr = 0; ++ u64 prev_em_start = (u64)-1; + + for 
(page_idx = 0; page_idx < nr_pages; page_idx++) { + page = list_entry(pages->prev, struct page, lru); +@@ -3934,12 +3986,12 @@ int extent_readpages(struct extent_io_tree *tree, + if (nr < ARRAY_SIZE(pagepool)) + continue; + __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, +- &bio, 0, &bio_flags, READ); ++ &bio, 0, &bio_flags, READ, &prev_em_start); + nr = 0; + } + if (nr) + __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, +- &bio, 0, &bio_flags, READ); ++ &bio, 0, &bio_flags, READ, &prev_em_start); + + if (em_cached) + free_extent_map(em_cached); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 653cdd85e0f2..eaf8699ed559 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -4668,7 +4668,8 @@ void btrfs_evict_inode(struct inode *inode) + goto no_delete; + } + /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */ +- btrfs_wait_ordered_range(inode, 0, (u64)-1); ++ if (!special_file(inode->i_mode)) ++ btrfs_wait_ordered_range(inode, 0, (u64)-1); + + if (root->fs_info->log_root_recovering) { + BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, +diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c +index 4934347321d3..3299778391fd 100644 +--- a/fs/cifs/cifsencrypt.c ++++ b/fs/cifs/cifsencrypt.c +@@ -441,6 +441,48 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp) + return 0; + } + ++/* Server has provided av pairs/target info in the type 2 challenge ++ * packet and we have plucked it and stored within smb session. ++ * We parse that blob here to find the server given timestamp ++ * as part of ntlmv2 authentication (or local current time as ++ * default in case of failure) ++ */ ++static __le64 ++find_timestamp(struct cifs_ses *ses) ++{ ++ unsigned int attrsize; ++ unsigned int type; ++ unsigned int onesize = sizeof(struct ntlmssp2_name); ++ unsigned char *blobptr; ++ unsigned char *blobend; ++ struct ntlmssp2_name *attrptr; ++ ++ if (!ses->auth_key.len || !ses->auth_key.response) ++ return 0; ++ ++ blobptr = ses->auth_key.response; ++ blobend = blobptr + ses->auth_key.len; ++ ++ while (blobptr + onesize < blobend) { ++ attrptr = (struct ntlmssp2_name *) blobptr; ++ type = le16_to_cpu(attrptr->type); ++ if (type == NTLMSSP_AV_EOL) ++ break; ++ blobptr += 2; /* advance attr type */ ++ attrsize = le16_to_cpu(attrptr->length); ++ blobptr += 2; /* advance attr size */ ++ if (blobptr + attrsize > blobend) ++ break; ++ if (type == NTLMSSP_AV_TIMESTAMP) { ++ if (attrsize == sizeof(u64)) ++ return *((__le64 *)blobptr); ++ } ++ blobptr += attrsize; /* advance attr value */ ++ } ++ ++ return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); ++} ++ + static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, + const struct nls_table *nls_cp) + { +@@ -637,6 +679,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) + struct ntlmv2_resp *ntlmv2; + char ntlmv2_hash[16]; + unsigned char *tiblob = NULL; /* target info blob */ ++ __le64 rsp_timestamp; + + if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) { + if (!ses->domainName) { +@@ -655,6 +698,12 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) + } + } + ++ /* Must be within 5 minutes of the server (or in range +/-2h ++ * in case of Mac OS X), so simply carry over server timestamp ++ * (as Windows 7 does) ++ */ ++ rsp_timestamp = find_timestamp(ses); ++ + baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp); + tilen = ses->auth_key.len; + tiblob = ses->auth_key.response; +@@ -671,8 +720,8 @@ setup_ntlmv2_rsp(struct 
cifs_ses *ses, const struct nls_table *nls_cp) + (ses->auth_key.response + CIFS_SESS_KEY_SIZE); + ntlmv2->blob_signature = cpu_to_le32(0x00000101); + ntlmv2->reserved = 0; +- /* Must be within 5 minutes of the server */ +- ntlmv2->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); ++ ntlmv2->time = rsp_timestamp; ++ + get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal)); + ntlmv2->reserved2 = 0; + +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index 30f3eb5bc022..6aeb1de0fa23 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -49,9 +49,13 @@ change_conf(struct TCP_Server_Info *server) + break; + default: + server->echoes = true; +- server->oplocks = true; ++ if (enable_oplocks) { ++ server->oplocks = true; ++ server->oplock_credits = 1; ++ } else ++ server->oplocks = false; ++ + server->echo_credits = 1; +- server->oplock_credits = 1; + } + server->credits -= server->echo_credits + server->oplock_credits; + return 0; +diff --git a/fs/dcache.c b/fs/dcache.c +index df323f809e03..65ccdf0e2854 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -2787,6 +2787,13 @@ restart: + + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { + struct mount *parent = ACCESS_ONCE(mnt->mnt_parent); ++ /* Escaped? */ ++ if (dentry != vfsmnt->mnt_root) { ++ bptr = *buffer; ++ blen = *buflen; ++ error = 3; ++ break; ++ } + /* Global root? */ + if (mnt != parent) { + dentry = ACCESS_ONCE(mnt->mnt_mountpoint); +diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c +index b892355f1944..d4c7e470dec8 100644 +--- a/fs/jbd2/checkpoint.c ++++ b/fs/jbd2/checkpoint.c +@@ -475,14 +475,15 @@ int jbd2_cleanup_journal_tail(journal_t *journal) + * journal_clean_one_cp_list + * + * Find all the written-back checkpoint buffers in the given list and +- * release them. ++ * release them. If 'destroy' is set, clean all buffers unconditionally. + * + * Called with the journal locked. + * Called with j_list_lock held. + * Returns number of buffers reaped (for debug) + */ + +-static int journal_clean_one_cp_list(struct journal_head *jh, int *released) ++static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy, ++ int *released) + { + struct journal_head *last_jh; + struct journal_head *next_jh = jh; +@@ -496,7 +497,10 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released) + do { + jh = next_jh; + next_jh = jh->b_cpnext; +- ret = __try_to_free_cp_buf(jh); ++ if (!destroy) ++ ret = __try_to_free_cp_buf(jh); ++ else ++ ret = __jbd2_journal_remove_checkpoint(jh) + 1; + if (ret) { + freed++; + if (ret == 2) { +@@ -521,13 +525,14 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released) + * journal_clean_checkpoint_list + * + * Find all the written-back checkpoint buffers in the journal and release them. ++ * If 'destroy' is set, release all buffers unconditionally. + * + * Called with the journal locked. + * Called with j_list_lock held. 
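
journal_clean_one_cp_list() above now takes a destroy flag: release only written-back buffers, or everything unconditionally. A toy list walk contrasting the two modes, with malloc'd nodes standing in for journal heads (the kernel's checkpoint list is circular and locked; this sketch is neither):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct jh { bool dirty; struct jh *next; };

static int clean_list(struct jh **head, bool destroy)
{
	int freed = 0;
	struct jh *jh = *head, *prev = NULL;

	while (jh) {
		struct jh *next = jh->next;
		if (destroy || !jh->dirty) { /* destroy frees unconditionally */
			if (prev)
				prev->next = next;
			else
				*head = next;
			free(jh);
			freed++;
		} else {
			prev = jh;
		}
		jh = next;
	}
	return freed;
}

int main(void)
{
	struct jh *head = NULL;
	for (int i = 0; i < 4; i++) {
		struct jh *jh = malloc(sizeof(*jh));
		jh->dirty = i & 1;
		jh->next = head;
		head = jh;
	}
	printf("freed %d with destroy=false\n", clean_list(&head, false));
	printf("freed %d with destroy=true\n", clean_list(&head, true));
	return 0;
}
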
+ * Returns number of buffers reaped (for debug) + */ + +-int __jbd2_journal_clean_checkpoint_list(journal_t *journal) ++int __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy) + { + transaction_t *transaction, *last_transaction, *next_transaction; + int ret = 0; +@@ -543,7 +548,7 @@ int __jbd2_journal_clean_checkpoint_list(journal_t *journal) + transaction = next_transaction; + next_transaction = transaction->t_cpnext; + ret += journal_clean_one_cp_list(transaction-> +- t_checkpoint_list, &released); ++ t_checkpoint_list, destroy, &released); + /* + * This function only frees up some memory if possible so we + * don't have an obligation to finish processing. Bail out if +@@ -559,7 +564,7 @@ int __jbd2_journal_clean_checkpoint_list(journal_t *journal) + * we can possibly see not yet submitted buffers on io_list + */ + ret += journal_clean_one_cp_list(transaction-> +- t_checkpoint_io_list, &released); ++ t_checkpoint_io_list, destroy, &released); + if (need_resched()) + goto out; + } while (transaction != last_transaction); +@@ -568,6 +573,28 @@ out: + } + + /* ++ * Remove buffers from all checkpoint lists as journal is aborted and we just ++ * need to free memory ++ */ ++void jbd2_journal_destroy_checkpoint(journal_t *journal) ++{ ++ /* ++ * We loop because __jbd2_journal_clean_checkpoint_list() may abort ++ * early due to a need of rescheduling. ++ */ ++ while (1) { ++ spin_lock(&journal->j_list_lock); ++ if (!journal->j_checkpoint_transactions) { ++ spin_unlock(&journal->j_list_lock); ++ break; ++ } ++ __jbd2_journal_clean_checkpoint_list(journal, true); ++ spin_unlock(&journal->j_list_lock); ++ cond_resched(); ++ } ++} ++ ++/* + * journal_remove_checkpoint: called after a buffer has been committed + * to disk (either by being write-back flushed to disk, or being + * committed to the log). +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c +index 9181c2b22b3c..4207cf2caa87 100644 +--- a/fs/jbd2/commit.c ++++ b/fs/jbd2/commit.c +@@ -510,7 +510,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) + * frees some memory + */ + spin_lock(&journal->j_list_lock); +- __jbd2_journal_clean_checkpoint_list(journal); ++ __jbd2_journal_clean_checkpoint_list(journal, false); + spin_unlock(&journal->j_list_lock); + + jbd_debug(3, "JBD2: commit phase 1\n"); +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c +index e8d62d742435..3b607a8609c4 100644 +--- a/fs/jbd2/journal.c ++++ b/fs/jbd2/journal.c +@@ -1708,8 +1708,17 @@ int jbd2_journal_destroy(journal_t *journal) + while (journal->j_checkpoint_transactions != NULL) { + spin_unlock(&journal->j_list_lock); + mutex_lock(&journal->j_checkpoint_mutex); +- jbd2_log_do_checkpoint(journal); ++ err = jbd2_log_do_checkpoint(journal); + mutex_unlock(&journal->j_checkpoint_mutex); ++ /* ++ * If checkpointing failed, just free the buffers to avoid ++ * looping forever ++ */ ++ if (err) { ++ jbd2_journal_destroy_checkpoint(journal); ++ spin_lock(&journal->j_list_lock); ++ break; ++ } + spin_lock(&journal->j_list_lock); + } + +diff --git a/fs/namei.c b/fs/namei.c +index c6fa07942b2a..f4f6460b6958 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -484,6 +484,24 @@ void path_put(const struct path *path) + } + EXPORT_SYMBOL(path_put); + ++/** ++ * path_connected - Verify that a path->dentry is below path->mnt.mnt_root ++ * @path: nameidata to verify ++ * ++ * Rename can sometimes move a file or directory outside of a bind ++ * mount, path_connected allows those cases to be detected.
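
path_connected(), introduced just below, leans on is_subdir() to check that the dentry still sits beneath the bind mount's root. A minimal stand-in for that ancestor walk on a toy dentry tree; the kernel's is_subdir() is rename-seqlock aware, this sketch is not:

#include <stdbool.h>
#include <stdio.h>

struct dentry { const char *name; struct dentry *parent; };

static bool is_subdir(struct dentry *d, struct dentry *root)
{
	/* walk ->parent until the root (which is its own parent) */
	for (; d; d = d->parent == d ? NULL : d->parent)
		if (d == root)
			return true;
	return false;
}

int main(void)
{
	struct dentry root = { "/", &root };
	struct dentry a = { "a", &root };  /* bind mount rooted at /a */
	struct dentry b = { "b", &a };     /* /a/b, still connected */
	struct dentry c = { "c", &root };  /* renamed out to /c */

	printf("b under a: %d\n", is_subdir(&b, &a)); /* 1: connected */
	printf("c under a: %d\n", is_subdir(&c, &a)); /* 0: escaped */
	return 0;
}
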
++ */ ++static bool path_connected(const struct path *path) ++{ ++ struct vfsmount *mnt = path->mnt; ++ ++ /* Only bind mounts can have disconnected paths */ ++ if (mnt->mnt_root == mnt->mnt_sb->s_root) ++ return true; ++ ++ return is_subdir(path->dentry, mnt->mnt_root); ++} ++ + /* + * Path walking has 2 modes, rcu-walk and ref-walk (see + * Documentation/filesystems/path-lookup.txt). In situations when we can't +@@ -1149,6 +1167,8 @@ static int follow_dotdot_rcu(struct nameidata *nd) + goto failed; + nd->path.dentry = parent; + nd->seq = seq; ++ if (unlikely(!path_connected(&nd->path))) ++ goto failed; + break; + } + if (!follow_up_rcu(&nd->path)) +@@ -1242,7 +1262,7 @@ static void follow_mount(struct path *path) + } + } + +-static void follow_dotdot(struct nameidata *nd) ++static int follow_dotdot(struct nameidata *nd) + { + if (!nd->root.mnt) + set_root(nd); +@@ -1258,6 +1278,10 @@ static void follow_dotdot(struct nameidata *nd) + /* rare case of legitimate dget_parent()... */ + nd->path.dentry = dget_parent(nd->path.dentry); + dput(old); ++ if (unlikely(!path_connected(&nd->path))) { ++ path_put(&nd->path); ++ return -ENOENT; ++ } + break; + } + if (!follow_up(&nd->path)) +@@ -1265,6 +1289,7 @@ static void follow_dotdot(struct nameidata *nd) + } + follow_mount(&nd->path); + nd->inode = nd->path.dentry->d_inode; ++ return 0; + } + + /* +@@ -1488,7 +1513,7 @@ static inline int handle_dots(struct nameidata *nd, int type) + if (follow_dotdot_rcu(nd)) + return -ECHILD; + } else +- follow_dotdot(nd); ++ return follow_dotdot(nd); + } + return 0; + } +@@ -2214,7 +2239,7 @@ mountpoint_last(struct nameidata *nd, struct path *path) + if (unlikely(nd->last_type != LAST_NORM)) { + error = handle_dots(nd, nd->last_type); + if (error) +- goto out; ++ return error; + dentry = dget(nd->path.dentry); + goto done; + } +diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h +index e1fb0f613a99..385593d748f6 100644 +--- a/include/linux/jbd2.h ++++ b/include/linux/jbd2.h +@@ -1042,8 +1042,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); + extern void jbd2_journal_commit_transaction(journal_t *); + + /* Checkpoint list management */ +-int __jbd2_journal_clean_checkpoint_list(journal_t *journal); ++int __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy); + int __jbd2_journal_remove_checkpoint(struct journal_head *); ++void jbd2_journal_destroy_checkpoint(journal_t *journal); + void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); + + +diff --git a/include/linux/security.h b/include/linux/security.h +index 2fc42d191f79..2a8f853750c7 100644 +--- a/include/linux/security.h ++++ b/include/linux/security.h +@@ -2452,7 +2452,7 @@ static inline int security_task_prctl(int option, unsigned long arg2, + unsigned long arg4, + unsigned long arg5) + { +- return cap_task_prctl(option, arg2, arg3, arg3, arg5); ++ return cap_task_prctl(option, arg2, arg3, arg4, arg5); + } + + static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) +diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h +index 9ce083960a25..f18490985fc8 100644 +--- a/include/xen/interface/sched.h ++++ b/include/xen/interface/sched.h +@@ -107,5 +107,13 @@ struct sched_watchdog { + #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ + #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ + #define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. 
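
Once the hunk below adds SHUTDOWN_soft_reset, the reason codes run 0..5. A trivial decoder over those values; the helper and table are invented for illustration, only the numeric codes come from the header:

#include <stdio.h>

#define SHUTDOWN_poweroff   0
#define SHUTDOWN_reboot     1
#define SHUTDOWN_suspend    2
#define SHUTDOWN_crash      3
#define SHUTDOWN_watchdog   4
#define SHUTDOWN_soft_reset 5

static const char *shutdown_reason(unsigned int code)
{
	static const char *names[] = {
		"poweroff", "reboot", "suspend", "crash", "watchdog",
		"soft reset",
	};
	return code < sizeof(names) / sizeof(names[0]) ? names[code]
						       : "unknown";
}

int main(void)
{
	for (unsigned int c = 0; c <= SHUTDOWN_soft_reset + 1; c++)
		printf("%u -> %s\n", c, shutdown_reason(c));
	return 0;
}
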
*/ ++/* ++ * Domain asked to perform 'soft reset' for it. The expected behavior is to ++ * reset internal Xen state for the domain returning it to the point where it ++ * was created but leaving the domain's memory contents and vCPU contexts ++ * intact. This will allow the domain to start over and set up all Xen specific ++ * interfaces again. ++ */ ++#define SHUTDOWN_soft_reset 5 + + #endif /* __XEN_PUBLIC_SCHED_H__ */ +diff --git a/ipc/msg.c b/ipc/msg.c +index 649853105a5d..4a036c619607 100644 +--- a/ipc/msg.c ++++ b/ipc/msg.c +@@ -202,13 +202,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) + return retval; + } + +- /* ipc_addid() locks msq upon success. */ +- id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); +- if (id < 0) { +- ipc_rcu_putref(msq, msg_rcu_free); +- return id; +- } +- + msq->q_stime = msq->q_rtime = 0; + msq->q_ctime = get_seconds(); + msq->q_cbytes = msq->q_qnum = 0; +@@ -218,6 +211,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) + INIT_LIST_HEAD(&msq->q_receivers); + INIT_LIST_HEAD(&msq->q_senders); + ++ /* ipc_addid() locks msq upon success. */ ++ id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); ++ if (id < 0) { ++ ipc_rcu_putref(msq, msg_rcu_free); ++ return id; ++ } ++ + ipc_unlock_object(&msq->q_perm); + rcu_read_unlock(); + +diff --git a/ipc/shm.c b/ipc/shm.c +index 76459616a7fa..ada866d768a6 100644 +--- a/ipc/shm.c ++++ b/ipc/shm.c +@@ -543,12 +543,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) + if (IS_ERR(file)) + goto no_file; + +- id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); +- if (id < 0) { +- error = id; +- goto no_id; +- } +- + shp->shm_cprid = task_tgid_vnr(current); + shp->shm_lprid = 0; + shp->shm_atim = shp->shm_dtim = 0; +@@ -558,6 +552,12 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) + shp->shm_file = file; + shp->shm_creator = current; + ++ id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); ++ if (id < 0) { ++ error = id; ++ goto no_id; ++ } ++ + /* + * shmid gets reported as "inode#" in /proc/pid/maps. + * proc-ps tools use this. Changing this will break them. +diff --git a/ipc/util.c b/ipc/util.c +index e1b4c6db8aa0..cdb19ce3f358 100644 +--- a/ipc/util.c ++++ b/ipc/util.c +@@ -277,6 +277,10 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) + rcu_read_lock(); + spin_lock(&new->lock); + ++ current_euid_egid(&euid, &egid); ++ new->cuid = new->uid = euid; ++ new->gid = new->cgid = egid; ++ + id = idr_alloc(&ids->ipcs_idr, new, + (next_id < 0) ? 
0 : ipcid_to_idx(next_id), 0, + GFP_NOWAIT); +@@ -289,10 +293,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) + + ids->in_use++; + +- current_euid_egid(&euid, &egid); +- new->cuid = new->uid = euid; +- new->gid = new->cgid = egid; +- + if (next_id < 0) { + new->seq = ids->seq++; + if (ids->seq > IPCID_SEQ_MAX) +diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c +index 095cd7230aef..56d7272199ff 100644 +--- a/kernel/irq/proc.c ++++ b/kernel/irq/proc.c +@@ -12,6 +12,7 @@ + #include <linux/seq_file.h> + #include <linux/interrupt.h> + #include <linux/kernel_stat.h> ++#include <linux/mutex.h> + + #include "internals.h" + +@@ -326,18 +327,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action) + + void register_irq_proc(unsigned int irq, struct irq_desc *desc) + { ++ static DEFINE_MUTEX(register_lock); + char name [MAX_NAMELEN]; + +- if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) ++ if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip)) + return; + ++ /* ++ * irq directories are registered only when a handler is ++ * added, not when the descriptor is created, so multiple ++ * tasks might try to register at the same time. ++ */ ++ mutex_lock(&register_lock); ++ ++ if (desc->dir) ++ goto out_unlock; ++ + memset(name, 0, MAX_NAMELEN); + sprintf(name, "%d", irq); + + /* create /proc/irq/1234 */ + desc->dir = proc_mkdir(name, root_irq_dir); + if (!desc->dir) +- return; ++ goto out_unlock; + + #ifdef CONFIG_SMP + /* create /proc/irq/<irq>/smp_affinity */ +@@ -358,6 +370,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) + + proc_create_data("spurious", 0444, desc->dir, + &irq_spurious_proc_fops, (void *)(long)irq); ++ ++out_unlock: ++ mutex_unlock(&register_lock); + } + + void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 5e973efc036e..a19262a7d70b 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2136,11 +2136,11 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) + * If a task dies, then it sets TASK_DEAD in tsk->state and calls + * schedule one last time. The schedule call will never return, and + * the scheduled task must drop that reference. +- * The test for TASK_DEAD must occur while the runqueue locks are +- * still held, otherwise prev could be scheduled on another cpu, die +- * there before we look at prev->state, and then the reference would +- * be dropped twice. +- * Manfred Spraul <manfred@colorfullife.com> ++ * ++ * We must observe prev->state before clearing prev->on_cpu (in ++ * finish_lock_switch), otherwise a concurrent wakeup can get prev ++ * running on another CPU and we could race with its RUNNING -> DEAD ++ * transition, resulting in a double drop. + */ + prev_state = prev->state; + vtime_task_switch(prev); +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index f964add50f38..835b6efa8bd6 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -994,9 +994,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) + * After ->on_cpu is cleared, the task can be moved to a different CPU. + * We must ensure this doesn't happen until the switch is completely + * finished. ++ * ++ * Pairs with the control dependency and rmb in try_to_wake_up(). 
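
The smp_store_release() introduced below (replacing the bare smp_wmb() plus assignment) is what keeps the earlier prev->state read ordered before the on_cpu store. Roughly the same pattern in portable C11 atomics; the kernel primitives map only loosely onto these, so treat it as an analogy:

#include <stdatomic.h>
#include <stdio.h>

struct task { atomic_long state; atomic_int on_cpu; };

static long finish_switch(struct task *prev)
{
	/* read state first; the release store below keeps this load
	 * ordered before the on_cpu update becomes visible */
	long prev_state = atomic_load_explicit(&prev->state,
					       memory_order_relaxed);

	/* after this, a waker may run prev elsewhere; without release
	 * ordering it could complete the RUNNING -> DEAD transition
	 * before our state read, dropping the task reference twice */
	atomic_store_explicit(&prev->on_cpu, 0, memory_order_release);
	return prev_state;
}

int main(void)
{
	struct task t;
	atomic_init(&t.state, 1 /* stand-in for TASK_DEAD */);
	atomic_init(&t.on_cpu, 1);
	printf("prev_state=%ld, on_cpu=%d\n", finish_switch(&t),
	       atomic_load(&t.on_cpu));
	return 0;
}
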
+ */ +- smp_wmb(); +- prev->on_cpu = 0; ++ smp_store_release(&prev->on_cpu, 0); + #endif + #ifdef CONFIG_DEBUG_SPINLOCK + /* this is a valid case when another task releases the spinlock */ +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index c3e8660cb616..86cbb2f13715 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -2615,6 +2615,14 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, + continue; + + /* ++ * Shared VMAs have their own reserves and do not affect ++ * MAP_PRIVATE accounting but it is possible that a shared ++ * VMA is using the same page so check and skip such VMAs. ++ */ ++ if (iter_vma->vm_flags & VM_MAYSHARE) ++ continue; ++ ++ /* + * Unmap the page from other VMAs without their own reserves. + * They get marked to be SIGKILLed if they fault in these + * areas. This is because a future no-page fault on this VMA +diff --git a/mm/slab.c b/mm/slab.c +index 0b1c2a58559d..844ea1e89568 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -2271,9 +2271,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) + size += BYTES_PER_WORD; + } + #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) +- if (size >= kmalloc_size(INDEX_NODE + 1) +- && cachep->object_size > cache_line_size() +- && ALIGN(size, cachep->align) < PAGE_SIZE) { ++ /* ++ * To activate debug pagealloc, off-slab management is necessary ++ * requirement. In early phase of initialization, small sized slab ++ * doesn't get initialized so it would not be possible. So, we need ++ * to check size >= 256. It guarantees that all necessary small ++ * sized slab is initialized in current slab initialization sequence. ++ */ ++ if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) && ++ size >= 256 && cachep->object_size > cache_line_size() && ++ ALIGN(size, cachep->align) < PAGE_SIZE) { + cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); + size = PAGE_SIZE; + } +diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c +index aeedc3a961a1..99ae718b79be 100644 +--- a/net/core/fib_rules.c ++++ b/net/core/fib_rules.c +@@ -631,7 +631,7 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb, + err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, RTM_NEWRULE, + NLM_F_MULTI, ops); +- if (err) ++ if (err < 0) + break; + skip: + idx++; +diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c +index a8027e73b6a2..a108953a8c2c 100644 +--- a/net/netfilter/ipvs/ip_vs_sync.c ++++ b/net/netfilter/ipvs/ip_vs_sync.c +@@ -612,7 +612,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp, + pkts = atomic_add_return(1, &cp->in_pkts); + else + pkts = sysctl_sync_threshold(ipvs); +- ip_vs_sync_conn(net, cp->control, pkts); ++ ip_vs_sync_conn(net, cp, pkts); + } + } + +diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c +index 1692e7534759..c3d204973dbc 100644 +--- a/net/netfilter/ipvs/ip_vs_xmit.c ++++ b/net/netfilter/ipvs/ip_vs_xmit.c +@@ -129,7 +129,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr, + + memset(&fl4, 0, sizeof(fl4)); + fl4.daddr = daddr; +- fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0; + fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ? 
+ FLOWI_FLAG_KNOWN_NH : 0; + +diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c +index 4fd1ca94fd4a..71c46f463969 100644 +--- a/net/netfilter/nf_conntrack_expect.c ++++ b/net/netfilter/nf_conntrack_expect.c +@@ -202,7 +202,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a, + a->mask.src.u3.all[count] & b->mask.src.u3.all[count]; + } + +- return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask); ++ return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) && ++ nf_ct_zone(a->master) == nf_ct_zone(b->master); + } + + static inline int expect_matches(const struct nf_conntrack_expect *a, +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c +index b9f0e0374322..7d5dcd2a9092 100644 +--- a/net/netfilter/nf_conntrack_netlink.c ++++ b/net/netfilter/nf_conntrack_netlink.c +@@ -2927,11 +2927,6 @@ ctnetlink_create_expect(struct net *net, u16 zone, + } + + err = nf_ct_expect_related_report(exp, portid, report); +- if (err < 0) +- goto err_exp; +- +- return 0; +-err_exp: + nf_ct_expect_put(exp); + err_ct: + nf_ct_put(ct); +diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c +index 969589590814..82273b83de8e 100644 +--- a/net/netfilter/nft_compat.c ++++ b/net/netfilter/nft_compat.c +@@ -594,6 +594,13 @@ struct nft_xt { + + static struct nft_expr_type nft_match_type; + ++static bool nft_match_cmp(const struct xt_match *match, ++ const char *name, u32 rev, u32 family) ++{ ++ return strcmp(match->name, name) == 0 && match->revision == rev && ++ (match->family == NFPROTO_UNSPEC || match->family == family); ++} ++ + static const struct nft_expr_ops * + nft_match_select_ops(const struct nft_ctx *ctx, + const struct nlattr * const tb[]) +@@ -601,7 +608,7 @@ nft_match_select_ops(const struct nft_ctx *ctx, + struct nft_xt *nft_match; + struct xt_match *match; + char *mt_name; +- __u32 rev, family; ++ u32 rev, family; + + if (tb[NFTA_MATCH_NAME] == NULL || + tb[NFTA_MATCH_REV] == NULL || +@@ -616,8 +623,7 @@ nft_match_select_ops(const struct nft_ctx *ctx, + list_for_each_entry(nft_match, &nft_match_list, head) { + struct xt_match *match = nft_match->ops.data; + +- if (strcmp(match->name, mt_name) == 0 && +- match->revision == rev && match->family == family) { ++ if (nft_match_cmp(match, mt_name, rev, family)) { + if (!try_module_get(match->me)) + return ERR_PTR(-ENOENT); + +@@ -669,6 +675,13 @@ static LIST_HEAD(nft_target_list); + + static struct nft_expr_type nft_target_type; + ++static bool nft_target_cmp(const struct xt_target *tg, ++ const char *name, u32 rev, u32 family) ++{ ++ return strcmp(tg->name, name) == 0 && tg->revision == rev && ++ (tg->family == NFPROTO_UNSPEC || tg->family == family); ++} ++ + static const struct nft_expr_ops * + nft_target_select_ops(const struct nft_ctx *ctx, + const struct nlattr * const tb[]) +@@ -676,7 +689,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, + struct nft_xt *nft_target; + struct xt_target *target; + char *tg_name; +- __u32 rev, family; ++ u32 rev, family; + + if (tb[NFTA_TARGET_NAME] == NULL || + tb[NFTA_TARGET_REV] == NULL || +@@ -691,8 +704,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, + list_for_each_entry(nft_target, &nft_target_list, head) { + struct xt_target *target = nft_target->ops.data; + +- if (strcmp(target->name, tg_name) == 0 && +- target->revision == rev && target->family == family) { ++ if (nft_target_cmp(target, tg_name, rev, family)) { + if (!try_module_get(target->me)) + return ERR_PTR(-ENOENT); + 
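
Both nft_match_cmp() and nft_target_cmp() above relax the cached-extension lookup so that an NFPROTO_UNSPEC entry satisfies any family. The predicate restated over toy structs; only the comparison logic mirrors the patch, the values and main() are assumptions:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NFPROTO_UNSPEC 0
#define NFPROTO_IPV4   2
#define NFPROTO_IPV6   10

struct xt_match { const char *name; unsigned int revision; unsigned int family; };

static bool nft_match_cmp(const struct xt_match *match,
			  const char *name, unsigned int rev,
			  unsigned int family)
{
	return strcmp(match->name, name) == 0 && match->revision == rev &&
	       (match->family == NFPROTO_UNSPEC || match->family == family);
}

int main(void)
{
	struct xt_match generic = { "limit", 0, NFPROTO_UNSPEC };
	struct xt_match v4only  = { "limit", 0, NFPROTO_IPV4 };

	/* family-agnostic entry satisfies an IPv4 request */
	printf("generic vs ipv4 req: %d\n",
	       nft_match_cmp(&generic, "limit", 0, NFPROTO_IPV4)); /* 1 */
	/* family-specific entry still refuses another family */
	printf("v4only vs ipv6 req:  %d\n",
	       nft_match_cmp(&v4only, "limit", 0, NFPROTO_IPV6)); /* 0 */
	return 0;
}
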
+diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig +index 885683a3b0bd..e0406211716b 100644 +--- a/sound/arm/Kconfig ++++ b/sound/arm/Kconfig +@@ -9,6 +9,14 @@ menuconfig SND_ARM + Drivers that are implemented on ASoC can be found in + "ALSA for SoC audio support" section. + ++config SND_PXA2XX_LIB ++ tristate ++ select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97 ++ select SND_DMAENGINE_PCM ++ ++config SND_PXA2XX_LIB_AC97 ++ bool ++ + if SND_ARM + + config SND_ARMAACI +@@ -21,13 +29,6 @@ config SND_PXA2XX_PCM + tristate + select SND_PCM + +-config SND_PXA2XX_LIB +- tristate +- select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97 +- +-config SND_PXA2XX_LIB_AC97 +- bool +- + config SND_PXA2XX_AC97 + tristate "AC97 driver for the Intel PXA2xx chip" + depends on ARCH_PXA +diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c +index 7b0aac9d27ca..3c90743fa50b 100644 +--- a/sound/pci/hda/patch_cirrus.c ++++ b/sound/pci/hda/patch_cirrus.c +@@ -637,6 +637,7 @@ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = { + SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11), + SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6), + SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6), ++ SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11), + {} /* terminator */ + }; + +diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c +index 2f6357578616..1b6cbbc95456 100644 +--- a/sound/soc/dwc/designware_i2s.c ++++ b/sound/soc/dwc/designware_i2s.c +@@ -100,10 +100,10 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream) + + if (stream == SNDRV_PCM_STREAM_PLAYBACK) { + for (i = 0; i < 4; i++) +- i2s_write_reg(dev->i2s_base, TOR(i), 0); ++ i2s_read_reg(dev->i2s_base, TOR(i)); + } else { + for (i = 0; i < 4; i++) +- i2s_write_reg(dev->i2s_base, ROR(i), 0); ++ i2s_read_reg(dev->i2s_base, ROR(i)); + } + } + +diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig +index 6473052b6899..9f4ea3f2bbb5 100644 +--- a/sound/soc/pxa/Kconfig ++++ b/sound/soc/pxa/Kconfig +@@ -1,7 +1,6 @@ + config SND_PXA2XX_SOC + tristate "SoC Audio for the Intel PXA2xx chip" + depends on ARCH_PXA +- select SND_ARM + select SND_PXA2XX_LIB + help + Say Y or M if you want to add support for codecs attached to +@@ -24,7 +23,6 @@ config SND_PXA2XX_AC97 + config SND_PXA2XX_SOC_AC97 + tristate + select AC97_BUS +- select SND_ARM + select SND_PXA2XX_LIB_AC97 + select SND_SOC_AC97_BUS + +diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c +index ae956e3f4b9d..593e3202fc35 100644 +--- a/sound/soc/pxa/pxa2xx-ac97.c ++++ b/sound/soc/pxa/pxa2xx-ac97.c +@@ -49,7 +49,7 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = { + .reset = pxa2xx_ac97_cold_reset, + }; + +-static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12; ++static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11; + static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = { + .addr = __PREG(PCDR), + .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, +@@ -57,7 +57,7 @@ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = { + .filter_data = &pxa2xx_ac97_pcm_stereo_in_req, + }; + +-static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11; ++static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12; + static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = { + .addr = __PREG(PCDR), + .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, +diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c +index daf61abc3670..646b66703bd8 100644 +--- 
a/sound/synth/emux/emux_oss.c ++++ b/sound/synth/emux/emux_oss.c +@@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct snd_emux *emu) + struct snd_seq_oss_reg *arg; + struct snd_seq_device *dev; + +- if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS, ++ /* using device#1 here for avoiding conflicts with OPL3 */ ++ if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS, + sizeof(struct snd_seq_oss_reg), &dev) < 0) + return; + +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c +index 8b0e1c9234d9..8801d5cdafae 100644 +--- a/tools/perf/builtin-stat.c ++++ b/tools/perf/builtin-stat.c +@@ -1108,7 +1108,7 @@ static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg) + static void print_aggr(char *prefix) + { + struct perf_evsel *counter; +- int cpu, cpu2, s, s2, id, nr; ++ int cpu, s, s2, id, nr; + double uval; + u64 ena, run, val; + +@@ -1121,8 +1121,7 @@ static void print_aggr(char *prefix) + val = ena = run = 0; + nr = 0; + for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) { +- cpu2 = perf_evsel__cpus(counter)->map[cpu]; +- s2 = aggr_get_id(evsel_list->cpus, cpu2); ++ s2 = aggr_get_id(perf_evsel__cpus(counter), cpu); + if (s2 != id) + continue; + val += counter->counts->cpu[cpu].val; +diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c +index 893f8e2df928..96592f7bfa9f 100644 +--- a/tools/perf/util/header.c ++++ b/tools/perf/util/header.c +@@ -1715,7 +1715,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused, + if (ph->needs_swap) + nr = bswap_32(nr); + +- ph->env.nr_cpus_online = nr; ++ ph->env.nr_cpus_avail = nr; + + ret = readn(fd, &nr, sizeof(nr)); + if (ret != sizeof(nr)) +@@ -1724,7 +1724,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused, + if (ph->needs_swap) + nr = bswap_32(nr); + +- ph->env.nr_cpus_avail = nr; ++ ph->env.nr_cpus_online = nr; + return 0; + } + +diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c +index e4e6249b87d4..24a506974610 100644 +--- a/tools/perf/util/hist.c ++++ b/tools/perf/util/hist.c +@@ -160,6 +160,9 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h) + hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12); + hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12); + ++ if (h->srcline) ++ hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline)); ++ + if (h->transaction) + hists__new_col_len(hists, HISTC_TRANSACTION, + hist_entry__transaction_len()); +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c +index 516d19fb999b..8bd904bd9009 100644 +--- a/tools/perf/util/symbol-elf.c ++++ b/tools/perf/util/symbol-elf.c +@@ -1100,8 +1100,6 @@ out_close: + static int kcore__init(struct kcore *kcore, char *filename, int elfclass, + bool temp) + { +- GElf_Ehdr *ehdr; +- + kcore->elfclass = elfclass; + + if (temp) +@@ -1118,9 +1116,7 @@ static int kcore__init(struct kcore *kcore, char *filename, int elfclass, + if (!gelf_newehdr(kcore->elf, elfclass)) + goto out_end; + +- ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr); +- if (!ehdr) +- goto out_end; ++ memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr)); + + return 0; + +@@ -1177,23 +1173,18 @@ static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count) + static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset, + u64 addr, u64 len) + { +- GElf_Phdr gphdr; +- GElf_Phdr *phdr; +- +- phdr = gelf_getphdr(kcore->elf, idx, &gphdr); +- if (!phdr) +- return -1; +- +- phdr->p_type = PT_LOAD; +- phdr->p_flags = PF_R | PF_W | PF_X; +- 
phdr->p_offset = offset; +- phdr->p_vaddr = addr; +- phdr->p_paddr = 0; +- phdr->p_filesz = len; +- phdr->p_memsz = len; +- phdr->p_align = page_size; +- +- if (!gelf_update_phdr(kcore->elf, idx, phdr)) ++ GElf_Phdr phdr = { ++ .p_type = PT_LOAD, ++ .p_flags = PF_R | PF_W | PF_X, ++ .p_offset = offset, ++ .p_vaddr = addr, ++ .p_paddr = 0, ++ .p_filesz = len, ++ .p_memsz = len, ++ .p_align = page_size, ++ }; ++ ++ if (!gelf_update_phdr(kcore->elf, idx, &phdr)) + return -1; + + return 0; +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index eed250e9c218..d7d950f51b55 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -2797,10 +2797,25 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus) + static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, + const struct kvm_io_range *r2) + { +- if (r1->addr < r2->addr) ++ gpa_t addr1 = r1->addr; ++ gpa_t addr2 = r2->addr; ++ ++ if (addr1 < addr2) + return -1; +- if (r1->addr + r1->len > r2->addr + r2->len) ++ ++ /* If r2->len == 0, match the exact address. If r2->len != 0, ++ * accept any overlapping write. Any order is acceptable for ++ * overlapping ranges, because kvm_io_bus_get_first_dev ensures ++ * we process all of them. ++ */ ++ if (r2->len) { ++ addr1 += r1->len; ++ addr2 += r2->len; ++ } ++ ++ if (addr1 > addr2) + return 1; ++ + return 0; + } +
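
A standalone check of the kvm_io_bus_cmp() above: a zero-length r2 matches only its exact address, while a sized r2 compares equal to an access that starts inside it. gpa_t is reduced to uint64_t and the sample ranges are invented; the comparator body follows the hunk:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;
struct kvm_io_range { gpa_t addr; int len; };

static int kvm_io_bus_cmp(const struct kvm_io_range *r1,
			  const struct kvm_io_range *r2)
{
	gpa_t addr1 = r1->addr;
	gpa_t addr2 = r2->addr;

	if (addr1 < addr2)
		return -1;
	if (r2->len) { /* overlap check rather than containment */
		addr1 += r1->len;
		addr2 += r2->len;
	}
	if (addr1 > addr2)
		return 1;
	return 0;
}

int main(void)
{
	struct kvm_io_range dev   = { 0x1000, 8 }; /* registered range */
	struct kvm_io_range zero  = { 0x1000, 0 }; /* zero-length range */
	struct kvm_io_range inner = { 0x1004, 4 }; /* write inside dev */
	struct kvm_io_range probe = { 0x1000, 4 }; /* hits zero's address */
	struct kvm_io_range off   = { 0x1001, 4 }; /* misses zero's address */

	printf("inner vs dev:  %d\n", kvm_io_bus_cmp(&inner, &dev));  /* 0 */
	printf("probe vs zero: %d\n", kvm_io_bus_cmp(&probe, &zero)); /* 0 */
	printf("off vs zero:   %d\n", kvm_io_bus_cmp(&off, &zero));   /* 1 */
	return 0;
}
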