From 910ecd3eef3cf941070f0586d5de977a58c16ba7 Mon Sep 17 00:00:00 2001 From: Mike Pagano Date: Sat, 4 Apr 2015 19:53:54 -0400 Subject: Linux patch 3.18.11 --- 0000_README | 4 + 1010_linux-3.18.11.patch | 3765 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 3769 insertions(+) create mode 100644 1010_linux-3.18.11.patch diff --git a/0000_README b/0000_README index ab13f61a..7616ed8b 100644 --- a/0000_README +++ b/0000_README @@ -83,6 +83,10 @@ Patch: 1009_linux-3.18.10.patch From: http://www.kernel.org Desc: Linux 3.18.10 +Patch: 1010_linux-3.18.11.patch +From: http://www.kernel.org +Desc: Linux 3.18.11 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1010_linux-3.18.11.patch b/1010_linux-3.18.11.patch new file mode 100644 index 00000000..ed524c85 --- /dev/null +++ b/1010_linux-3.18.11.patch @@ -0,0 +1,3765 @@ +diff --git a/Makefile b/Makefile +index d4ce2cb674c8..da8dc1350de3 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 18 +-SUBLEVEL = 10 ++SUBLEVEL = 11 + EXTRAVERSION = + NAME = Diseased Newt + +diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi +index 712edce7d6fb..071b56aa0c7e 100644 +--- a/arch/arm/boot/dts/am33xx-clocks.dtsi ++++ b/arch/arm/boot/dts/am33xx-clocks.dtsi +@@ -99,7 +99,7 @@ + ehrpwm0_tbclk: ehrpwm0_tbclk@44e10664 { + #clock-cells = <0>; + compatible = "ti,gate-clock"; +- clocks = <&dpll_per_m2_ck>; ++ clocks = <&l4ls_gclk>; + ti,bit-shift = <0>; + reg = <0x0664>; + }; +@@ -107,7 +107,7 @@ + ehrpwm1_tbclk: ehrpwm1_tbclk@44e10664 { + #clock-cells = <0>; + compatible = "ti,gate-clock"; +- clocks = <&dpll_per_m2_ck>; ++ clocks = <&l4ls_gclk>; + ti,bit-shift = <1>; + reg = <0x0664>; + }; +@@ -115,7 +115,7 @@ + ehrpwm2_tbclk: ehrpwm2_tbclk@44e10664 { + #clock-cells = <0>; + compatible = "ti,gate-clock"; +- clocks = <&dpll_per_m2_ck>; ++ clocks = <&l4ls_gclk>; + ti,bit-shift = <2>; + reg = <0x0664>; + }; +diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi +index c7dc9dab93a4..cfb49686ab6a 100644 +--- a/arch/arm/boot/dts/am43xx-clocks.dtsi ++++ b/arch/arm/boot/dts/am43xx-clocks.dtsi +@@ -107,7 +107,7 @@ + ehrpwm0_tbclk: ehrpwm0_tbclk { + #clock-cells = <0>; + compatible = "ti,gate-clock"; +- clocks = <&dpll_per_m2_ck>; ++ clocks = <&l4ls_gclk>; + ti,bit-shift = <0>; + reg = <0x0664>; + }; +@@ -115,7 +115,7 @@ + ehrpwm1_tbclk: ehrpwm1_tbclk { + #clock-cells = <0>; + compatible = "ti,gate-clock"; +- clocks = <&dpll_per_m2_ck>; ++ clocks = <&l4ls_gclk>; + ti,bit-shift = <1>; + reg = <0x0664>; + }; +@@ -123,7 +123,7 @@ + ehrpwm2_tbclk: ehrpwm2_tbclk { + #clock-cells = <0>; + compatible = "ti,gate-clock"; +- clocks = <&dpll_per_m2_ck>; ++ clocks = <&l4ls_gclk>; + ti,bit-shift = <2>; + reg = <0x0664>; + }; +@@ -131,7 +131,7 @@ + ehrpwm3_tbclk: ehrpwm3_tbclk { + #clock-cells = <0>; + compatible = "ti,gate-clock"; +- clocks = <&dpll_per_m2_ck>; ++ clocks = <&l4ls_gclk>; + ti,bit-shift = <4>; + reg = <0x0664>; + }; +@@ -139,7 +139,7 @@ + ehrpwm4_tbclk: ehrpwm4_tbclk { + #clock-cells = <0>; + compatible = "ti,gate-clock"; +- clocks = <&dpll_per_m2_ck>; ++ clocks = <&l4ls_gclk>; + ti,bit-shift = <5>; + reg = <0x0664>; + }; +@@ -147,7 +147,7 @@ + ehrpwm5_tbclk: ehrpwm5_tbclk { + #clock-cells = <0>; + compatible = "ti,gate-clock"; +- clocks = <&dpll_per_m2_ck>; ++ clocks = <&l4ls_gclk>; + ti,bit-shift = <6>; + reg = <0x0664>; + }; +diff --git 
a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi +index 2c05b3f017fa..64c0f75b5444 100644 +--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi ++++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi +@@ -243,10 +243,18 @@ + ti,invert-autoidle-bit; + }; + ++ dpll_core_byp_mux: dpll_core_byp_mux { ++ #clock-cells = <0>; ++ compatible = "ti,mux-clock"; ++ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; ++ ti,bit-shift = <23>; ++ reg = <0x012c>; ++ }; ++ + dpll_core_ck: dpll_core_ck { + #clock-cells = <0>; + compatible = "ti,omap4-dpll-core-clock"; +- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; ++ clocks = <&sys_clkin1>, <&dpll_core_byp_mux>; + reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; + }; + +@@ -309,10 +317,18 @@ + clock-div = <1>; + }; + ++ dpll_dsp_byp_mux: dpll_dsp_byp_mux { ++ #clock-cells = <0>; ++ compatible = "ti,mux-clock"; ++ clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>; ++ ti,bit-shift = <23>; ++ reg = <0x0240>; ++ }; ++ + dpll_dsp_ck: dpll_dsp_ck { + #clock-cells = <0>; + compatible = "ti,omap4-dpll-clock"; +- clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>; ++ clocks = <&sys_clkin1>, <&dpll_dsp_byp_mux>; + reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>; + }; + +@@ -335,10 +351,18 @@ + clock-div = <1>; + }; + ++ dpll_iva_byp_mux: dpll_iva_byp_mux { ++ #clock-cells = <0>; ++ compatible = "ti,mux-clock"; ++ clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>; ++ ti,bit-shift = <23>; ++ reg = <0x01ac>; ++ }; ++ + dpll_iva_ck: dpll_iva_ck { + #clock-cells = <0>; + compatible = "ti,omap4-dpll-clock"; +- clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>; ++ clocks = <&sys_clkin1>, <&dpll_iva_byp_mux>; + reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; + }; + +@@ -361,10 +385,18 @@ + clock-div = <1>; + }; + ++ dpll_gpu_byp_mux: dpll_gpu_byp_mux { ++ #clock-cells = <0>; ++ compatible = "ti,mux-clock"; ++ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; ++ ti,bit-shift = <23>; ++ reg = <0x02e4>; ++ }; ++ + dpll_gpu_ck: dpll_gpu_ck { + #clock-cells = <0>; + compatible = "ti,omap4-dpll-clock"; +- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; ++ clocks = <&sys_clkin1>, <&dpll_gpu_byp_mux>; + reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>; + }; + +@@ -398,10 +430,18 @@ + clock-div = <1>; + }; + ++ dpll_ddr_byp_mux: dpll_ddr_byp_mux { ++ #clock-cells = <0>; ++ compatible = "ti,mux-clock"; ++ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; ++ ti,bit-shift = <23>; ++ reg = <0x021c>; ++ }; ++ + dpll_ddr_ck: dpll_ddr_ck { + #clock-cells = <0>; + compatible = "ti,omap4-dpll-clock"; +- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; ++ clocks = <&sys_clkin1>, <&dpll_ddr_byp_mux>; + reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>; + }; + +@@ -416,10 +456,18 @@ + ti,invert-autoidle-bit; + }; + ++ dpll_gmac_byp_mux: dpll_gmac_byp_mux { ++ #clock-cells = <0>; ++ compatible = "ti,mux-clock"; ++ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; ++ ti,bit-shift = <23>; ++ reg = <0x02b4>; ++ }; ++ + dpll_gmac_ck: dpll_gmac_ck { + #clock-cells = <0>; + compatible = "ti,omap4-dpll-clock"; +- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; ++ clocks = <&sys_clkin1>, <&dpll_gmac_byp_mux>; + reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>; + }; + +@@ -482,10 +530,18 @@ + clock-div = <1>; + }; + ++ dpll_eve_byp_mux: dpll_eve_byp_mux { ++ #clock-cells = <0>; ++ compatible = "ti,mux-clock"; ++ clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>; ++ ti,bit-shift = <23>; ++ reg = <0x0290>; ++ }; ++ + dpll_eve_ck: dpll_eve_ck { + #clock-cells = <0>; + compatible = "ti,omap4-dpll-clock"; +- clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>; ++ clocks = 
<&sys_clkin1>, <&dpll_eve_byp_mux>; + reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>; + }; + +@@ -1249,10 +1305,18 @@ + clock-div = <1>; + }; + ++ dpll_per_byp_mux: dpll_per_byp_mux { ++ #clock-cells = <0>; ++ compatible = "ti,mux-clock"; ++ clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>; ++ ti,bit-shift = <23>; ++ reg = <0x014c>; ++ }; ++ + dpll_per_ck: dpll_per_ck { + #clock-cells = <0>; + compatible = "ti,omap4-dpll-clock"; +- clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>; ++ clocks = <&sys_clkin1>, <&dpll_per_byp_mux>; + reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; + }; + +@@ -1275,10 +1339,18 @@ + clock-div = <1>; + }; + ++ dpll_usb_byp_mux: dpll_usb_byp_mux { ++ #clock-cells = <0>; ++ compatible = "ti,mux-clock"; ++ clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>; ++ ti,bit-shift = <23>; ++ reg = <0x018c>; ++ }; ++ + dpll_usb_ck: dpll_usb_ck { + #clock-cells = <0>; + compatible = "ti,omap4-dpll-j-type-clock"; +- clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>; ++ clocks = <&sys_clkin1>, <&dpll_usb_byp_mux>; + reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; + }; + +diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi +index baf2f00d519a..b57e554dba4e 100644 +--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi ++++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi +@@ -35,6 +35,7 @@ + regulator-max-microvolt = <5000000>; + gpio = <&gpio3 22 0>; + enable-active-high; ++ vin-supply = <&swbst_reg>; + }; + + reg_usb_h1_vbus: regulator@1 { +@@ -45,6 +46,7 @@ + regulator-max-microvolt = <5000000>; + gpio = <&gpio1 29 0>; + enable-active-high; ++ vin-supply = <&swbst_reg>; + }; + + reg_audio: regulator@2 { +diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped +index 71e5fc7cfb18..1d1800f71c5b 100644 +--- a/arch/arm/crypto/aesbs-core.S_shipped ++++ b/arch/arm/crypto/aesbs-core.S_shipped +@@ -58,14 +58,18 @@ + # define VFP_ABI_FRAME 0 + # define BSAES_ASM_EXTENDED_KEY + # define XTS_CHAIN_TWEAK +-# define __ARM_ARCH__ 7 ++# define __ARM_ARCH__ __LINUX_ARM_ARCH__ ++# define __ARM_MAX_ARCH__ 7 + #endif + + #ifdef __thumb__ + # define adrl adr + #endif + +-#if __ARM_ARCH__>=7 ++#if __ARM_MAX_ARCH__>=7 ++.arch armv7-a ++.fpu neon ++ + .text + .syntax unified @ ARMv7-capable assembler is expected to handle this + #ifdef __thumb2__ +@@ -74,8 +78,6 @@ + .code 32 + #endif + +-.fpu neon +- + .type _bsaes_decrypt8,%function + .align 4 + _bsaes_decrypt8: +@@ -2095,9 +2097,11 @@ bsaes_xts_decrypt: + vld1.8 {q8}, [r0] @ initial tweak + adr r2, .Lxts_magic + ++#ifndef XTS_CHAIN_TWEAK + tst r9, #0xf @ if not multiple of 16 + it ne @ Thumb2 thing, sanity check in ARM + subne r9, #0x10 @ subtract another 16 bytes ++#endif + subs r9, #0x80 + + blo .Lxts_dec_short +diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl +index be068db960ee..a4d3856e7d24 100644 +--- a/arch/arm/crypto/bsaes-armv7.pl ++++ b/arch/arm/crypto/bsaes-armv7.pl +@@ -701,14 +701,18 @@ $code.=<<___; + # define VFP_ABI_FRAME 0 + # define BSAES_ASM_EXTENDED_KEY + # define XTS_CHAIN_TWEAK +-# define __ARM_ARCH__ 7 ++# define __ARM_ARCH__ __LINUX_ARM_ARCH__ ++# define __ARM_MAX_ARCH__ 7 + #endif + + #ifdef __thumb__ + # define adrl adr + #endif + +-#if __ARM_ARCH__>=7 ++#if __ARM_MAX_ARCH__>=7 ++.arch armv7-a ++.fpu neon ++ + .text + .syntax unified @ ARMv7-capable assembler is expected to handle this + #ifdef __thumb2__ +@@ -717,8 +721,6 @@ $code.=<<___; + .code 32 + #endif + +-.fpu neon +- + .type _bsaes_decrypt8,%function + .align 4 + _bsaes_decrypt8: +@@ -2076,9 
+2078,11 @@ bsaes_xts_decrypt: + vld1.8 {@XMM[8]}, [r0] @ initial tweak + adr $magic, .Lxts_magic + ++#ifndef XTS_CHAIN_TWEAK + tst $len, #0xf @ if not multiple of 16 + it ne @ Thumb2 thing, sanity check in ARM + subne $len, #0x10 @ subtract another 16 bytes ++#endif + subs $len, #0x80 + + blo .Lxts_dec_short +diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h +index c5101dcb4fb0..1d4df3b70ebc 100644 +--- a/arch/arm/mach-at91/pm.h ++++ b/arch/arm/mach-at91/pm.h +@@ -45,7 +45,7 @@ static inline void at91rm9200_standby(void) + " mcr p15, 0, %0, c7, c0, 4\n\t" + " str %5, [%1, %2]" + : +- : "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR), ++ : "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR), + "r" (1), "r" (AT91RM9200_SDRAMC_SRR), + "r" (lpr)); + } +diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h +index a82c0c5c8b52..53d9c354219f 100644 +--- a/arch/arm64/include/asm/tlb.h ++++ b/arch/arm64/include/asm/tlb.h +@@ -19,10 +19,6 @@ + #ifndef __ASM_TLB_H + #define __ASM_TLB_H + +-#define __tlb_remove_pmd_tlb_entry __tlb_remove_pmd_tlb_entry +- +-#include +- + #include + #include + +@@ -37,71 +33,23 @@ static inline void __tlb_remove_table(void *_table) + #define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry) + #endif /* CONFIG_HAVE_RCU_TABLE_FREE */ + +-/* +- * There's three ways the TLB shootdown code is used: +- * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region(). +- * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called. +- * 2. Unmapping all vmas. See exit_mmap(). +- * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called. +- * Page tables will be freed. +- * 3. Unmapping argument pages. See shift_arg_pages(). +- * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called. +- */ ++#include ++ + static inline void tlb_flush(struct mmu_gather *tlb) + { + if (tlb->fullmm) { + flush_tlb_mm(tlb->mm); +- } else if (tlb->end > 0) { ++ } else { + struct vm_area_struct vma = { .vm_mm = tlb->mm, }; + flush_tlb_range(&vma, tlb->start, tlb->end); +- tlb->start = TASK_SIZE; +- tlb->end = 0; +- } +-} +- +-static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr) +-{ +- if (!tlb->fullmm) { +- tlb->start = min(tlb->start, addr); +- tlb->end = max(tlb->end, addr + PAGE_SIZE); +- } +-} +- +-/* +- * Memorize the range for the TLB flush. +- */ +-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, +- unsigned long addr) +-{ +- tlb_add_flush(tlb, addr); +-} +- +-/* +- * In the case of tlb vma handling, we can optimise these away in the +- * case where we're doing a full MM flush. When we're doing a munmap, +- * the vmas are adjusted to only cover the region to be torn down. 
+- */ +-static inline void tlb_start_vma(struct mmu_gather *tlb, +- struct vm_area_struct *vma) +-{ +- if (!tlb->fullmm) { +- tlb->start = TASK_SIZE; +- tlb->end = 0; + } + } + +-static inline void tlb_end_vma(struct mmu_gather *tlb, +- struct vm_area_struct *vma) +-{ +- if (!tlb->fullmm) +- tlb_flush(tlb); +-} +- + static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, + unsigned long addr) + { ++ __flush_tlb_pgtable(tlb->mm, addr); + pgtable_page_dtor(pte); +- tlb_add_flush(tlb, addr); + tlb_remove_entry(tlb, pte); + } + +@@ -109,7 +57,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, + static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, + unsigned long addr) + { +- tlb_add_flush(tlb, addr); ++ __flush_tlb_pgtable(tlb->mm, addr); + tlb_remove_entry(tlb, virt_to_page(pmdp)); + } + #endif +@@ -118,15 +66,9 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, + static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, + unsigned long addr) + { +- tlb_add_flush(tlb, addr); ++ __flush_tlb_pgtable(tlb->mm, addr); + tlb_remove_entry(tlb, virt_to_page(pudp)); + } + #endif + +-static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, +- unsigned long address) +-{ +- tlb_add_flush(tlb, address); +-} +- + #endif +diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h +index 73f0ce570fb3..8b8d8cb46e01 100644 +--- a/arch/arm64/include/asm/tlbflush.h ++++ b/arch/arm64/include/asm/tlbflush.h +@@ -149,6 +149,19 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end + } + + /* ++ * Used to invalidate the TLB (walk caches) corresponding to intermediate page ++ * table levels (pgd/pud/pmd). ++ */ ++static inline void __flush_tlb_pgtable(struct mm_struct *mm, ++ unsigned long uaddr) ++{ ++ unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48); ++ ++ dsb(ishst); ++ asm("tlbi vae1is, %0" : : "r" (addr)); ++ dsb(ish); ++} ++/* + * On AArch64, the cache coherency is handled via the set_pte_at() function. 
+ */ + static inline void update_mmu_cache(struct vm_area_struct *vma, +diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c +index d92094203913..df34a70caca1 100644 +--- a/arch/arm64/mm/dma-mapping.c ++++ b/arch/arm64/mm/dma-mapping.c +@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p) + } + early_param("coherent_pool", early_coherent_pool); + +-static void *__alloc_from_pool(size_t size, struct page **ret_page) ++static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) + { + unsigned long val; + void *ptr = NULL; +@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) + + *ret_page = phys_to_page(phys); + ptr = (void *)val; ++ if (flags & __GFP_ZERO) ++ memset(ptr, 0, size); + } + + return ptr; +@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, + flags |= GFP_DMA; + if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { + struct page *page; ++ void *addr; + + size = PAGE_ALIGN(size); + page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, +@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, + return NULL; + + *dma_handle = phys_to_dma(dev, page_to_phys(page)); +- return page_address(page); ++ addr = page_address(page); ++ if (flags & __GFP_ZERO) ++ memset(addr, 0, size); ++ return addr; + } else { + return swiotlb_alloc_coherent(dev, size, dma_handle, flags); + } +@@ -145,7 +151,7 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size, + + if (!(flags & __GFP_WAIT)) { + struct page *page = NULL; +- void *addr = __alloc_from_pool(size, &page); ++ void *addr = __alloc_from_pool(size, &page, flags); + + if (addr) + *dma_handle = phys_to_dma(dev, page_to_phys(page)); +diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h +index 8aa97817cc8c..99b6ded54849 100644 +--- a/arch/microblaze/include/asm/tlb.h ++++ b/arch/microblaze/include/asm/tlb.h +@@ -14,7 +14,6 @@ + #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + + #include +-#include + + #ifdef CONFIG_MMU + #define tlb_start_vma(tlb, vma) do { } while (0) +@@ -22,4 +21,6 @@ + #define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0) + #endif + ++#include ++ + #endif /* _ASM_MICROBLAZE_TLB_H */ +diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h +index e9a9f60e596d..fc3ee06eab87 100644 +--- a/arch/powerpc/include/asm/pgalloc.h ++++ b/arch/powerpc/include/asm/pgalloc.h +@@ -3,7 +3,6 @@ + #ifdef __KERNEL__ + + #include +-#include + + #ifdef CONFIG_PPC_BOOK3E + extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address); +@@ -14,6 +13,8 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb, + } + #endif /* !CONFIG_PPC_BOOK3E */ + ++extern void tlb_remove_table(struct mmu_gather *tlb, void *table); ++ + #ifdef CONFIG_PPC64 + #include + #else +diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h +index e2b428b0f7ba..20733fa518ae 100644 +--- a/arch/powerpc/include/asm/tlb.h ++++ b/arch/powerpc/include/asm/tlb.h +@@ -27,6 +27,7 @@ + + #define tlb_start_vma(tlb, vma) do { } while (0) + #define tlb_end_vma(tlb, vma) do { } while (0) ++#define __tlb_remove_tlb_entry __tlb_remove_tlb_entry + + extern void tlb_flush(struct mmu_gather *tlb); + +diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c +index 7e70ae968e5f..6a4a5fcb9730 100644 +--- a/arch/powerpc/mm/hugetlbpage.c ++++ b/arch/powerpc/mm/hugetlbpage.c +@@ -517,8 +517,6 @@ 
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif + for (i = 0; i < num_hugepd; i++, hpdp++) + hpdp->pd = 0; + +- tlb->need_flush = 1; +- + #ifdef CONFIG_PPC_FSL_BOOK3E + hugepd_free(tlb, hugepte); + #else +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c +index ced09d8738b4..49e4d64ff74d 100644 +--- a/arch/s390/kvm/kvm-s390.c ++++ b/arch/s390/kvm/kvm-s390.c +@@ -152,7 +152,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) + case KVM_CAP_ONE_REG: + case KVM_CAP_ENABLE_CAP: + case KVM_CAP_S390_CSS_SUPPORT: +- case KVM_CAP_IRQFD: + case KVM_CAP_IOEVENTFD: + case KVM_CAP_DEVICE_CTRL: + case KVM_CAP_ENABLE_CAP_VM: +diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c +index 46a5e4508752..af53c25da2e7 100644 +--- a/arch/sparc/kernel/perf_event.c ++++ b/arch/sparc/kernel/perf_event.c +@@ -960,6 +960,8 @@ out: + cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; + } + ++static void sparc_pmu_start(struct perf_event *event, int flags); ++ + /* On this PMU each PIC has it's own PCR control register. */ + static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) + { +@@ -972,20 +974,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) + struct perf_event *cp = cpuc->event[i]; + struct hw_perf_event *hwc = &cp->hw; + int idx = hwc->idx; +- u64 enc; + + if (cpuc->current_idx[i] != PIC_NO_INDEX) + continue; + +- sparc_perf_event_set_period(cp, hwc, idx); + cpuc->current_idx[i] = idx; + +- enc = perf_event_get_enc(cpuc->events[i]); +- cpuc->pcr[idx] &= ~mask_for_index(idx); +- if (hwc->state & PERF_HES_STOPPED) +- cpuc->pcr[idx] |= nop_for_index(idx); +- else +- cpuc->pcr[idx] |= event_encoding(enc, idx); ++ sparc_pmu_start(cp, PERF_EF_RELOAD); + } + out: + for (i = 0; i < cpuc->n_events; i++) { +@@ -1101,7 +1096,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags) + int i; + + local_irq_save(flags); +- perf_pmu_disable(event->pmu); + + for (i = 0; i < cpuc->n_events; i++) { + if (event == cpuc->event[i]) { +@@ -1127,7 +1121,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags) + } + } + +- perf_pmu_enable(event->pmu); + local_irq_restore(flags); + } + +@@ -1361,7 +1354,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags) + unsigned long flags; + + local_irq_save(flags); +- perf_pmu_disable(event->pmu); + + n0 = cpuc->n_events; + if (n0 >= sparc_pmu->max_hw_events) +@@ -1394,7 +1386,6 @@ nocheck: + + ret = 0; + out: +- perf_pmu_enable(event->pmu); + local_irq_restore(flags); + return ret; + } +diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c +index 0be7bf978cb1..46a59643bb1c 100644 +--- a/arch/sparc/kernel/process_64.c ++++ b/arch/sparc/kernel/process_64.c +@@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self) + printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n", + gp->tpc, gp->o7, gp->i7, gp->rpc); + } ++ ++ touch_nmi_watchdog(); + } + + memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); +@@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void) + (cpu == this_cpu ? 
'*' : ' '), cpu, + pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3], + pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]); ++ ++ touch_nmi_watchdog(); + } + + memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); +diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c +index c85403d0496c..30e7ddb27a3a 100644 +--- a/arch/sparc/kernel/sys_sparc_64.c ++++ b/arch/sparc/kernel/sys_sparc_64.c +@@ -333,7 +333,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second + long err; + + /* No need for backward compatibility. We can start fresh... */ +- if (call <= SEMCTL) { ++ if (call <= SEMTIMEDOP) { + switch (call) { + case SEMOP: + err = sys_semtimedop(first, ptr, +diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S +index b7f6334e159f..857ad4f8905f 100644 +--- a/arch/sparc/lib/memmove.S ++++ b/arch/sparc/lib/memmove.S +@@ -8,9 +8,11 @@ + + .text + ENTRY(memmove) /* o0=dst o1=src o2=len */ +- mov %o0, %g1 ++ brz,pn %o2, 99f ++ mov %o0, %g1 ++ + cmp %o0, %o1 +- bleu,pt %xcc, memcpy ++ bleu,pt %xcc, 2f + add %o1, %o2, %g7 + cmp %g7, %o0 + bleu,pt %xcc, memcpy +@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */ + stb %g7, [%o0] + bne,pt %icc, 1b + sub %o0, 1, %o0 +- ++99: + retl + mov %g1, %o0 ++ ++ /* We can't just call memcpy for these memmove cases. On some ++ * chips the memcpy uses cache initializing stores and when dst ++ * and src are close enough, those can clobber the source data ++ * before we've loaded it in. ++ */ ++2: or %o0, %o1, %g7 ++ or %o2, %g7, %g7 ++ andcc %g7, 0x7, %g0 ++ bne,pn %xcc, 4f ++ nop ++ ++3: ldx [%o1], %g7 ++ add %o1, 8, %o1 ++ subcc %o2, 8, %o2 ++ add %o0, 8, %o0 ++ bne,pt %icc, 3b ++ stx %g7, [%o0 - 0x8] ++ ba,a,pt %xcc, 99b ++ ++4: ldub [%o1], %g7 ++ add %o1, 1, %o1 ++ subcc %o2, 1, %o2 ++ add %o0, 1, %o0 ++ bne,pt %icc, 4b ++ stb %g7, [%o0 - 0x1] ++ ba,a,pt %xcc, 99b + ENDPROC(memmove) +diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c +index be65f035d18a..5cbc96d801ff 100644 +--- a/arch/sparc/mm/srmmu.c ++++ b/arch/sparc/mm/srmmu.c +@@ -460,10 +460,12 @@ static void __init sparc_context_init(int numctx) + void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, + struct task_struct *tsk) + { ++ unsigned long flags; ++ + if (mm->context == NO_CONTEXT) { +- spin_lock(&srmmu_context_spinlock); ++ spin_lock_irqsave(&srmmu_context_spinlock, flags); + alloc_context(old_mm, mm); +- spin_unlock(&srmmu_context_spinlock); ++ spin_unlock_irqrestore(&srmmu_context_spinlock, flags); + srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd); + } + +@@ -986,14 +988,15 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) + + void destroy_context(struct mm_struct *mm) + { ++ unsigned long flags; + + if (mm->context != NO_CONTEXT) { + flush_cache_mm(mm); + srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir); + flush_tlb_mm(mm); +- spin_lock(&srmmu_context_spinlock); ++ spin_lock_irqsave(&srmmu_context_spinlock, flags); + free_context(mm->context); +- spin_unlock(&srmmu_context_spinlock); ++ spin_unlock_irqrestore(&srmmu_context_spinlock, flags); + mm->context = NO_CONTEXT; + } + } +diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c +index 70fece226d17..5a93783a8a0d 100644 +--- a/arch/x86/crypto/aesni-intel_glue.c ++++ b/arch/x86/crypto/aesni-intel_glue.c +@@ -1137,7 +1137,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) + src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); + if (!src) + 
return -ENOMEM; +- assoc = (src + req->cryptlen + auth_tag_len); ++ assoc = (src + req->cryptlen); + scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); + scatterwalk_map_and_copy(assoc, req->assoc, 0, + req->assoclen, 0); +@@ -1162,7 +1162,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) + scatterwalk_done(&src_sg_walk, 0, 0); + scatterwalk_done(&assoc_sg_walk, 0, 0); + } else { +- scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1); ++ scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1); + kfree(src); + } + return retval; +diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h +index e97622f57722..f895358db0ab 100644 +--- a/arch/x86/include/asm/fpu-internal.h ++++ b/arch/x86/include/asm/fpu-internal.h +@@ -368,7 +368,7 @@ static inline void drop_fpu(struct task_struct *tsk) + preempt_disable(); + tsk->thread.fpu_counter = 0; + __drop_fpu(tsk); +- clear_used_math(); ++ clear_stopped_child_used_math(tsk); + preempt_enable(); + } + +diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c +index 4128b5fcb559..2aaee79fb129 100644 +--- a/arch/x86/kernel/apic/apic_numachip.c ++++ b/arch/x86/kernel/apic/apic_numachip.c +@@ -40,7 +40,7 @@ static unsigned int get_apic_id(unsigned long x) + unsigned int id; + + rdmsrl(MSR_FAM10H_NODE_ID, value); +- id = ((x >> 24) & 0xffU) | ((value << 2) & 0x3f00U); ++ id = ((x >> 24) & 0xffU) | ((value << 2) & 0xff00U); + + return id; + } +diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c +index 0de1fae2bdf0..8be1e1711203 100644 +--- a/arch/x86/kernel/xsave.c ++++ b/arch/x86/kernel/xsave.c +@@ -378,7 +378,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) + * thread's fpu state, reconstruct fxstate from the fsave + * header. Sanitize the copied state etc. 
+ */ +- struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave; ++ struct fpu *fpu = &tsk->thread.fpu; + struct user_i387_ia32_struct env; + int err = 0; + +@@ -392,14 +392,15 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) + */ + drop_fpu(tsk); + +- if (__copy_from_user(xsave, buf_fx, state_size) || ++ if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) || + __copy_from_user(&env, buf, sizeof(env))) { ++ fpu_finit(fpu); + err = -1; + } else { + sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only); +- set_used_math(); + } + ++ set_used_math(); + if (use_eager_fpu()) { + preempt_disable(); + math_state_restore(); +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 8b92cf4b165a..a38dd816015b 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -2713,7 +2713,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) + case KVM_CAP_USER_NMI: + case KVM_CAP_REINJECT_CONTROL: + case KVM_CAP_IRQ_INJECT_STATUS: +- case KVM_CAP_IRQFD: + case KVM_CAP_IOEVENTFD: + case KVM_CAP_IOEVENTFD_NO_LENGTH: + case KVM_CAP_PIT2: +diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S +index 31776d0efc8c..d7ec4e251c0a 100644 +--- a/arch/x86/vdso/vdso32/sigreturn.S ++++ b/arch/x86/vdso/vdso32/sigreturn.S +@@ -17,6 +17,7 @@ + .text + .globl __kernel_sigreturn + .type __kernel_sigreturn,@function ++ nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */ + ALIGN + __kernel_sigreturn: + .LSTART_sigreturn: +diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c +index 7d1c540fa26a..3f187a529e92 100644 +--- a/drivers/char/tpm/tpm_i2c_stm_st33.c ++++ b/drivers/char/tpm/tpm_i2c_stm_st33.c +@@ -397,7 +397,7 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, + */ + static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) + { +- int size = 0, burstcnt, len; ++ int size = 0, burstcnt, len, ret; + struct i2c_client *client; + + client = (struct i2c_client *)TPM_VPRIV(chip); +@@ -406,13 +406,15 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) + wait_for_stat(chip, + TPM_STS_DATA_AVAIL | TPM_STS_VALID, + chip->vendor.timeout_c, +- &chip->vendor.read_queue) +- == 0) { ++ &chip->vendor.read_queue) == 0) { + burstcnt = get_burstcount(chip); + if (burstcnt < 0) + return burstcnt; + len = min_t(int, burstcnt, count - size); +- I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len); ++ ret = I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len); ++ if (ret < 0) ++ return ret; ++ + size += len; + } + return size; +diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c +index eff9d5870034..102463ba745d 100644 +--- a/drivers/char/tpm/tpm_ibmvtpm.c ++++ b/drivers/char/tpm/tpm_ibmvtpm.c +@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) + { + struct ibmvtpm_dev *ibmvtpm; + struct ibmvtpm_crq crq; +- u64 *word = (u64 *) &crq; ++ __be64 *word = (__be64 *)&crq; + int rc; + + ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); +@@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) + memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); + crq.valid = (u8)IBMVTPM_VALID_CMD; + crq.msg = (u8)VTPM_TPM_COMMAND; +- crq.len = (u16)count; +- crq.data = ibmvtpm->rtce_dma_handle; ++ crq.len = cpu_to_be16(count); ++ crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle); + +- rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]), +- 
cpu_to_be64(word[1])); ++ rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]), ++ be64_to_cpu(word[1])); + if (rc != H_SUCCESS) { + dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); + rc = 0; +diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h +index bd82a791f995..b2c231b1beec 100644 +--- a/drivers/char/tpm/tpm_ibmvtpm.h ++++ b/drivers/char/tpm/tpm_ibmvtpm.h +@@ -22,9 +22,9 @@ + struct ibmvtpm_crq { + u8 valid; + u8 msg; +- u16 len; +- u32 data; +- u64 reserved; ++ __be16 len; ++ __be32 data; ++ __be64 reserved; + } __attribute__((packed, aligned(8))); + + struct ibmvtpm_crq_queue { +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c +index cf7a561fad7c..6e09c1dac2b7 100644 +--- a/drivers/char/virtio_console.c ++++ b/drivers/char/virtio_console.c +@@ -142,6 +142,7 @@ struct ports_device { + * notification + */ + struct work_struct control_work; ++ struct work_struct config_work; + + struct list_head ports; + +@@ -1832,10 +1833,21 @@ static void config_intr(struct virtio_device *vdev) + + portdev = vdev->priv; + ++ if (!use_multiport(portdev)) ++ schedule_work(&portdev->config_work); ++} ++ ++static void config_work_handler(struct work_struct *work) ++{ ++ struct ports_device *portdev; ++ ++ portdev = container_of(work, struct ports_device, control_work); + if (!use_multiport(portdev)) { ++ struct virtio_device *vdev; + struct port *port; + u16 rows, cols; + ++ vdev = portdev->vdev; + virtio_cread(vdev, struct virtio_console_config, cols, &cols); + virtio_cread(vdev, struct virtio_console_config, rows, &rows); + +@@ -2026,12 +2038,14 @@ static int virtcons_probe(struct virtio_device *vdev) + + virtio_device_ready(portdev->vdev); + ++ INIT_WORK(&portdev->config_work, &config_work_handler); ++ INIT_WORK(&portdev->control_work, &control_work_handler); ++ + if (multiport) { + unsigned int nr_added_bufs; + + spin_lock_init(&portdev->c_ivq_lock); + spin_lock_init(&portdev->c_ovq_lock); +- INIT_WORK(&portdev->control_work, &control_work_handler); + + nr_added_bufs = fill_queue(portdev->c_ivq, + &portdev->c_ivq_lock); +@@ -2099,6 +2113,8 @@ static void virtcons_remove(struct virtio_device *vdev) + /* Finish up work that's lined up */ + if (use_multiport(portdev)) + cancel_work_sync(&portdev->control_work); ++ else ++ cancel_work_sync(&portdev->config_work); + + list_for_each_entry_safe(port, port2, &portdev->ports, list) + unplug_port(port); +@@ -2150,6 +2166,7 @@ static int virtcons_freeze(struct virtio_device *vdev) + + virtqueue_disable_cb(portdev->c_ivq); + cancel_work_sync(&portdev->control_work); ++ cancel_work_sync(&portdev->config_work); + /* + * Once more: if control_work_handler() was running, it would + * enable the cb as the last step. +diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c +index e79c8d3700d8..da41ad42d3a6 100644 +--- a/drivers/gpu/drm/drm_crtc.c ++++ b/drivers/gpu/drm/drm_crtc.c +@@ -42,9 +42,10 @@ + #include "drm_crtc_internal.h" + #include "drm_internal.h" + +-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, +- struct drm_mode_fb_cmd2 *r, +- struct drm_file *file_priv); ++static struct drm_framebuffer * ++internal_framebuffer_create(struct drm_device *dev, ++ struct drm_mode_fb_cmd2 *r, ++ struct drm_file *file_priv); + + /* Avoid boilerplate. I'm tired of typing. 
*/ + #define DRM_ENUM_NAME_FN(fnname, list) \ +@@ -2739,13 +2740,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, + */ + if (req->flags & DRM_MODE_CURSOR_BO) { + if (req->handle) { +- fb = add_framebuffer_internal(dev, &fbreq, file_priv); ++ fb = internal_framebuffer_create(dev, &fbreq, file_priv); + if (IS_ERR(fb)) { + DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); + return PTR_ERR(fb); + } +- +- drm_framebuffer_reference(fb); + } else { + fb = NULL; + } +@@ -3114,9 +3113,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) + return 0; + } + +-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, +- struct drm_mode_fb_cmd2 *r, +- struct drm_file *file_priv) ++static struct drm_framebuffer * ++internal_framebuffer_create(struct drm_device *dev, ++ struct drm_mode_fb_cmd2 *r, ++ struct drm_file *file_priv) + { + struct drm_mode_config *config = &dev->mode_config; + struct drm_framebuffer *fb; +@@ -3148,12 +3148,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, + return fb; + } + +- mutex_lock(&file_priv->fbs_lock); +- r->fb_id = fb->base.id; +- list_add(&fb->filp_head, &file_priv->fbs); +- DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); +- mutex_unlock(&file_priv->fbs_lock); +- + return fb; + } + +@@ -3175,15 +3169,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, + int drm_mode_addfb2(struct drm_device *dev, + void *data, struct drm_file *file_priv) + { ++ struct drm_mode_fb_cmd2 *r = data; + struct drm_framebuffer *fb; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + +- fb = add_framebuffer_internal(dev, data, file_priv); ++ fb = internal_framebuffer_create(dev, r, file_priv); + if (IS_ERR(fb)) + return PTR_ERR(fb); + ++ /* Transfer ownership to the filp for reaping on close */ ++ ++ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); ++ mutex_lock(&file_priv->fbs_lock); ++ r->fb_id = fb->base.id; ++ list_add(&fb->filp_head, &file_priv->fbs); ++ mutex_unlock(&file_priv->fbs_lock); ++ + return 0; + } + +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c +index dce0d3918fa7..9f0e62529c46 100644 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c +@@ -1405,6 +1405,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, + (x << 16) | y); + viewport_w = crtc->mode.hdisplay; + viewport_h = (crtc->mode.vdisplay + 1) & ~1; ++ if ((rdev->family >= CHIP_BONAIRE) && ++ (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)) ++ viewport_h *= 2; + WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, + (viewport_w << 16) | viewport_h); + +diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c +index 5f395be9b3e3..0c6fbc0198a5 100644 +--- a/drivers/gpu/drm/radeon/cik.c ++++ b/drivers/gpu/drm/radeon/cik.c +@@ -7555,6 +7555,9 @@ int cik_irq_set(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, hpd5); + WREG32(DC_HPD6_INT_CONTROL, hpd6); + ++ /* posting read */ ++ RREG32(SRBM_STATUS); ++ + return 0; + } + +diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c +index 85995b4e3338..c674f63d7f14 100644 +--- a/drivers/gpu/drm/radeon/evergreen.c ++++ b/drivers/gpu/drm/radeon/evergreen.c +@@ -4589,6 +4589,9 @@ int evergreen_irq_set(struct radeon_device *rdev) + WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); + WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); + ++ /* 
posting read */ ++ RREG32(SRBM_STATUS); ++ + return 0; + } + +diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c +index cdf6e2149539..a959cc1e7c8e 100644 +--- a/drivers/gpu/drm/radeon/r100.c ++++ b/drivers/gpu/drm/radeon/r100.c +@@ -728,6 +728,10 @@ int r100_irq_set(struct radeon_device *rdev) + tmp |= RADEON_FP2_DETECT_MASK; + } + WREG32(RADEON_GEN_INT_CNTL, tmp); ++ ++ /* read back to post the write */ ++ RREG32(RADEON_GEN_INT_CNTL); ++ + return 0; + } + +diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c +index 56b02927cd3d..ee0868dec2f4 100644 +--- a/drivers/gpu/drm/radeon/r600.c ++++ b/drivers/gpu/drm/radeon/r600.c +@@ -3787,6 +3787,9 @@ int r600_irq_set(struct radeon_device *rdev) + WREG32(RV770_CG_THERMAL_INT, thermal_int); + } + ++ /* posting read */ ++ RREG32(R_000E50_SRBM_STATUS); ++ + return 0; + } + +diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c +index 6f377de099f9..a5b7f6f98f5f 100644 +--- a/drivers/gpu/drm/radeon/radeon_cs.c ++++ b/drivers/gpu/drm/radeon/radeon_cs.c +@@ -275,11 +275,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) + u32 ring = RADEON_CS_RING_GFX; + s32 priority = 0; + ++ INIT_LIST_HEAD(&p->validated); ++ + if (!cs->num_chunks) { + return 0; + } ++ + /* get chunks */ +- INIT_LIST_HEAD(&p->validated); + p->idx = 0; + p->ib.sa_bo = NULL; + p->ib.semaphore = NULL; +diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c +index 995167025282..8569afaba688 100644 +--- a/drivers/gpu/drm/radeon/radeon_fence.c ++++ b/drivers/gpu/drm/radeon/radeon_fence.c +@@ -1029,37 +1029,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence) + return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); + } + ++struct radeon_wait_cb { ++ struct fence_cb base; ++ struct task_struct *task; ++}; ++ ++static void ++radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) ++{ ++ struct radeon_wait_cb *wait = ++ container_of(cb, struct radeon_wait_cb, base); ++ ++ wake_up_process(wait->task); ++} ++ + static signed long radeon_fence_default_wait(struct fence *f, bool intr, + signed long t) + { + struct radeon_fence *fence = to_radeon_fence(f); + struct radeon_device *rdev = fence->rdev; +- bool signaled; ++ struct radeon_wait_cb cb; + +- fence_enable_sw_signaling(&fence->base); ++ cb.task = current; + +- /* +- * This function has to return -EDEADLK, but cannot hold +- * exclusive_lock during the wait because some callers +- * may already hold it. This means checking needs_reset without +- * lock, and not fiddling with any gpu internals. +- * +- * The callback installed with fence_enable_sw_signaling will +- * run before our wait_event_*timeout call, so we will see +- * both the signaled fence and the changes to needs_reset. 
+- */ ++ if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) ++ return t; ++ ++ while (t > 0) { ++ if (intr) ++ set_current_state(TASK_INTERRUPTIBLE); ++ else ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ ++ /* ++ * radeon_test_signaled must be called after ++ * set_current_state to prevent a race with wake_up_process ++ */ ++ if (radeon_test_signaled(fence)) ++ break; ++ ++ if (rdev->needs_reset) { ++ t = -EDEADLK; ++ break; ++ } ++ ++ t = schedule_timeout(t); ++ ++ if (t > 0 && intr && signal_pending(current)) ++ t = -ERESTARTSYS; ++ } ++ ++ __set_current_state(TASK_RUNNING); ++ fence_remove_callback(f, &cb.base); + +- if (intr) +- t = wait_event_interruptible_timeout(rdev->fence_queue, +- ((signaled = radeon_test_signaled(fence)) || +- rdev->needs_reset), t); +- else +- t = wait_event_timeout(rdev->fence_queue, +- ((signaled = radeon_test_signaled(fence)) || +- rdev->needs_reset), t); +- +- if (t > 0 && !signaled) +- return -EDEADLK; + return t; + } + +diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c +index 194f6245c379..2a7ba30165c7 100644 +--- a/drivers/gpu/drm/radeon/radeon_object.c ++++ b/drivers/gpu/drm/radeon/radeon_object.c +@@ -151,19 +151,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) + else + rbo->placements[i].lpfn = 0; + } +- +- /* +- * Use two-ended allocation depending on the buffer size to +- * improve fragmentation quality. +- * 512kb was measured as the most optimal number. +- */ +- if (!((rbo->flags & RADEON_GEM_CPU_ACCESS) && +- (rbo->placements[i].flags & TTM_PL_FLAG_VRAM)) && +- rbo->tbo.mem.size > 512 * 1024) { +- for (i = 0; i < c; i++) { +- rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN; +- } +- } + } + + int radeon_bo_create(struct radeon_device *rdev, +diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c +index 74bce91aecc1..039660662ee8 100644 +--- a/drivers/gpu/drm/radeon/rs600.c ++++ b/drivers/gpu/drm/radeon/rs600.c +@@ -693,6 +693,10 @@ int rs600_irq_set(struct radeon_device *rdev) + WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); + if (ASIC_IS_DCE2(rdev)) + WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); ++ ++ /* posting read */ ++ RREG32(R_000040_GEN_INT_CNTL); ++ + return 0; + } + +diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c +index 7d5083dc4acb..1c3d90c17cb3 100644 +--- a/drivers/gpu/drm/radeon/si.c ++++ b/drivers/gpu/drm/radeon/si.c +@@ -6192,6 +6192,9 @@ int si_irq_set(struct radeon_device *rdev) + + WREG32(CG_THERMAL_INT, thermal_int); + ++ /* posting read */ ++ RREG32(SRBM_STATUS); ++ + return 0; + } + +@@ -7112,8 +7115,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) + WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); + + if (!vclk || !dclk) { +- /* keep the Bypass mode, put PLL to sleep */ +- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); ++ /* keep the Bypass mode */ + return 0; + } + +@@ -7129,8 +7131,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) + /* set VCO_MODE to 1 */ + WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); + +- /* toggle UPLL_SLEEP to 1 then back to 0 */ +- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); ++ /* disable sleep mode */ + WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); + + /* deassert UPLL_RESET */ +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +index 810dac80179c..0426b5bed8fc 100644 +--- 
a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +@@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) + goto out_err1; + } + +- ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, +- (dev_priv->vram_size >> PAGE_SHIFT)); +- if (unlikely(ret != 0)) { +- DRM_ERROR("Failed initializing memory manager for VRAM.\n"); +- goto out_err2; +- } +- +- dev_priv->has_gmr = true; +- if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || +- refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, +- VMW_PL_GMR) != 0) { +- DRM_INFO("No GMR memory available. " +- "Graphics memory resources are very limited.\n"); +- dev_priv->has_gmr = false; +- } +- +- if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { +- dev_priv->has_mob = true; +- if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, +- VMW_PL_MOB) != 0) { +- DRM_INFO("No MOB memory available. " +- "3D will be disabled.\n"); +- dev_priv->has_mob = false; +- } +- } +- + dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, + dev_priv->mmio_size); + +@@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) + goto out_no_fman; + } + ++ ++ ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, ++ (dev_priv->vram_size >> PAGE_SHIFT)); ++ if (unlikely(ret != 0)) { ++ DRM_ERROR("Failed initializing memory manager for VRAM.\n"); ++ goto out_no_vram; ++ } ++ ++ dev_priv->has_gmr = true; ++ if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || ++ refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, ++ VMW_PL_GMR) != 0) { ++ DRM_INFO("No GMR memory available. " ++ "Graphics memory resources are very limited.\n"); ++ dev_priv->has_gmr = false; ++ } ++ ++ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { ++ dev_priv->has_mob = true; ++ if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, ++ VMW_PL_MOB) != 0) { ++ DRM_INFO("No MOB memory available. " ++ "3D will be disabled.\n"); ++ dev_priv->has_mob = false; ++ } ++ } ++ + vmw_kms_save_vga(dev_priv); + + /* Start kms and overlay systems, needs fifo. 
*/ +@@ -838,6 +839,12 @@ out_no_fifo: + vmw_kms_close(dev_priv); + out_no_kms: + vmw_kms_restore_vga(dev_priv); ++ if (dev_priv->has_mob) ++ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); ++ if (dev_priv->has_gmr) ++ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); ++ (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); ++out_no_vram: + vmw_fence_manager_takedown(dev_priv->fman); + out_no_fman: + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) +@@ -853,12 +860,6 @@ out_err4: + iounmap(dev_priv->mmio_virt); + out_err3: + arch_phys_wc_del(dev_priv->mmio_mtrr); +- if (dev_priv->has_mob) +- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); +- if (dev_priv->has_gmr) +- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); +- (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); +-out_err2: + (void)ttm_bo_device_release(&dev_priv->bdev); + out_err1: + vmw_ttm_global_release(dev_priv); +@@ -888,6 +889,13 @@ static int vmw_driver_unload(struct drm_device *dev) + } + vmw_kms_close(dev_priv); + vmw_overlay_close(dev_priv); ++ ++ if (dev_priv->has_mob) ++ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); ++ if (dev_priv->has_gmr) ++ (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); ++ (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); ++ + vmw_fence_manager_takedown(dev_priv->fman); + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) + drm_irq_uninstall(dev_priv->dev); +@@ -899,11 +907,6 @@ static int vmw_driver_unload(struct drm_device *dev) + ttm_object_device_release(&dev_priv->tdev); + iounmap(dev_priv->mmio_virt); + arch_phys_wc_del(dev_priv->mmio_mtrr); +- if (dev_priv->has_mob) +- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); +- if (dev_priv->has_gmr) +- (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); +- (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); + (void)ttm_bo_device_release(&dev_priv->bdev); + vmw_ttm_global_release(dev_priv); + +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +index 596cd6dafd33..50b52802f470 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +@@ -2778,13 +2778,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, + NULL, arg->command_size, arg->throttle_us, + (void __user *)(unsigned long)arg->fence_rep, + NULL); +- ++ ttm_read_unlock(&dev_priv->reservation_sem); + if (unlikely(ret != 0)) +- goto out_unlock; ++ return ret; + + vmw_kms_cursor_post_execbuf(dev_priv); + +-out_unlock: +- ttm_read_unlock(&dev_priv->reservation_sem); +- return ret; ++ return 0; + } +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +index fddd53335237..173ec3377e4f 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +@@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, + int i; + struct drm_mode_config *mode_config = &dev->mode_config; + +- ret = ttm_read_lock(&dev_priv->reservation_sem, true); +- if (unlikely(ret != 0)) +- return ret; +- + if (!arg->num_outputs) { + struct drm_vmw_rect def_rect = {0, 0, 800, 600}; + vmw_du_update_layout(dev_priv, 1, &def_rect); +- goto out_unlock; ++ return 0; + } + + rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); + rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), + GFP_KERNEL); +- if (unlikely(!rects)) { +- ret = -ENOMEM; +- goto out_unlock; +- } ++ if (unlikely(!rects)) ++ return -ENOMEM; + + user_rects = (void __user *)(unsigned long)arg->rects; + ret = copy_from_user(rects, 
user_rects, rects_size); +@@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, + + out_free: + kfree(rects); +-out_unlock: +- ttm_read_unlock(&dev_priv->reservation_sem); + return ret; + } +diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c +index f43b4e11647a..17a1853c6c2f 100644 +--- a/drivers/i2c/i2c-core.c ++++ b/drivers/i2c/i2c-core.c +@@ -665,9 +665,6 @@ static int i2c_device_remove(struct device *dev) + status = driver->remove(client); + } + +- if (dev->of_node) +- irq_dispose_mapping(client->irq); +- + dev_pm_domain_detach(&client->dev, true); + return status; + } +diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c +index 6a2e168c3ab0..41ac85af043e 100644 +--- a/drivers/irqchip/irq-armada-370-xp.c ++++ b/drivers/irqchip/irq-armada-370-xp.c +@@ -67,6 +67,7 @@ + static void __iomem *per_cpu_int_base; + static void __iomem *main_int_base; + static struct irq_domain *armada_370_xp_mpic_domain; ++static int parent_irq; + #ifdef CONFIG_PCI_MSI + static struct irq_domain *armada_370_xp_msi_domain; + static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); +@@ -354,6 +355,7 @@ static int armada_xp_mpic_secondary_init(struct notifier_block *nfb, + { + if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) + armada_xp_mpic_smp_cpu_init(); ++ + return NOTIFY_OK; + } + +@@ -362,6 +364,20 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = { + .priority = 100, + }; + ++static int mpic_cascaded_secondary_init(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) ++ enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block mpic_cascaded_cpu_notifier = { ++ .notifier_call = mpic_cascaded_secondary_init, ++ .priority = 100, ++}; ++ + #endif /* CONFIG_SMP */ + + static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { +@@ -489,7 +505,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, + struct device_node *parent) + { + struct resource main_int_res, per_cpu_int_res; +- int parent_irq, nr_irqs, i; ++ int nr_irqs, i; + u32 control; + + BUG_ON(of_address_to_resource(node, 0, &main_int_res)); +@@ -537,6 +553,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, + register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); + #endif + } else { ++#ifdef CONFIG_SMP ++ register_cpu_notifier(&mpic_cascaded_cpu_notifier); ++#endif + irq_set_chained_handler(parent_irq, + armada_370_xp_mpic_handle_cascade_irq); + } +diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c +index 96b0b1d27df1..bc677362bc73 100644 +--- a/drivers/mtd/nand/pxa3xx_nand.c ++++ b/drivers/mtd/nand/pxa3xx_nand.c +@@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) + nand_writel(info, NDCR, ndcr | int_mask); + } + ++static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len) ++{ ++ if (info->ecc_bch) { ++ int timeout; ++ ++ /* ++ * According to the datasheet, when reading from NDDB ++ * with BCH enabled, after each 32 bytes reads, we ++ * have to make sure that the NDSR.RDDREQ bit is set. ++ * ++ * Drain the FIFO 8 32 bits reads at a time, and skip ++ * the polling on the last read. 
++ */ ++ while (len > 8) { ++ __raw_readsl(info->mmio_base + NDDB, data, 8); ++ ++ for (timeout = 0; ++ !(nand_readl(info, NDSR) & NDSR_RDDREQ); ++ timeout++) { ++ if (timeout >= 5) { ++ dev_err(&info->pdev->dev, ++ "Timeout on RDDREQ while draining the FIFO\n"); ++ return; ++ } ++ ++ mdelay(1); ++ } ++ ++ data += 32; ++ len -= 8; ++ } ++ } ++ ++ __raw_readsl(info->mmio_base + NDDB, data, len); ++} ++ + static void handle_data_pio(struct pxa3xx_nand_info *info) + { + unsigned int do_bytes = min(info->data_size, info->chunk_size); +@@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info) + DIV_ROUND_UP(info->oob_size, 4)); + break; + case STATE_PIO_READING: +- __raw_readsl(info->mmio_base + NDDB, +- info->data_buff + info->data_buff_pos, +- DIV_ROUND_UP(do_bytes, 4)); ++ drain_fifo(info, ++ info->data_buff + info->data_buff_pos, ++ DIV_ROUND_UP(do_bytes, 4)); + + if (info->oob_size > 0) +- __raw_readsl(info->mmio_base + NDDB, +- info->oob_buff + info->oob_buff_pos, +- DIV_ROUND_UP(info->oob_size, 4)); ++ drain_fifo(info, ++ info->oob_buff + info->oob_buff_pos, ++ DIV_ROUND_UP(info->oob_size, 4)); + break; + default: + dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index 4b008c9c738d..573b53b38af4 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -500,6 +500,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) + skb->pkt_type = PACKET_BROADCAST; + skb->ip_summed = CHECKSUM_UNNECESSARY; + ++ skb_reset_mac_header(skb); ++ skb_reset_network_header(skb); ++ skb_reset_transport_header(skb); ++ + can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = dev->ifindex; + +@@ -524,6 +528,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, + skb->pkt_type = PACKET_BROADCAST; + skb->ip_summed = CHECKSUM_UNNECESSARY; + ++ skb_reset_mac_header(skb); ++ skb_reset_network_header(skb); ++ skb_reset_transport_header(skb); ++ + can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = dev->ifindex; + +diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c +index 8b255e777cc7..5d777956ae1f 100644 +--- a/drivers/net/can/usb/kvaser_usb.c ++++ b/drivers/net/can/usb/kvaser_usb.c +@@ -12,6 +12,7 @@ + * Copyright (C) 2012 Olivier Sobrie + */ + ++#include + #include + #include + #include +@@ -403,8 +404,15 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, + while (pos <= actual_len - MSG_HEADER_LEN) { + tmp = buf + pos; + +- if (!tmp->len) +- break; ++ /* Handle messages crossing the USB endpoint max packet ++ * size boundary. Check kvaser_usb_read_bulk_callback() ++ * for further details. ++ */ ++ if (tmp->len == 0) { ++ pos = round_up(pos, ++ dev->bulk_in->wMaxPacketSize); ++ continue; ++ } + + if (pos + tmp->len > actual_len) { + dev_err(dev->udev->dev.parent, +@@ -983,8 +991,19 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) + while (pos <= urb->actual_length - MSG_HEADER_LEN) { + msg = urb->transfer_buffer + pos; + +- if (!msg->len) +- break; ++ /* The Kvaser firmware can only read and write messages that ++ * does not cross the USB's endpoint wMaxPacketSize boundary. ++ * If a follow-up command crosses such boundary, firmware puts ++ * a placeholder zero-length command in its place then aligns ++ * the real command to the next max packet size. ++ * ++ * Handle such cases or we're going to miss a significant ++ * number of events in case of a heavy rx load on the bus. 
++ */ ++ if (msg->len == 0) { ++ pos = round_up(pos, dev->bulk_in->wMaxPacketSize); ++ continue; ++ } + + if (pos + msg->len > urb->actual_length) { + dev_err(dev->udev->dev.parent, "Format error\n"); +@@ -992,7 +1011,6 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) + } + + kvaser_usb_handle_message(dev, msg); +- + pos += msg->len; + } + +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +index 74fbf9ea7bd8..710eb5793eb3 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +@@ -12711,6 +12711,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, + pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, + PCICFG_VENDOR_ID_OFFSET); + ++ /* Set PCIe reset type to fundamental for EEH recovery */ ++ pdev->needs_freset = 1; ++ + /* AER (Advanced Error reporting) configuration */ + rc = pci_enable_pcie_error_reporting(pdev); + if (!rc) +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c +index 3dca494797bd..96ba23e90111 100644 +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -1464,8 +1464,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) + + vlan_packet_rcvd = true; + +- skb_copy_to_linear_data_offset(skb, VLAN_HLEN, +- data, (2 * ETH_ALEN)); ++ memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); + skb_pull(skb, VLAN_HLEN); + } + +diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c +index 3eed708a6182..fe48f4c51373 100644 +--- a/drivers/net/usb/cx82310_eth.c ++++ b/drivers/net/usb/cx82310_eth.c +@@ -300,9 +300,18 @@ static const struct driver_info cx82310_info = { + .tx_fixup = cx82310_tx_fixup, + }; + ++#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \ ++ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ ++ USB_DEVICE_ID_MATCH_DEV_INFO, \ ++ .idVendor = (vend), \ ++ .idProduct = (prod), \ ++ .bDeviceClass = (cl), \ ++ .bDeviceSubClass = (sc), \ ++ .bDeviceProtocol = (pr) ++ + static const struct usb_device_id products[] = { + { +- USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0), ++ USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0), + .driver_info = (unsigned long) &cx82310_info + }, + { }, +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index b0bc8ead47de..484ecce78025 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -1452,8 +1452,10 @@ static void virtnet_free_queues(struct virtnet_info *vi) + { + int i; + +- for (i = 0; i < vi->max_queue_pairs; i++) ++ for (i = 0; i < vi->max_queue_pairs; i++) { ++ napi_hash_del(&vi->rq[i].napi); + netif_napi_del(&vi->rq[i].napi); ++ } + + kfree(vi->rq); + kfree(vi->sq); +@@ -1939,11 +1941,8 @@ static int virtnet_freeze(struct virtio_device *vdev) + cancel_delayed_work_sync(&vi->refill); + + if (netif_running(vi->dev)) { +- for (i = 0; i < vi->max_queue_pairs; i++) { ++ for (i = 0; i < vi->max_queue_pairs; i++) + napi_disable(&vi->rq[i].napi); +- napi_hash_del(&vi->rq[i].napi); +- netif_napi_del(&vi->rq[i].napi); +- } + } + + remove_vq_common(vi); +diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c +index 2c6643fdc0cf..eb1543841e39 100644 +--- a/drivers/pci/pci-sysfs.c ++++ b/drivers/pci/pci-sysfs.c +@@ -515,7 +515,8 @@ static ssize_t driver_override_store(struct device *dev, + struct pci_dev *pdev = to_pci_dev(dev); + char *driver_override, *old = pdev->driver_override, *cp; + +- if (count > PATH_MAX) 
++ /* We need to keep extra room for a newline */ ++ if (count >= (PAGE_SIZE - 1)) + return -EINVAL; + + driver_override = kstrndup(buf, count, GFP_KERNEL); +@@ -543,7 +544,7 @@ static ssize_t driver_override_show(struct device *dev, + { + struct pci_dev *pdev = to_pci_dev(dev); + +- return sprintf(buf, "%s\n", pdev->driver_override); ++ return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); + } + static DEVICE_ATTR_RW(driver_override); + +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index fc6fb5422b6f..d92612c51657 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -1841,10 +1841,12 @@ static int _regulator_do_enable(struct regulator_dev *rdev) + } + + if (rdev->ena_pin) { +- ret = regulator_ena_gpio_ctrl(rdev, true); +- if (ret < 0) +- return ret; +- rdev->ena_gpio_state = 1; ++ if (!rdev->ena_gpio_state) { ++ ret = regulator_ena_gpio_ctrl(rdev, true); ++ if (ret < 0) ++ return ret; ++ rdev->ena_gpio_state = 1; ++ } + } else if (rdev->desc->ops->enable) { + ret = rdev->desc->ops->enable(rdev); + if (ret < 0) +@@ -1941,10 +1943,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev) + trace_regulator_disable(rdev_get_name(rdev)); + + if (rdev->ena_pin) { +- ret = regulator_ena_gpio_ctrl(rdev, false); +- if (ret < 0) +- return ret; +- rdev->ena_gpio_state = 0; ++ if (rdev->ena_gpio_state) { ++ ret = regulator_ena_gpio_ctrl(rdev, false); ++ if (ret < 0) ++ return ret; ++ rdev->ena_gpio_state = 0; ++ } + + } else if (rdev->desc->ops->disable) { + ret = rdev->desc->ops->disable(rdev); +@@ -3659,12 +3663,6 @@ regulator_register(const struct regulator_desc *regulator_desc, + config->ena_gpio, ret); + goto wash; + } +- +- if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH) +- rdev->ena_gpio_state = 1; +- +- if (config->ena_gpio_invert) +- rdev->ena_gpio_state = !rdev->ena_gpio_state; + } + + /* set regulator constraints */ +@@ -3837,9 +3835,11 @@ int regulator_suspend_finish(void) + list_for_each_entry(rdev, ®ulator_list, list) { + mutex_lock(&rdev->mutex); + if (rdev->use_count > 0 || rdev->constraints->always_on) { +- error = _regulator_do_enable(rdev); +- if (error) +- ret = error; ++ if (!_regulator_is_enabled(rdev)) { ++ error = _regulator_do_enable(rdev); ++ if (error) ++ ret = error; ++ } + } else { + if (!have_full_constraints()) + goto unlock; +diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c +index 196a5c8838c4..b019956ba5dd 100644 +--- a/drivers/regulator/rk808-regulator.c ++++ b/drivers/regulator/rk808-regulator.c +@@ -184,6 +184,7 @@ static const struct regulator_desc rk808_reg[] = { + .vsel_mask = RK808_LDO_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, + .enable_mask = BIT(0), ++ .enable_time = 400, + .owner = THIS_MODULE, + }, { + .name = "LDO_REG2", +@@ -198,6 +199,7 @@ static const struct regulator_desc rk808_reg[] = { + .vsel_mask = RK808_LDO_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, + .enable_mask = BIT(1), ++ .enable_time = 400, + .owner = THIS_MODULE, + }, { + .name = "LDO_REG3", +@@ -212,6 +214,7 @@ static const struct regulator_desc rk808_reg[] = { + .vsel_mask = RK808_BUCK4_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, + .enable_mask = BIT(2), ++ .enable_time = 400, + .owner = THIS_MODULE, + }, { + .name = "LDO_REG4", +@@ -226,6 +229,7 @@ static const struct regulator_desc rk808_reg[] = { + .vsel_mask = RK808_LDO_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, + .enable_mask = BIT(3), ++ .enable_time = 400, + .owner = THIS_MODULE, + }, { + .name = "LDO_REG5", +@@ -240,6 
+244,7 @@ static const struct regulator_desc rk808_reg[] = { + .vsel_mask = RK808_LDO_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, + .enable_mask = BIT(4), ++ .enable_time = 400, + .owner = THIS_MODULE, + }, { + .name = "LDO_REG6", +@@ -254,6 +259,7 @@ static const struct regulator_desc rk808_reg[] = { + .vsel_mask = RK808_LDO_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, + .enable_mask = BIT(5), ++ .enable_time = 400, + .owner = THIS_MODULE, + }, { + .name = "LDO_REG7", +@@ -268,6 +274,7 @@ static const struct regulator_desc rk808_reg[] = { + .vsel_mask = RK808_LDO_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, + .enable_mask = BIT(6), ++ .enable_time = 400, + .owner = THIS_MODULE, + }, { + .name = "LDO_REG8", +@@ -282,6 +289,7 @@ static const struct regulator_desc rk808_reg[] = { + .vsel_mask = RK808_LDO_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, + .enable_mask = BIT(7), ++ .enable_time = 400, + .owner = THIS_MODULE, + }, { + .name = "SWITCH_REG1", +diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c +index 806072238c00..22a612b06742 100644 +--- a/drivers/rtc/rtc-s3c.c ++++ b/drivers/rtc/rtc-s3c.c +@@ -849,6 +849,7 @@ static struct s3c_rtc_data const s3c2443_rtc_data = { + + static struct s3c_rtc_data const s3c6410_rtc_data = { + .max_user_freq = 32768, ++ .needs_src_clk = true, + .irq_handler = s3c6410_rtc_irq, + .set_freq = s3c6410_rtc_setfreq, + .enable_tick = s3c6410_rtc_enable_tick, +diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c +index 62b58d38ce2e..60de66252fa2 100644 +--- a/drivers/scsi/libsas/sas_discover.c ++++ b/drivers/scsi/libsas/sas_discover.c +@@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work) + struct sas_discovery_event *ev = to_sas_discovery_event(work); + struct asd_sas_port *port = ev->port; + struct sas_ha_struct *ha = port->ha; ++ struct domain_device *ddev = port->port_dev; + + /* prevent revalidation from finding sata links in recovery */ + mutex_lock(&ha->disco_mutex); +@@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work) + SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, + task_pid_nr(current)); + +- if (port->port_dev) +- res = sas_ex_revalidate_domain(port->port_dev); ++ if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE || ++ ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE)) ++ res = sas_ex_revalidate_domain(ddev); + + SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", + port->id, task_pid_nr(current), res); +diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c +index 113c83f44b5c..4bf337aa4fd5 100644 +--- a/drivers/spi/spi-atmel.c ++++ b/drivers/spi/spi-atmel.c +@@ -775,17 +775,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master, + (unsigned long long)xfer->rx_dma); + } + +- /* REVISIT: We're waiting for ENDRX before we start the next ++ /* REVISIT: We're waiting for RXBUFF before we start the next + * transfer because we need to handle some difficult timing +- * issues otherwise. If we wait for ENDTX in one transfer and +- * then starts waiting for ENDRX in the next, it's difficult +- * to tell the difference between the ENDRX interrupt we're +- * actually waiting for and the ENDRX interrupt of the ++ * issues otherwise. If we wait for TXBUFE in one transfer and ++ * then starts waiting for RXBUFF in the next, it's difficult ++ * to tell the difference between the RXBUFF interrupt we're ++ * actually waiting for and the RXBUFF interrupt of the + * previous transfer. 
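
The two interrupt sources differ in how much of the PDC queue they cover, which is the point of the swap below. As I read the AT91 PDC documentation (treat the register details as an assumption): ENDRX fires as soon as the current receive counter (RCR) reaches zero, while RXBUFF fires only when both RCR and the next-buffer counter (RNCR) are zero, i.e. when nothing remains queued at all. The enable then becomes:

	/* RXBUFF: all queued receive buffers done; OVRES: overrun error */
	spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
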
+ * + * It should be doable, though. Just not now... + */ +- spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES)); ++ spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES)); + spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); + } + +diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c +index efff55537d8a..1417f96546ce 100644 +--- a/drivers/spi/spi-dw-mid.c ++++ b/drivers/spi/spi-dw-mid.c +@@ -151,6 +151,9 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) + 1, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); ++ if (!txdesc) ++ return NULL; ++ + txdesc->callback = dw_spi_dma_done; + txdesc->callback_param = dws; + +@@ -173,6 +176,9 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) + 1, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); ++ if (!rxdesc) ++ return NULL; ++ + rxdesc->callback = dw_spi_dma_done; + rxdesc->callback_param = dws; + +diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c +index fc2dd8441608..11e18342bc4f 100644 +--- a/drivers/spi/spi-pl022.c ++++ b/drivers/spi/spi-pl022.c +@@ -534,12 +534,12 @@ static void giveback(struct pl022 *pl022) + pl022->cur_msg = NULL; + pl022->cur_transfer = NULL; + pl022->cur_chip = NULL; +- spi_finalize_current_message(pl022->master); + + /* disable the SPI/SSP operation */ + writew((readw(SSP_CR1(pl022->virtbase)) & + (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); + ++ spi_finalize_current_message(pl022->master); + } + + /** +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index 73e58d22e325..6446490854cb 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -4220,11 +4220,17 @@ int iscsit_close_connection( + pr_debug("Closing iSCSI connection CID %hu on SID:" + " %u\n", conn->cid, sess->sid); + /* +- * Always up conn_logout_comp just in case the RX Thread is sleeping +- * and the logout response never got sent because the connection +- * failed. ++ * Always up conn_logout_comp for the traditional TCP case just in case ++ * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout ++ * response never got sent because the connection failed. ++ * ++ * However for iser-target, isert_wait4logout() is using conn_logout_comp ++ * to signal logout response TX interrupt completion. Go ahead and skip ++ * this for iser since isert_rx_opcode() does not wait on logout failure, ++ * and to avoid iscsi_conn pointer dereference in iser-target code. + */ +- complete(&conn->conn_logout_comp); ++ if (conn->conn_transport->transport_type == ISCSI_TCP) ++ complete(&conn->conn_logout_comp); + + iscsi_release_thread_set(conn); + +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c +index 24fa5d1999af..9e0f5d3b3ebf 100644 +--- a/drivers/target/target_core_device.c ++++ b/drivers/target/target_core_device.c +@@ -1598,8 +1598,6 @@ int target_configure_device(struct se_device *dev) + ret = dev->transport->configure_device(dev); + if (ret) + goto out; +- dev->dev_flags |= DF_CONFIGURED; +- + /* + * XXX: there is not much point to have two different values here.. 
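
The pair of hunks around this function move the flag so that it can only be observed after every setup step has succeeded; a condensed sketch of the resulting control flow (error paths collapsed, intermediate steps elided):

int target_configure_device(struct se_device *dev)
{
	int ret = dev->transport->configure_device(dev);

	if (ret)
		goto out;	/* DF_CONFIGURED never set on failure */

	/* ... ALUA/PR setup, global device list insertion, etc. ... */

	dev->dev_flags |= DF_CONFIGURED;	/* last step: publish */
	return 0;
out:
	return ret;
}
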
+ */ +@@ -1661,6 +1659,8 @@ int target_configure_device(struct se_device *dev) + list_add_tail(&dev->g_dev_node, &g_device_list); + mutex_unlock(&g_device_mutex); + ++ dev->dev_flags |= DF_CONFIGURED; ++ + return 0; + + out_free_alua: +diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c +index 7c8291f0bbbc..9a54381e23c6 100644 +--- a/drivers/target/target_core_pscsi.c ++++ b/drivers/target/target_core_pscsi.c +@@ -1120,7 +1120,7 @@ static u32 pscsi_get_device_type(struct se_device *dev) + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); + struct scsi_device *sd = pdv->pdv_sd; + +- return sd->type; ++ return (sd) ? sd->type : TYPE_NO_LUN; + } + + static sector_t pscsi_get_blocks(struct se_device *dev) +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index be877bf6f730..2e0998420254 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -2389,6 +2389,10 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, + list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); + out: + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); ++ ++ if (ret && ack_kref) ++ target_put_sess_cmd(se_sess, se_cmd); ++ + return ret; + } + EXPORT_SYMBOL(target_get_sess_cmd); +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c +index beea6ca73ee5..a69e31e3410f 100644 +--- a/drivers/tty/serial/8250/8250_dw.c ++++ b/drivers/tty/serial/8250/8250_dw.c +@@ -111,7 +111,10 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value) + dw8250_force_idle(p); + writeb(value, p->membase + (UART_LCR << p->regshift)); + } +- dev_err(p->dev, "Couldn't set LCR to %d\n", value); ++ /* ++ * FIXME: this deadlocks if port->lock is already held ++ * dev_err(p->dev, "Couldn't set LCR to %d\n", value); ++ */ + } + } + +@@ -148,7 +151,10 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value) + dw8250_force_idle(p); + writel(value, p->membase + (UART_LCR << p->regshift)); + } +- dev_err(p->dev, "Couldn't set LCR to %d\n", value); ++ /* ++ * FIXME: this deadlocks if port->lock is already held ++ * dev_err(p->dev, "Couldn't set LCR to %d\n", value); ++ */ + } + } + +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c +index beb9d71cd47a..439bd1a5d00c 100644 +--- a/drivers/tty/serial/8250/8250_pci.c ++++ b/drivers/tty/serial/8250/8250_pci.c +@@ -69,7 +69,7 @@ static void moan_device(const char *str, struct pci_dev *dev) + "Please send the output of lspci -vv, this\n" + "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n" + "manufacturer and name of serial board or\n" +- "modem board to rmk+serial@arm.linux.org.uk.\n", ++ "modem board to .\n", + pci_name(dev), str, dev->vendor, dev->device, + dev->subsystem_vendor, dev->subsystem_device); + } +diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c +index 08048613eed6..db2becd31a51 100644 +--- a/drivers/usb/gadget/legacy/inode.c ++++ b/drivers/usb/gadget/legacy/inode.c +@@ -565,7 +565,6 @@ static ssize_t ep_copy_to_user(struct kiocb_priv *priv) + if (total == 0) + break; + } +- + return len; + } + +@@ -584,6 +583,7 @@ static void ep_user_copy_worker(struct work_struct *work) + aio_complete(iocb, ret, ret); + + kfree(priv->buf); ++ kfree(priv->iv); + kfree(priv); + } + +@@ -604,6 +604,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) + */ + if (priv->iv == NULL || unlikely(req->actual == 0)) { 
+ kfree(req->buf); ++ kfree(priv->iv); + kfree(priv); + iocb->private = NULL; + /* aio_complete() reports bytes-transferred _and_ faults */ +@@ -639,7 +640,7 @@ ep_aio_rwtail( + struct usb_request *req; + ssize_t value; + +- priv = kmalloc(sizeof *priv, GFP_KERNEL); ++ priv = kzalloc(sizeof *priv, GFP_KERNEL); + if (!priv) { + value = -ENOMEM; + fail: +@@ -648,7 +649,14 @@ fail: + } + iocb->private = priv; + priv->iocb = iocb; +- priv->iv = iv; ++ if (iv) { ++ priv->iv = kmemdup(iv, nr_segs * sizeof(struct iovec), ++ GFP_KERNEL); ++ if (!priv->iv) { ++ kfree(priv); ++ goto fail; ++ } ++ } + priv->nr_segs = nr_segs; + INIT_WORK(&priv->work, ep_user_copy_worker); + +@@ -688,6 +696,7 @@ fail: + mutex_unlock(&epdata->lock); + + if (unlikely(value)) { ++ kfree(priv->iv); + kfree(priv); + put_ep(epdata); + } else +diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c +index b4bca2d4a7e5..70fba973a107 100644 +--- a/drivers/xen/events/events_base.c ++++ b/drivers/xen/events/events_base.c +@@ -526,20 +526,26 @@ static unsigned int __startup_pirq(unsigned int irq) + pirq_query_unmask(irq); + + rc = set_evtchn_to_irq(evtchn, irq); +- if (rc != 0) { +- pr_err("irq%d: Failed to set port to irq mapping (%d)\n", +- irq, rc); +- xen_evtchn_close(evtchn); +- return 0; +- } ++ if (rc) ++ goto err; ++ + bind_evtchn_to_cpu(evtchn, 0); + info->evtchn = evtchn; + ++ rc = xen_evtchn_port_setup(info); ++ if (rc) ++ goto err; ++ + out: + unmask_evtchn(evtchn); + eoi_pirq(irq_get_irq_data(irq)); + + return 0; ++ ++err: ++ pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); ++ xen_evtchn_close(evtchn); ++ return 0; + } + + static unsigned int startup_pirq(struct irq_data *data) +diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c +index 46ae0f9f02ad..75fe3d466515 100644 +--- a/drivers/xen/xen-pciback/conf_space.c ++++ b/drivers/xen/xen-pciback/conf_space.c +@@ -16,7 +16,7 @@ + #include "conf_space.h" + #include "conf_space_quirks.h" + +-static bool permissive; ++bool permissive; + module_param(permissive, bool, 0644); + + /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, +diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h +index e56c934ad137..2e1d73d1d5d0 100644 +--- a/drivers/xen/xen-pciback/conf_space.h ++++ b/drivers/xen/xen-pciback/conf_space.h +@@ -64,6 +64,8 @@ struct config_field_entry { + void *data; + }; + ++extern bool permissive; ++ + #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) + + /* Add fields to a device - the add_fields macro expects to get a pointer to +diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c +index c5ee82587e8c..2d7369391472 100644 +--- a/drivers/xen/xen-pciback/conf_space_header.c ++++ b/drivers/xen/xen-pciback/conf_space_header.c +@@ -11,6 +11,10 @@ + #include "pciback.h" + #include "conf_space.h" + ++struct pci_cmd_info { ++ u16 val; ++}; ++ + struct pci_bar_info { + u32 val; + u32 len_val; +@@ -20,22 +24,36 @@ struct pci_bar_info { + #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) + #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) + +-static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) ++/* Bits guests are allowed to control in permissive mode. 
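
The PCI_COMMAND_GUEST mask defined just below drives the same two-way merge in command_read() and command_write(); a minimal sketch of the bit arithmetic (merge_cmd() is hypothetical):

#include <linux/types.h>

/* Bits inside @guest_mask come from @guest_val, the rest from @other. */
static u16 merge_cmd(u16 guest_val, u16 other, u16 guest_mask)
{
	return (guest_val & guest_mask) | (other & ~guest_mask);
}

/* read:  merge_cmd(hw_value, cached_value, PCI_COMMAND_GUEST)
 *        - guest-controllable bits reflect the hardware, the rest
 *          reflect what the guest last wrote;
 * write: merge_cmd(new_value, hw_value, PCI_COMMAND_GUEST)
 *        - only guest-controllable bits can reach the device. */
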
*/ ++#define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \ ++ PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \ ++ PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK) ++ ++static void *command_init(struct pci_dev *dev, int offset) + { +- int i; +- int ret; +- +- ret = xen_pcibk_read_config_word(dev, offset, value, data); +- if (!pci_is_enabled(dev)) +- return ret; +- +- for (i = 0; i < PCI_ROM_RESOURCE; i++) { +- if (dev->resource[i].flags & IORESOURCE_IO) +- *value |= PCI_COMMAND_IO; +- if (dev->resource[i].flags & IORESOURCE_MEM) +- *value |= PCI_COMMAND_MEMORY; ++ struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); ++ int err; ++ ++ if (!cmd) ++ return ERR_PTR(-ENOMEM); ++ ++ err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val); ++ if (err) { ++ kfree(cmd); ++ return ERR_PTR(err); + } + ++ return cmd; ++} ++ ++static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) ++{ ++ int ret = pci_read_config_word(dev, offset, value); ++ const struct pci_cmd_info *cmd = data; ++ ++ *value &= PCI_COMMAND_GUEST; ++ *value |= cmd->val & ~PCI_COMMAND_GUEST; ++ + return ret; + } + +@@ -43,6 +61,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) + { + struct xen_pcibk_dev_data *dev_data; + int err; ++ u16 val; ++ struct pci_cmd_info *cmd = data; + + dev_data = pci_get_drvdata(dev); + if (!pci_is_enabled(dev) && is_enable_cmd(value)) { +@@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) + } + } + ++ cmd->val = value; ++ ++ if (!permissive && (!dev_data || !dev_data->permissive)) ++ return 0; ++ ++ /* Only allow the guest to control certain bits. */ ++ err = pci_read_config_word(dev, offset, &val); ++ if (err || val == value) ++ return err; ++ ++ value &= PCI_COMMAND_GUEST; ++ value |= val & ~PCI_COMMAND_GUEST; ++ + return pci_write_config_word(dev, offset, value); + } + +@@ -282,6 +315,8 @@ static const struct config_field header_common[] = { + { + .offset = PCI_COMMAND, + .size = 2, ++ .init = command_init, ++ .release = bar_release, + .u.w.read = command_read, + .u.w.write = command_write, + }, +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c +index ca887314aba9..f2bbb8513360 100644 +--- a/fs/fuse/dev.c ++++ b/fs/fuse/dev.c +@@ -814,8 +814,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) + + newpage = buf->page; + +- if (WARN_ON(!PageUptodate(newpage))) +- return -EIO; ++ if (!PageUptodate(newpage)) ++ SetPageUptodate(newpage); + + ClearPageMappedToDisk(newpage); + +@@ -1721,6 +1721,9 @@ copy_finish: + static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, + unsigned int size, struct fuse_copy_state *cs) + { ++ /* Don't try to move pages (yet) */ ++ cs->move_pages = 0; ++ + switch (code) { + case FUSE_NOTIFY_POLL: + return fuse_notify_poll(fc, size, cs); +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c +index 469086b9f99b..0c3f303baf32 100644 +--- a/fs/nilfs2/segment.c ++++ b/fs/nilfs2/segment.c +@@ -1907,6 +1907,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, + struct the_nilfs *nilfs) + { + struct nilfs_inode_info *ii, *n; ++ int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE); + int defer_iput = false; + + spin_lock(&nilfs->ns_inode_lock); +@@ -1919,10 +1920,10 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, + brelse(ii->i_bh); + ii->i_bh = NULL; + list_del_init(&ii->i_dirty); +- if (!ii->vfs_inode.i_nlink) { ++ if (!ii->vfs_inode.i_nlink || during_mount) { + /* 
+- * Defer calling iput() to avoid a deadlock +- * over I_SYNC flag for inodes with i_nlink == 0 ++ * Defer calling iput() to avoid deadlocks if ++ * i_nlink == 0 or mount is not yet finished. + */ + list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); + defer_iput = true; +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index e8972bcddfb4..69aa378e60d9 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -1291,6 +1291,9 @@ out: + + static int pagemap_open(struct inode *inode, struct file *file) + { ++ /* do not disclose physical addresses: attack vector */ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; + pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about " + "to stop being page-shift some time soon. See the " + "linux/Documentation/vm/pagemap.txt for details.\n"); +diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h +index 5672d7ea1fa0..08848050922e 100644 +--- a/include/asm-generic/tlb.h ++++ b/include/asm-generic/tlb.h +@@ -96,10 +96,9 @@ struct mmu_gather { + #endif + unsigned long start; + unsigned long end; +- unsigned int need_flush : 1, /* Did free PTEs */ + /* we are in the middle of an operation to clear + * a full mm and can make some optimizations */ +- fullmm : 1, ++ unsigned int fullmm : 1, + /* we have performed an operation which + * requires a complete flush of the tlb */ + need_flush_all : 1; +@@ -128,16 +127,54 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) + tlb_flush_mmu(tlb); + } + ++static inline void __tlb_adjust_range(struct mmu_gather *tlb, ++ unsigned long address) ++{ ++ tlb->start = min(tlb->start, address); ++ tlb->end = max(tlb->end, address + PAGE_SIZE); ++} ++ ++static inline void __tlb_reset_range(struct mmu_gather *tlb) ++{ ++ tlb->start = TASK_SIZE; ++ tlb->end = 0; ++} ++ ++/* ++ * In the case of tlb vma handling, we can optimise these away in the ++ * case where we're doing a full MM flush. When we're doing a munmap, ++ * the vmas are adjusted to only cover the region to be torn down. ++ */ ++#ifndef tlb_start_vma ++#define tlb_start_vma(tlb, vma) do { } while (0) ++#endif ++ ++#define __tlb_end_vma(tlb, vma) \ ++ do { \ ++ if (!tlb->fullmm && tlb->end) { \ ++ tlb_flush(tlb); \ ++ __tlb_reset_range(tlb); \ ++ } \ ++ } while (0) ++ ++#ifndef tlb_end_vma ++#define tlb_end_vma __tlb_end_vma ++#endif ++ ++#ifndef __tlb_remove_tlb_entry ++#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) ++#endif ++ + /** + * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation. + * +- * Record the fact that pte's were really umapped in ->need_flush, so we can +- * later optimise away the tlb invalidate. This helps when userspace is +- * unmapping already-unmapped pages, which happens quite a lot. ++ * Record the fact that pte's were really unmapped by updating the range, ++ * so we can later optimise away the tlb invalidate. This helps when ++ * userspace is unmapping already-unmapped pages, which happens quite a lot. 
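
A concrete walk-through of the new range tracking (addresses invented, PAGE_SIZE taken as 0x1000):

/*
 * __tlb_reset_range(tlb):            start = TASK_SIZE, end = 0  ("empty")
 * __tlb_adjust_range(tlb, 0x1000):   start = 0x1000,    end = 0x2000
 * __tlb_adjust_range(tlb, 0x5000):   start = 0x1000,    end = 0x6000
 *
 * "tlb->end != 0" is the new has-work test that replaces need_flush,
 * and the eventual tlb_flush() covers just [start, end).
 */
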
+ */ + #define tlb_remove_tlb_entry(tlb, ptep, address) \ + do { \ +- tlb->need_flush = 1; \ ++ __tlb_adjust_range(tlb, address); \ + __tlb_remove_tlb_entry(tlb, ptep, address); \ + } while (0) + +@@ -151,27 +188,27 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) + + #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \ + do { \ +- tlb->need_flush = 1; \ ++ __tlb_adjust_range(tlb, address); \ + __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ + } while (0) + + #define pte_free_tlb(tlb, ptep, address) \ + do { \ +- tlb->need_flush = 1; \ ++ __tlb_adjust_range(tlb, address); \ + __pte_free_tlb(tlb, ptep, address); \ + } while (0) + + #ifndef __ARCH_HAS_4LEVEL_HACK + #define pud_free_tlb(tlb, pudp, address) \ + do { \ +- tlb->need_flush = 1; \ ++ __tlb_adjust_range(tlb, address); \ + __pud_free_tlb(tlb, pudp, address); \ + } while (0) + #endif + + #define pmd_free_tlb(tlb, pmdp, address) \ + do { \ +- tlb->need_flush = 1; \ ++ __tlb_adjust_range(tlb, address); \ + __pmd_free_tlb(tlb, pmdp, address); \ + } while (0) + +diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h +index b996e6cde6bb..9eb54f41623e 100644 +--- a/include/linux/workqueue.h ++++ b/include/linux/workqueue.h +@@ -70,7 +70,8 @@ enum { + /* data contains off-queue information when !WORK_STRUCT_PWQ */ + WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, + +- WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE), ++ __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE, ++ WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING), + + /* + * When a work item is off queue, its high bits point to the last +diff --git a/kernel/cpuset.c b/kernel/cpuset.c +index 1f107c74087b..672310e1597e 100644 +--- a/kernel/cpuset.c ++++ b/kernel/cpuset.c +@@ -538,9 +538,6 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr, + + rcu_read_lock(); + cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { +- if (cp == root_cs) +- continue; +- + /* skip the whole subtree if @cp doesn't have any CPU */ + if (cpumask_empty(cp->cpus_allowed)) { + pos_css = css_rightmost_descendant(pos_css); +@@ -863,7 +860,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) + * If it becomes empty, inherit the effective mask of the + * parent, which is guaranteed to have some CPUs. + */ +- if (cpumask_empty(new_cpus)) ++ if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus)) + cpumask_copy(new_cpus, parent->effective_cpus); + + /* Skip the whole subtree if the cpumask remains the same. */ +@@ -1119,7 +1116,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) + * If it becomes empty, inherit the effective mask of the + * parent, which is guaranteed to have some MEMs. + */ +- if (nodes_empty(*new_mems)) ++ if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems)) + *new_mems = parent->effective_mems; + + /* Skip the whole subtree if the nodemask remains the same. 
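
Both the cpumask and nodemask walkers rely on the same pruning idiom in their pre-order descent; a condensed sketch (trimmed from the surrounding cpuset code):

rcu_read_lock();
cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
	/* if this cpuset's mask is unchanged, no descendant's can change */
	if (nodes_equal(cp->effective_mems, *new_mems)) {
		pos_css = css_rightmost_descendant(pos_css);
		continue;	/* iteration resumes past the subtree */
	}
	/* ... otherwise apply *new_mems to cp and keep descending ... */
}
rcu_read_unlock();
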
*/ +@@ -1991,7 +1988,9 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) + + mutex_lock(&callback_mutex); + cs->mems_allowed = parent->mems_allowed; ++ cs->effective_mems = parent->mems_allowed; + cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); ++ cpumask_copy(cs->effective_cpus, parent->cpus_allowed); + mutex_unlock(&callback_mutex); + out_unlock: + mutex_unlock(&cpuset_mutex); +diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h +index cbd69d842341..2ca4a8b5fe57 100644 +--- a/kernel/printk/console_cmdline.h ++++ b/kernel/printk/console_cmdline.h +@@ -3,7 +3,7 @@ + + struct console_cmdline + { +- char name[8]; /* Name of the driver */ ++ char name[16]; /* Name of the driver */ + int index; /* Minor dev. to use */ + char *options; /* Options for the driver */ + #ifdef CONFIG_A11Y_BRAILLE_CONSOLE +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c +index ced2b84b1cb7..bf95fdad4d96 100644 +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -2440,6 +2440,7 @@ void register_console(struct console *newcon) + for (i = 0, c = console_cmdline; + i < MAX_CMDLINECONSOLES && c->name[0]; + i++, c++) { ++ BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name)); + if (strcmp(c->name, newcon->name) != 0) + continue; + if (newcon->index >= 0 && +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index 124e2c702ead..d1eff3dd8a02 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -1053,6 +1053,12 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer) + + static struct pid * const ftrace_swapper_pid = &init_struct_pid; + ++#ifdef CONFIG_FUNCTION_GRAPH_TRACER ++static int ftrace_graph_active; ++#else ++# define ftrace_graph_active 0 ++#endif ++ + #ifdef CONFIG_DYNAMIC_FTRACE + + static struct ftrace_ops *removed_ops; +@@ -1849,8 +1855,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) + if (!ftrace_rec_count(rec)) + rec->flags = 0; + else +- /* Just disable the record (keep REGS state) */ +- rec->flags &= ~FTRACE_FL_ENABLED; ++ /* ++ * Just disable the record, but keep the ops TRAMP ++ * and REGS states. The _EN flags must be disabled though. 
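
As I read the flag scheme in ftrace.h (an assumption worth checking against the header), each capability is tracked by a wanted/installed bit pair, which is what lets this hunk drop only the installed side:

/* wanted vs. currently-installed state, per callsite record:
 *   FTRACE_FL_TRAMP / FTRACE_FL_TRAMP_EN
 *   FTRACE_FL_REGS  / FTRACE_FL_REGS_EN
 * NOP-ing the site clears ENABLED and the _EN bits only, so the wanted
 * bits survive for a later FTRACE_UPDATE_MAKE_CALL to re-install: */
rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | FTRACE_FL_REGS_EN);
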
++ */ ++ rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | ++ FTRACE_FL_REGS_EN); + } + + return FTRACE_UPDATE_MAKE_NOP; +@@ -2482,24 +2492,36 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) + + static void ftrace_startup_sysctl(void) + { ++ int command; ++ + if (unlikely(ftrace_disabled)) + return; + + /* Force update next time */ + saved_ftrace_func = NULL; + /* ftrace_start_up is true if we want ftrace running */ +- if (ftrace_start_up) +- ftrace_run_update_code(FTRACE_UPDATE_CALLS); ++ if (ftrace_start_up) { ++ command = FTRACE_UPDATE_CALLS; ++ if (ftrace_graph_active) ++ command |= FTRACE_START_FUNC_RET; ++ ftrace_startup_enable(command); ++ } + } + + static void ftrace_shutdown_sysctl(void) + { ++ int command; ++ + if (unlikely(ftrace_disabled)) + return; + + /* ftrace_start_up is true if ftrace is running */ +- if (ftrace_start_up) +- ftrace_run_update_code(FTRACE_DISABLE_CALLS); ++ if (ftrace_start_up) { ++ command = FTRACE_DISABLE_CALLS; ++ if (ftrace_graph_active) ++ command |= FTRACE_STOP_FUNC_RET; ++ ftrace_run_update_code(command); ++ } + } + + static cycle_t ftrace_update_time; +@@ -5303,12 +5325,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, + + if (ftrace_enabled) { + +- ftrace_startup_sysctl(); +- + /* we are starting ftrace again */ + if (ftrace_ops_list != &ftrace_list_end) + update_ftrace_function(); + ++ ftrace_startup_sysctl(); ++ + } else { + /* stopping ftrace calls (just send to ftrace_stub) */ + ftrace_trace_function = ftrace_stub; +@@ -5334,8 +5356,6 @@ static struct ftrace_ops graph_ops = { + ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) + }; + +-static int ftrace_graph_active; +- + int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) + { + return 0; +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index 66940a53d128..2273f534b01a 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -2710,19 +2710,57 @@ bool flush_work(struct work_struct *work) + } + EXPORT_SYMBOL_GPL(flush_work); + ++struct cwt_wait { ++ wait_queue_t wait; ++ struct work_struct *work; ++}; ++ ++static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key) ++{ ++ struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); ++ ++ if (cwait->work != key) ++ return 0; ++ return autoremove_wake_function(wait, mode, sync, key); ++} ++ + static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) + { ++ static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); + unsigned long flags; + int ret; + + do { + ret = try_to_grab_pending(work, is_dwork, &flags); + /* +- * If someone else is canceling, wait for the same event it +- * would be waiting for before retrying. ++ * If someone else is already canceling, wait for it to ++ * finish. flush_work() doesn't work for PREEMPT_NONE ++ * because we may get scheduled between @work's completion ++ * and the other canceling task resuming and clearing ++ * CANCELING - flush_work() will return false immediately ++ * as @work is no longer busy, try_to_grab_pending() will ++ * return -ENOENT as @work is still being canceled and the ++ * other canceling task won't be able to clear CANCELING as ++ * we're hogging the CPU. ++ * ++ * Let's wait for completion using a waitqueue. As this ++ * may lead to the thundering herd problem, use a custom ++ * wake function which matches @work along with exclusive ++ * wait and wakeup. 
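
Condensed, the pattern this comment describes pairs an exclusive, key-matched waiter with a single-waiter wakeup (both halves appear in full in the hunks below; this is just the skeleton):

/* waiter: one per task stuck in cancel */
init_wait(&cwait.wait);
cwait.wait.func = cwt_wakefn;	/* wakes only if key == cwait.work */
cwait.work = work;
prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, TASK_UNINTERRUPTIBLE);
if (work_is_canceling(work))
	schedule();
finish_wait(&cancel_waitq, &cwait.wait);

/* waker: nr_exclusive == 1 and key == work, so exactly one matching
 * waiter resumes per finished cancel - no thundering herd */
__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
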
+ */ +- if (unlikely(ret == -ENOENT)) +- flush_work(work); ++ if (unlikely(ret == -ENOENT)) { ++ struct cwt_wait cwait; ++ ++ init_wait(&cwait.wait); ++ cwait.wait.func = cwt_wakefn; ++ cwait.work = work; ++ ++ prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, ++ TASK_UNINTERRUPTIBLE); ++ if (work_is_canceling(work)) ++ schedule(); ++ finish_wait(&cancel_waitq, &cwait.wait); ++ } + } while (unlikely(ret < 0)); + + /* tell other tasks trying to grab @work to back off */ +@@ -2731,6 +2769,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) + + flush_work(work); + clear_work_data(work); ++ ++ /* ++ * Paired with prepare_to_wait() above so that either ++ * waitqueue_active() is visible here or !work_is_canceling() is ++ * visible there. ++ */ ++ smp_mb(); ++ if (waitqueue_active(&cancel_waitq)) ++ __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); ++ + return ret; + } + +diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c +index 7a85967060a5..f0f5c5c3de12 100644 +--- a/lib/lz4/lz4_decompress.c ++++ b/lib/lz4/lz4_decompress.c +@@ -139,6 +139,9 @@ static int lz4_uncompress(const char *source, char *dest, int osize) + /* Error: request to write beyond destination buffer */ + if (cpy > oend) + goto _output_error; ++ if ((ref + COPYLENGTH) > oend || ++ (op + COPYLENGTH) > oend) ++ goto _output_error; + LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); + while (op < cpy) + *op++ = *ref++; +diff --git a/mm/memory.c b/mm/memory.c +index 4ffa7b571fb8..90fb265b32b6 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -220,9 +220,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long + /* Is it from 0 to ~0? */ + tlb->fullmm = !(start | (end+1)); + tlb->need_flush_all = 0; +- tlb->start = start; +- tlb->end = end; +- tlb->need_flush = 0; + tlb->local.next = NULL; + tlb->local.nr = 0; + tlb->local.max = ARRAY_SIZE(tlb->__pages); +@@ -232,15 +229,20 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long + #ifdef CONFIG_HAVE_RCU_TABLE_FREE + tlb->batch = NULL; + #endif ++ ++ __tlb_reset_range(tlb); + } + + static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) + { +- tlb->need_flush = 0; ++ if (!tlb->end) ++ return; ++ + tlb_flush(tlb); + #ifdef CONFIG_HAVE_RCU_TABLE_FREE + tlb_table_flush(tlb); + #endif ++ __tlb_reset_range(tlb); + } + + static void tlb_flush_mmu_free(struct mmu_gather *tlb) +@@ -256,8 +258,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb) + + void tlb_flush_mmu(struct mmu_gather *tlb) + { +- if (!tlb->need_flush) +- return; + tlb_flush_mmu_tlbonly(tlb); + tlb_flush_mmu_free(tlb); + } +@@ -292,7 +292,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) + { + struct mmu_gather_batch *batch; + +- VM_BUG_ON(!tlb->need_flush); ++ VM_BUG_ON(!tlb->end); + + batch = tlb->active; + batch->pages[batch->nr++] = page; +@@ -359,8 +359,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) + { + struct mmu_table_batch **batch = &tlb->batch; + +- tlb->need_flush = 1; +- + /* + * When there's less then two users of this mm there cannot be a + * concurrent page-table walk. +@@ -1186,20 +1184,8 @@ again: + arch_leave_lazy_mmu_mode(); + + /* Do the actual TLB flush before dropping ptl */ +- if (force_flush) { +- unsigned long old_end; +- +- /* +- * Flush the TLB just for the previous segment, +- * then update the range to be the remaining +- * TLB range. 
+- */ +- old_end = tlb->end; +- tlb->end = addr; ++ if (force_flush) + tlb_flush_mmu_tlbonly(tlb); +- tlb->start = addr; +- tlb->end = old_end; +- } + pte_unmap_unlock(start_pte, ptl); + + /* +diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c +index 43f750e88e19..765c78110fa8 100644 +--- a/net/caif/caif_socket.c ++++ b/net/caif/caif_socket.c +@@ -281,7 +281,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, + int copylen; + + ret = -EOPNOTSUPP; +- if (m->msg_flags&MSG_OOB) ++ if (flags & MSG_OOB) + goto read_error; + + skb = skb_recv_datagram(sk, flags, 0 , &ret); +diff --git a/net/can/af_can.c b/net/can/af_can.c +index ce82337521f6..d6030d6949df 100644 +--- a/net/can/af_can.c ++++ b/net/can/af_can.c +@@ -262,6 +262,9 @@ int can_send(struct sk_buff *skb, int loop) + goto inval_skb; + } + ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++ ++ skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + +diff --git a/net/compat.c b/net/compat.c +index c48930373e65..53e933eb78b8 100644 +--- a/net/compat.c ++++ b/net/compat.c +@@ -71,6 +71,13 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg) + __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || + __get_user(kmsg->msg_flags, &umsg->msg_flags)) + return -EFAULT; ++ ++ if (!tmp1) ++ kmsg->msg_namelen = 0; ++ ++ if (kmsg->msg_namelen < 0) ++ return -EINVAL; ++ + if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) + kmsg->msg_namelen = sizeof(struct sockaddr_storage); + kmsg->msg_name = compat_ptr(tmp1); +diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c +index cf9cd13509a7..e731c96eac4b 100644 +--- a/net/core/sysctl_net_core.c ++++ b/net/core/sysctl_net_core.c +@@ -25,6 +25,8 @@ + static int zero = 0; + static int one = 1; + static int ushort_max = USHRT_MAX; ++static int min_sndbuf = SOCK_MIN_SNDBUF; ++static int min_rcvbuf = SOCK_MIN_RCVBUF; + + #ifdef CONFIG_RPS + static int rps_sock_flow_sysctl(struct ctl_table *table, int write, +@@ -223,7 +225,7 @@ static struct ctl_table net_core_table[] = { + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, +- .extra1 = &one, ++ .extra1 = &min_sndbuf, + }, + { + .procname = "rmem_max", +@@ -231,7 +233,7 @@ static struct ctl_table net_core_table[] = { + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, +- .extra1 = &one, ++ .extra1 = &min_rcvbuf, + }, + { + .procname = "wmem_default", +@@ -239,7 +241,7 @@ static struct ctl_table net_core_table[] = { + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, +- .extra1 = &one, ++ .extra1 = &min_sndbuf, + }, + { + .procname = "rmem_default", +@@ -247,7 +249,7 @@ static struct ctl_table net_core_table[] = { + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, +- .extra1 = &one, ++ .extra1 = &min_rcvbuf, + }, + { + .procname = "dev_weight", +diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c +index e34dccbc4d70..4eeba4e497a0 100644 +--- a/net/ipv4/inet_diag.c ++++ b/net/ipv4/inet_diag.c +@@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler( + mutex_unlock(&inet_diag_table_mutex); + } + ++static size_t inet_sk_attr_size(void) ++{ ++ return nla_total_size(sizeof(struct tcp_info)) ++ + nla_total_size(1) /* INET_DIAG_SHUTDOWN */ ++ + nla_total_size(1) /* INET_DIAG_TOS */ ++ + nla_total_size(1) /* INET_DIAG_TCLASS */ ++ + nla_total_size(sizeof(struct inet_diag_meminfo)) ++ + nla_total_size(sizeof(struct 
inet_diag_msg)) ++ + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) ++ + nla_total_size(TCP_CA_NAME_MAX) ++ + nla_total_size(sizeof(struct tcpvegas_info)) ++ + 64; ++} ++ + int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, + struct sk_buff *skb, struct inet_diag_req_v2 *req, + struct user_namespace *user_ns, +@@ -324,9 +338,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s + if (err) + goto out; + +- rep = nlmsg_new(sizeof(struct inet_diag_msg) + +- sizeof(struct inet_diag_meminfo) + +- sizeof(struct tcp_info) + 64, GFP_KERNEL); ++ rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL); + if (!rep) { + err = -ENOMEM; + goto out; +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index c2df40ba553f..022ecbc9322d 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -2739,15 +2739,11 @@ void tcp_send_fin(struct sock *sk) + } else { + /* Socket is locked, keep trying until memory is available. */ + for (;;) { +- skb = alloc_skb_fclone(MAX_TCP_HEADER, +- sk->sk_allocation); ++ skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); + if (skb) + break; + yield(); + } +- +- /* Reserve space for headers and prepare control bits. */ +- skb_reserve(skb, MAX_TCP_HEADER); + /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ + tcp_init_nondata_skb(skb, tp->write_seq, + TCPHDR_ACK | TCPHDR_FIN); +@@ -2998,9 +2994,9 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) + { + struct tcp_sock *tp = tcp_sk(sk); + struct tcp_fastopen_request *fo = tp->fastopen_req; +- int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen; +- struct sk_buff *syn_data = NULL, *data; ++ int syn_loss = 0, space, err = 0; + unsigned long last_syn_loss = 0; ++ struct sk_buff *syn_data; + + tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ + tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie, +@@ -3031,48 +3027,40 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) + /* limit to order-0 allocations */ + space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER)); + +- syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space, +- sk->sk_allocation); +- if (syn_data == NULL) ++ syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation); ++ if (!syn_data) ++ goto fallback; ++ syn_data->ip_summed = CHECKSUM_PARTIAL; ++ memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); ++ if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space), ++ fo->data->msg_iov, 0, space))) { ++ kfree_skb(syn_data); + goto fallback; ++ } + +- for (i = 0; i < iovlen && syn_data->len < space; ++i) { +- struct iovec *iov = &fo->data->msg_iov[i]; +- unsigned char __user *from = iov->iov_base; +- int len = iov->iov_len; ++ /* No more data pending in inet_wait_for_connect() */ ++ if (space == fo->size) ++ fo->data = NULL; ++ fo->copied = space; + +- if (syn_data->len + len > space) +- len = space - syn_data->len; +- else if (i + 1 == iovlen) +- /* No more data pending in inet_wait_for_connect() */ +- fo->data = NULL; ++ tcp_connect_queue_skb(sk, syn_data); + +- if (skb_add_data(syn_data, from, len)) +- goto fallback; +- } ++ err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); + +- /* Queue a data-only packet after the regular SYN for retransmission */ +- data = pskb_copy(syn_data, sk->sk_allocation); +- if (data == NULL) +- goto fallback; +- TCP_SKB_CB(data)->seq++; +- TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN; +- TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH); +- tcp_connect_queue_skb(sk, data); 
+- fo->copied = data->len; +- +- /* syn_data is about to be sent, we need to take current time stamps +- * for the packets that are in write queue : SYN packet and DATA +- */ +- skb_mstamp_get(&syn->skb_mstamp); +- data->skb_mstamp = syn->skb_mstamp; ++ syn->skb_mstamp = syn_data->skb_mstamp; + +- if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) { ++ /* Now full SYN+DATA was cloned and sent (or not), ++ * remove the SYN from the original skb (syn_data) ++ * we keep in write queue in case of a retransmit, as we ++ * also have the SYN packet (with no data) in the same queue. ++ */ ++ TCP_SKB_CB(syn_data)->seq++; ++ TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH; ++ if (!err) { + tp->syn_data = (fo->copied > 0); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT); + goto done; + } +- syn_data = NULL; + + fallback: + /* Send a regular SYN with Fast Open cookie request option */ +@@ -3081,7 +3069,6 @@ fallback: + err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); + if (err) + tp->syn_fastopen = 0; +- kfree_skb(syn_data); + done: + fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ + return err; +@@ -3101,13 +3088,10 @@ int tcp_connect(struct sock *sk) + return 0; + } + +- buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); +- if (unlikely(buff == NULL)) ++ buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); ++ if (unlikely(!buff)) + return -ENOBUFS; + +- /* Reserve space for headers. */ +- skb_reserve(buff, MAX_TCP_HEADER); +- + tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); + tp->retrans_stamp = tcp_time_stamp; + tcp_connect_queue_skb(sk, buff); +diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c +index b4d5e1d97c1b..27ca79682efb 100644 +--- a/net/ipv6/fib6_rules.c ++++ b/net/ipv6/fib6_rules.c +@@ -104,6 +104,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, + goto again; + flp6->saddr = saddr; + } ++ err = rt->dst.error; + goto out; + } + again: +diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c +index a817705ce2d0..dba8d0864f18 100644 +--- a/net/rds/iw_rdma.c ++++ b/net/rds/iw_rdma.c +@@ -88,7 +88,9 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, + int *unpinned); + static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); + +-static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id) ++static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst, ++ struct rds_iw_device **rds_iwdev, ++ struct rdma_cm_id **cm_id) + { + struct rds_iw_device *iwdev; + struct rds_iw_cm_id *i_cm_id; +@@ -112,15 +114,15 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd + src_addr->sin_port, + dst_addr->sin_addr.s_addr, + dst_addr->sin_port, +- rs->rs_bound_addr, +- rs->rs_bound_port, +- rs->rs_conn_addr, +- rs->rs_conn_port); ++ src->sin_addr.s_addr, ++ src->sin_port, ++ dst->sin_addr.s_addr, ++ dst->sin_port); + #ifdef WORKING_TUPLE_DETECTION +- if (src_addr->sin_addr.s_addr == rs->rs_bound_addr && +- src_addr->sin_port == rs->rs_bound_port && +- dst_addr->sin_addr.s_addr == rs->rs_conn_addr && +- dst_addr->sin_port == rs->rs_conn_port) { ++ if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr && ++ src_addr->sin_port == src->sin_port && ++ dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr && ++ dst_addr->sin_port == dst->sin_port) { + #else + /* FIXME - needs to compare the local and remote + * ipaddr/port tuple, but the ipaddr is the only 
+@@ -128,7 +130,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd + * zero'ed. It doesn't appear to be properly populated + * during connection setup... + */ +- if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) { ++ if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) { + #endif + spin_unlock_irq(&iwdev->spinlock); + *rds_iwdev = iwdev; +@@ -180,19 +182,13 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i + { + struct sockaddr_in *src_addr, *dst_addr; + struct rds_iw_device *rds_iwdev_old; +- struct rds_sock rs; + struct rdma_cm_id *pcm_id; + int rc; + + src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr; + dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr; + +- rs.rs_bound_addr = src_addr->sin_addr.s_addr; +- rs.rs_bound_port = src_addr->sin_port; +- rs.rs_conn_addr = dst_addr->sin_addr.s_addr; +- rs.rs_conn_port = dst_addr->sin_port; +- +- rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id); ++ rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id); + if (rc) + rds_iw_remove_cm_id(rds_iwdev, cm_id); + +@@ -598,9 +594,17 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents, + struct rds_iw_device *rds_iwdev; + struct rds_iw_mr *ibmr = NULL; + struct rdma_cm_id *cm_id; ++ struct sockaddr_in src = { ++ .sin_addr.s_addr = rs->rs_bound_addr, ++ .sin_port = rs->rs_bound_port, ++ }; ++ struct sockaddr_in dst = { ++ .sin_addr.s_addr = rs->rs_conn_addr, ++ .sin_port = rs->rs_conn_port, ++ }; + int ret; + +- ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id); ++ ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id); + if (ret || !cm_id) { + ret = -ENODEV; + goto out; +diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c +index e9aaa65c0778..0df95463c650 100644 +--- a/net/rxrpc/ar-recvmsg.c ++++ b/net/rxrpc/ar-recvmsg.c +@@ -87,7 +87,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, + if (!skb) { + /* nothing remains on the queue */ + if (copied && +- (msg->msg_flags & MSG_PEEK || timeo == 0)) ++ (flags & MSG_PEEK || timeo == 0)) + goto out; + + /* wait for a message to turn up */ +diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c +index 0472909bb014..b78d81f5ffda 100644 +--- a/net/sched/cls_u32.c ++++ b/net/sched/cls_u32.c +@@ -78,8 +78,11 @@ struct tc_u_hnode { + struct tc_u_common *tp_c; + int refcnt; + unsigned int divisor; +- struct tc_u_knode __rcu *ht[1]; + struct rcu_head rcu; ++ /* The 'ht' field MUST be the last field in structure to allow for ++ * more entries allocated at end of structure. ++ */ ++ struct tc_u_knode __rcu *ht[1]; + }; + + struct tc_u_common { +diff --git a/sound/core/control.c b/sound/core/control.c +index b9611344ff9e..82a638a01b24 100644 +--- a/sound/core/control.c ++++ b/sound/core/control.c +@@ -1167,6 +1167,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file, + + if (info->count < 1) + return -EINVAL; ++ if (!*info->id.name) ++ return -EINVAL; ++ if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name)) ++ return -EINVAL; + access = info->access == 0 ? 
SNDRV_CTL_ELEM_ACCESS_READWRITE : + (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| + SNDRV_CTL_ELEM_ACCESS_INACTIVE| +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c +index 84c94301bfaf..20aa52b14b84 100644 +--- a/sound/pci/hda/hda_controller.c ++++ b/sound/pci/hda/hda_controller.c +@@ -1160,7 +1160,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, + } + } + +- if (!bus->no_response_fallback) ++ if (bus->no_response_fallback) + return -1; + + if (!chip->polling_mode && chip->poll_count < 2) { +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c +index 64220c08bd98..6c6e35aba989 100644 +--- a/sound/pci/hda/hda_generic.c ++++ b/sound/pci/hda/hda_generic.c +@@ -637,12 +637,45 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid, + return val; + } + ++/* is this a stereo widget or a stereo-to-mono mix? */ ++static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, int dir) ++{ ++ unsigned int wcaps = get_wcaps(codec, nid); ++ hda_nid_t conn; ++ ++ if (wcaps & AC_WCAP_STEREO) ++ return true; ++ if (dir != HDA_INPUT || get_wcaps_type(wcaps) != AC_WID_AUD_MIX) ++ return false; ++ if (snd_hda_get_num_conns(codec, nid) != 1) ++ return false; ++ if (snd_hda_get_connections(codec, nid, &conn, 1) < 0) ++ return false; ++ return !!(get_wcaps(codec, conn) & AC_WCAP_STEREO); ++} ++ + /* initialize the amp value (only at the first time) */ + static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx) + { + unsigned int caps = query_amp_caps(codec, nid, dir); + int val = get_amp_val_to_activate(codec, nid, dir, caps, false); +- snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val); ++ ++ if (is_stereo_amps(codec, nid, dir)) ++ snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val); ++ else ++ snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val); ++} ++ ++/* update the amp, doing in stereo or mono depending on NID */ ++static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx, ++ unsigned int mask, unsigned int val) ++{ ++ if (is_stereo_amps(codec, nid, dir)) ++ return snd_hda_codec_amp_stereo(codec, nid, dir, idx, ++ mask, val); ++ else ++ return snd_hda_codec_amp_update(codec, nid, 0, dir, idx, ++ mask, val); + } + + /* calculate amp value mask we can modify; +@@ -682,7 +715,7 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir, + return; + + val &= mask; +- snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val); ++ update_amp(codec, nid, dir, idx, mask, val); + } + + static void activate_amp_out(struct hda_codec *codec, struct nid_path *path, +@@ -4331,13 +4364,11 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix) + has_amp = nid_has_mute(codec, mix, HDA_INPUT); + for (i = 0; i < nums; i++) { + if (has_amp) +- snd_hda_codec_amp_stereo(codec, mix, +- HDA_INPUT, i, +- 0xff, HDA_AMP_MUTE); ++ update_amp(codec, mix, HDA_INPUT, i, ++ 0xff, HDA_AMP_MUTE); + else if (nid_has_volume(codec, conn[i], HDA_OUTPUT)) +- snd_hda_codec_amp_stereo(codec, conn[i], +- HDA_OUTPUT, 0, +- 0xff, HDA_AMP_MUTE); ++ update_amp(codec, conn[i], HDA_OUTPUT, 0, ++ 0xff, HDA_AMP_MUTE); + } + } + +diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c +index ce5a6da83419..05e19f78b4cb 100644 +--- a/sound/pci/hda/hda_proc.c ++++ b/sound/pci/hda/hda_proc.c +@@ -134,13 +134,38 @@ static void print_amp_caps(struct snd_info_buffer *buffer, + (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT); + } + ++/* is this a stereo 
widget or a stereo-to-mono mix? */ ++static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, ++ int dir, unsigned int wcaps, int indices) ++{ ++ hda_nid_t conn; ++ ++ if (wcaps & AC_WCAP_STEREO) ++ return true; ++ /* check for a stereo-to-mono mix; it must be: ++ * only a single connection, only for input, and only a mixer widget ++ */ ++ if (indices != 1 || dir != HDA_INPUT || ++ get_wcaps_type(wcaps) != AC_WID_AUD_MIX) ++ return false; ++ ++ if (snd_hda_get_raw_connections(codec, nid, &conn, 1) < 0) ++ return false; ++ /* the connection source is a stereo? */ ++ wcaps = snd_hda_param_read(codec, conn, AC_PAR_AUDIO_WIDGET_CAP); ++ return !!(wcaps & AC_WCAP_STEREO); ++} ++ + static void print_amp_vals(struct snd_info_buffer *buffer, + struct hda_codec *codec, hda_nid_t nid, +- int dir, int stereo, int indices) ++ int dir, unsigned int wcaps, int indices) + { + unsigned int val; ++ bool stereo; + int i; + ++ stereo = is_stereo_amps(codec, nid, dir, wcaps, indices); ++ + dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT; + for (i = 0; i < indices; i++) { + snd_iprintf(buffer, " ["); +@@ -757,12 +782,10 @@ static void print_codec_info(struct snd_info_entry *entry, + (codec->single_adc_amp && + wid_type == AC_WID_AUD_IN)) + print_amp_vals(buffer, codec, nid, HDA_INPUT, +- wid_caps & AC_WCAP_STEREO, +- 1); ++ wid_caps, 1); + else + print_amp_vals(buffer, codec, nid, HDA_INPUT, +- wid_caps & AC_WCAP_STEREO, +- conn_len); ++ wid_caps, conn_len); + } + if (wid_caps & AC_WCAP_OUT_AMP) { + snd_iprintf(buffer, " Amp-Out caps: "); +@@ -771,11 +794,10 @@ static void print_codec_info(struct snd_info_entry *entry, + if (wid_type == AC_WID_PIN && + codec->pin_amp_workaround) + print_amp_vals(buffer, codec, nid, HDA_OUTPUT, +- wid_caps & AC_WCAP_STEREO, +- conn_len); ++ wid_caps, conn_len); + else + print_amp_vals(buffer, codec, nid, HDA_OUTPUT, +- wid_caps & AC_WCAP_STEREO, 1); ++ wid_caps, 1); + } + + switch (wid_type) { +diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c +index 1589c9bcce3e..dd2b3d92071f 100644 +--- a/sound/pci/hda/patch_cirrus.c ++++ b/sound/pci/hda/patch_cirrus.c +@@ -393,6 +393,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = { + SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), + SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), + SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), ++ SND_PCI_QUIRK(0x106b, 0x5600, "MacBookAir 5,2", CS420X_MBP81), + SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42), + SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE), + {} /* terminator */ +@@ -584,6 +585,7 @@ static int patch_cs420x(struct hda_codec *codec) + return -ENOMEM; + + spec->gen.automute_hook = cs_automute; ++ codec->single_adc_amp = 1; + + snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl, + cs420x_fixups); +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index e9ebc7bd752c..1b1e8c68edd6 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -223,6 +223,7 @@ enum { + CXT_PINCFG_LENOVO_TP410, + CXT_PINCFG_LEMOTE_A1004, + CXT_PINCFG_LEMOTE_A1205, ++ CXT_PINCFG_COMPAQ_CQ60, + CXT_FIXUP_STEREO_DMIC, + CXT_FIXUP_INC_MIC_BOOST, + CXT_FIXUP_HEADPHONE_MIC_PIN, +@@ -660,6 +661,15 @@ static const struct hda_fixup cxt_fixups[] = { + .type = HDA_FIXUP_PINS, + .v.pins = cxt_pincfg_lemote, + }, ++ [CXT_PINCFG_COMPAQ_CQ60] = { ++ .type = HDA_FIXUP_PINS, ++ .v.pins = (const struct hda_pintbl[]) { ++ /* 0x17 was falsely set up 
as a mic, it should 0x1d */ ++ { 0x17, 0x400001f0 }, ++ { 0x1d, 0x97a70120 }, ++ { } ++ } ++ }, + [CXT_FIXUP_STEREO_DMIC] = { + .type = HDA_FIXUP_FUNC, + .v.func = cxt_fixup_stereo_dmic, +@@ -769,6 +779,7 @@ static const struct hda_model_fixup cxt5047_fixup_models[] = { + }; + + static const struct snd_pci_quirk cxt5051_fixups[] = { ++ SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60), + SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200), + {} + }; +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h +index 83bddbdb90e9..5293b5ac8b9d 100644 +--- a/sound/usb/quirks-table.h ++++ b/sound/usb/quirks-table.h +@@ -1773,6 +1773,36 @@ YAMAHA_DEVICE(0x7010, "UB99"), + } + } + }, ++{ ++ USB_DEVICE(0x0582, 0x0159), ++ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { ++ /* .vendor_name = "Roland", */ ++ /* .product_name = "UA-22", */ ++ .ifnum = QUIRK_ANY_INTERFACE, ++ .type = QUIRK_COMPOSITE, ++ .data = (const struct snd_usb_audio_quirk[]) { ++ { ++ .ifnum = 0, ++ .type = QUIRK_AUDIO_STANDARD_INTERFACE ++ }, ++ { ++ .ifnum = 1, ++ .type = QUIRK_AUDIO_STANDARD_INTERFACE ++ }, ++ { ++ .ifnum = 2, ++ .type = QUIRK_MIDI_FIXED_ENDPOINT, ++ .data = & (const struct snd_usb_midi_endpoint_info) { ++ .out_cables = 0x0001, ++ .in_cables = 0x0001 ++ } ++ }, ++ { ++ .ifnum = -1 ++ } ++ } ++ } ++}, + /* this catches most recent vendor-specific Roland devices */ + { + .match_flags = USB_DEVICE_ID_MATCH_VENDOR | +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index 3cee7b167052..cfbe0e7d1c45 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -2417,6 +2417,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) + case KVM_CAP_SIGNAL_MSI: + #endif + #ifdef CONFIG_HAVE_KVM_IRQFD ++ case KVM_CAP_IRQFD: + case KVM_CAP_IRQFD_RESAMPLE: + #endif + case KVM_CAP_CHECK_EXTENSION_VM: -- cgit v1.2.3-65-gdbad
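
The last hunk lets the architecture-generic extension check answer for KVM_CAP_IRQFD wherever CONFIG_HAVE_KVM_IRQFD is set. A hedged userspace sketch of what a VMM can then do (fd setup and error handling elided; the GSI number is made up):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* vm_fd: an open KVM VM fd; event_fd: an eventfd driving the line. */
static int wire_irqfd(int vm_fd, int event_fd)
{
	struct kvm_irqfd irqfd = {
		.fd  = event_fd,
		.gsi = 5,	/* hypothetical guest interrupt line */
	};

	/* the VM-scoped probe works because of KVM_CAP_CHECK_EXTENSION_VM */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQFD) <= 0)
		return -1;
	return ioctl(vm_fd, KVM_IRQFD, &irqfd);
}
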