author     Mike Pagano <mpagano@gentoo.org>  2016-02-16 10:28:51 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2016-02-16 10:28:51 -0500
commit     85527ab7ede917abc023e0e77dabb8500b8b3cf1 (patch)
tree       eaf5cf368d0a7e47b0429d1d66b120f898712702
parent     Linux patch 4.1.17 (diff)
download   linux-patches-85527ab7ede917abc023e0e77dabb8500b8b3cf1.tar.gz
           linux-patches-85527ab7ede917abc023e0e77dabb8500b8b3cf1.tar.bz2
           linux-patches-85527ab7ede917abc023e0e77dabb8500b8b3cf1.zip
Linux patch 4.1.18 (4.1-23)
-rw-r--r--  0000_README             |     4 +
-rw-r--r--  1017_linux-4.1.18.patch | 11807 ++++++++++++++++++++++++++++++++
2 files changed, 11811 insertions(+), 0 deletions(-)
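The 1017_linux-4.1.18.patch added below is a plain -p1 diff, applied after 1016_linux-4.1.17.patch in the order 0000_README lists. As a rough sketch of applying it by hand (the tree already patched to 4.1.17 and the paths are assumptions for illustration, not part of this commit):

    cd linux-4.1                                       # tree currently at 4.1.17
    patch -p1 --dry-run < ../1017_linux-4.1.18.patch   # verify it applies cleanly first
    patch -p1 < ../1017_linux-4.1.18.patch
    grep '^SUBLEVEL' Makefile                          # should now print: SUBLEVEL = 18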
diff --git a/0000_README b/0000_README
index 8b9fa0f1..ed66531a 100644
--- a/0000_README
+++ b/0000_README
@@ -111,6 +111,10 @@ Patch: 1016_linux-4.1.17.patch
From: http://www.kernel.org
Desc: Linux 4.1.17
+Patch: 1017_linux-4.1.18.patch
+From: http://www.kernel.org
+Desc: Linux 4.1.18
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1017_linux-4.1.18.patch b/1017_linux-4.1.18.patch
new file mode 100644
index 00000000..ef682b72
--- /dev/null
+++ b/1017_linux-4.1.18.patch
@@ -0,0 +1,11807 @@
+diff --git a/Makefile b/Makefile
+index d398dd440bc9..001375cfd815 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 17
++SUBLEVEL = 18
+ EXTRAVERSION =
+ NAME = Series 4800
+
+diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
+index 0c12ffb155a2..f775d7161ffb 100644
+--- a/arch/arm/Kconfig.debug
++++ b/arch/arm/Kconfig.debug
+@@ -161,10 +161,9 @@ choice
+ mobile SoCs in the Kona family of chips (e.g. bcm28155,
+ bcm11351, etc...)
+
+- config DEBUG_BCM63XX
++ config DEBUG_BCM63XX_UART
+ bool "Kernel low-level debugging on BCM63XX UART"
+ depends on ARCH_BCM_63XX
+- select DEBUG_UART_BCM63XX
+
+ config DEBUG_BERLIN_UART
+ bool "Marvell Berlin SoC Debug UART"
+@@ -1304,7 +1303,7 @@ config DEBUG_LL_INCLUDE
+ default "debug/vf.S" if DEBUG_VF_UART
+ default "debug/vt8500.S" if DEBUG_VT8500_UART0
+ default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
+- default "debug/bcm63xx.S" if DEBUG_UART_BCM63XX
++ default "debug/bcm63xx.S" if DEBUG_BCM63XX_UART
+ default "debug/digicolor.S" if DEBUG_DIGICOLOR_UA0
+ default "mach/debug-macro.S"
+
+@@ -1320,10 +1319,6 @@ config DEBUG_UART_8250
+ ARCH_IOP33X || ARCH_IXP4XX || \
+ ARCH_LPC32XX || ARCH_MV78XX0 || ARCH_ORION5X || ARCH_RPC
+
+-# Compatibility options for BCM63xx
+-config DEBUG_UART_BCM63XX
+- def_bool ARCH_BCM_63XX
+-
+ config DEBUG_UART_PHYS
+ hex "Physical base address of debug UART"
+ default 0x00100a00 if DEBUG_NETX_UART
+@@ -1415,7 +1410,7 @@ config DEBUG_UART_PHYS
+ default 0xfffb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1
+ default 0xfffb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
+ default 0xfffb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3
+- default 0xfffe8600 if DEBUG_UART_BCM63XX
++ default 0xfffe8600 if DEBUG_BCM63XX_UART
+ default 0xfffff700 if ARCH_IOP33X
+ depends on ARCH_EP93XX || \
+ DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+@@ -1427,7 +1422,7 @@ config DEBUG_UART_PHYS
+ DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF2 || \
+ DEBUG_RMOBILE_SCIFA0 || DEBUG_RMOBILE_SCIFA1 || \
+ DEBUG_RMOBILE_SCIFA4 || DEBUG_S3C24XX_UART || \
+- DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
++ DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
+ DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
+
+ config DEBUG_UART_VIRT
+@@ -1466,7 +1461,7 @@ config DEBUG_UART_VIRT
+ default 0xfb009000 if DEBUG_REALVIEW_STD_PORT
+ default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
+ default 0xfc40ab00 if DEBUG_BRCMSTB_UART
+- default 0xfcfe8600 if DEBUG_UART_BCM63XX
++ default 0xfcfe8600 if DEBUG_BCM63XX_UART
+ default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
+ default 0xfd000000 if ARCH_SPEAR13XX
+ default 0xfd012000 if ARCH_MV78XX0
+@@ -1516,7 +1511,7 @@ config DEBUG_UART_VIRT
+ DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
+ DEBUG_NETX_UART || \
+ DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
+- DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
++ DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
+ DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
+
+ config DEBUG_UART_8250_SHIFT
+diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
+index 78514ab0b47a..757ac079e7f2 100644
+--- a/arch/arm/boot/dts/armada-388-gp.dts
++++ b/arch/arm/boot/dts/armada-388-gp.dts
+@@ -288,16 +288,6 @@
+ gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
+ };
+
+- reg_usb2_1_vbus: v5-vbus1 {
+- compatible = "regulator-fixed";
+- regulator-name = "v5.0-vbus1";
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
+- enable-active-high;
+- regulator-always-on;
+- gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
+- };
+-
+ reg_sata0: pwr-sata0 {
+ compatible = "regulator-fixed";
+ regulator-name = "pwr_en_sata0";
+diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+index c740e1a2a3a5..4f29968076ce 100644
+--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+@@ -98,7 +98,7 @@
+
+ phy0: ethernet-phy@1 {
+ interrupt-parent = <&pioE>;
+- interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
++ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ reg = <1>;
+ };
+ };
+diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
+index 45e7761b7a29..d4d24a081404 100644
+--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
+@@ -141,8 +141,15 @@
+ };
+
+ macb0: ethernet@f8020000 {
++ pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
+ phy-mode = "rmii";
+ status = "okay";
++
++ ethernet-phy@1 {
++ reg = <0x1>;
++ interrupt-parent = <&pioE>;
++ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
++ };
+ };
+
+ mmc1: mmc@fc000000 {
+@@ -174,6 +181,10 @@
+
+ pinctrl@fc06a000 {
+ board {
++ pinctrl_macb0_phy_irq: macb0_phy_irq {
++ atmel,pins =
++ <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
++ };
+ pinctrl_mmc0_cd: mmc0_cd {
+ atmel,pins =
+ <AT91_PIOE 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
+index 9cf0ab62db7d..cf11660f35a1 100644
+--- a/arch/arm/boot/dts/sama5d4.dtsi
++++ b/arch/arm/boot/dts/sama5d4.dtsi
+@@ -1219,7 +1219,7 @@
+ dbgu: serial@fc069000 {
+ compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+ reg = <0xfc069000 0x200>;
+- interrupts = <2 IRQ_TYPE_LEVEL_HIGH 7>;
++ interrupts = <45 IRQ_TYPE_LEVEL_HIGH 7>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_dbgu>;
+ clocks = <&dbgu_clk>;
+diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+index f182f6538e90..89ed9b45d533 100644
+--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
++++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+@@ -122,22 +122,14 @@
+ };
+ mmcsd_default_mode: mmcsd_default {
+ mmcsd_default_cfg1 {
+- /* MCCLK */
+- pins = "GPIO8_B10";
+- ste,output = <0>;
+- };
+- mmcsd_default_cfg2 {
+- /* MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2 */
+- pins = "GPIO10_C11", "GPIO15_A12",
+- "GPIO16_C13", "GPIO23_D15";
+- ste,output = <1>;
+- };
+- mmcsd_default_cfg3 {
+- /* MCCMD, MCDAT3-0, MCMSFBCLK */
+- pins = "GPIO9_A10", "GPIO11_B11",
+- "GPIO12_A11", "GPIO13_C12",
+- "GPIO14_B12", "GPIO24_C15";
+- ste,input = <1>;
++ /*
++ * MCCLK, MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2
++ * MCCMD, MCDAT3-0, MCMSFBCLK
++ */
++ pins = "GPIO8_B10", "GPIO9_A10", "GPIO10_C11", "GPIO11_B11",
++ "GPIO12_A11", "GPIO13_C12", "GPIO14_B12", "GPIO15_A12",
++ "GPIO16_C13", "GPIO23_D15", "GPIO24_C15";
++ ste,output = <2>;
+ };
+ };
+ };
+@@ -802,10 +794,21 @@
+ clock-names = "mclk", "apb_pclk";
+ interrupt-parent = <&vica>;
+ interrupts = <22>;
+- max-frequency = <48000000>;
++ max-frequency = <400000>;
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
++ full-pwr-cycle;
++ /*
++ * The STw4811 circuit used with the Nomadik strictly
++ * requires that all of these signal direction pins be
++ * routed and used for its 4-bit levelshifter.
++ */
++ st,sig-dir-dat0;
++ st,sig-dir-dat2;
++ st,sig-dir-dat31;
++ st,sig-dir-cmd;
++ st,sig-pin-fbclk;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
+ vmmc-supply = <&vmmc_regulator>;
+diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
+index eafd120b53f1..8e2a7acb823b 100644
+--- a/arch/arm/mach-omap2/sleep34xx.S
++++ b/arch/arm/mach-omap2/sleep34xx.S
+@@ -86,13 +86,18 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
+ stmfd sp!, {lr} @ save registers on stack
+ /* Setup so that we will disable and enable l2 */
+ mov r1, #0x1
+- adrl r2, l2dis_3630 @ may be too distant for plain adr
+- str r1, [r2]
++ adrl r3, l2dis_3630_offset @ may be too distant for plain adr
++ ldr r2, [r3] @ value for offset
++ str r1, [r2, r3] @ write to l2dis_3630
+ ldmfd sp!, {pc} @ restore regs and return
+ ENDPROC(enable_omap3630_toggle_l2_on_restore)
+
+- .text
+-/* Function to call rom code to save secure ram context */
++/*
++ * Function to call rom code to save secure ram context. This gets
++ * relocated to SRAM, so it can be all in .data section. Otherwise
++ * we need to initialize api_params separately.
++ */
++ .data
+ .align 3
+ ENTRY(save_secure_ram_context)
+ stmfd sp!, {r4 - r11, lr} @ save registers on stack
+@@ -126,6 +131,8 @@ ENDPROC(save_secure_ram_context)
+ ENTRY(save_secure_ram_context_sz)
+ .word . - save_secure_ram_context
+
++ .text
++
+ /*
+ * ======================
+ * == Idle entry point ==
+@@ -289,12 +296,6 @@ wait_sdrc_ready:
+ bic r5, r5, #0x40
+ str r5, [r4]
+
+-/*
+- * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a
+- * base instead.
+- * Be careful not to clobber r7 when maintaing this code.
+- */
+-
+ is_dll_in_lock_mode:
+ /* Is dll in lock mode? */
+ ldr r4, sdrc_dlla_ctrl
+@@ -302,11 +303,7 @@ is_dll_in_lock_mode:
+ tst r5, #0x4
+ bne exit_nonoff_modes @ Return if locked
+ /* wait till dll locks */
+- adr r7, kick_counter
+ wait_dll_lock_timed:
+- ldr r4, wait_dll_lock_counter
+- add r4, r4, #1
+- str r4, [r7, #wait_dll_lock_counter - kick_counter]
+ ldr r4, sdrc_dlla_status
+ /* Wait 20uS for lock */
+ mov r6, #8
+@@ -330,9 +327,6 @@ kick_dll:
+ orr r6, r6, #(1<<3) @ enable dll
+ str r6, [r4]
+ dsb
+- ldr r4, kick_counter
+- add r4, r4, #1
+- str r4, [r7] @ kick_counter
+ b wait_dll_lock_timed
+
+ exit_nonoff_modes:
+@@ -360,15 +354,6 @@ sdrc_dlla_status:
+ .word SDRC_DLLA_STATUS_V
+ sdrc_dlla_ctrl:
+ .word SDRC_DLLA_CTRL_V
+- /*
+- * When exporting to userspace while the counters are in SRAM,
+- * these 2 words need to be at the end to facilitate retrival!
+- */
+-kick_counter:
+- .word 0
+-wait_dll_lock_counter:
+- .word 0
+-
+ ENTRY(omap3_do_wfi_sz)
+ .word . - omap3_do_wfi
+
+@@ -437,7 +422,9 @@ ENTRY(omap3_restore)
+ cmp r2, #0x0 @ Check if target power state was OFF or RET
+ bne logic_l1_restore
+
+- ldr r0, l2dis_3630
++ adr r1, l2dis_3630_offset @ address for offset
++ ldr r0, [r1] @ value for offset
++ ldr r0, [r1, r0] @ value at l2dis_3630
+ cmp r0, #0x1 @ should we disable L2 on 3630?
+ bne skipl2dis
+ mrc p15, 0, r0, c1, c0, 1
+@@ -506,7 +493,9 @@ l2_inv_gp:
+ mov r12, #0x2
+ smc #0 @ Call SMI monitor (smieq)
+ logic_l1_restore:
+- ldr r1, l2dis_3630
++ adr r0, l2dis_3630_offset @ adress for offset
++ ldr r1, [r0] @ value for offset
++ ldr r1, [r0, r1] @ value at l2dis_3630
+ cmp r1, #0x1 @ Test if L2 re-enable needed on 3630
+ bne skipl2reen
+ mrc p15, 0, r1, c1, c0, 1
+@@ -535,6 +524,10 @@ control_stat:
+ .word CONTROL_STAT
+ control_mem_rta:
+ .word CONTROL_MEM_RTA_CTRL
++l2dis_3630_offset:
++ .long l2dis_3630 - .
++
++ .data
+ l2dis_3630:
+ .word 0
+
+diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
+index ad1bb9431e94..5373a3281779 100644
+--- a/arch/arm/mach-omap2/sleep44xx.S
++++ b/arch/arm/mach-omap2/sleep44xx.S
+@@ -29,12 +29,6 @@
+ dsb
+ .endm
+
+-ppa_zero_params:
+- .word 0x0
+-
+-ppa_por_params:
+- .word 1, 0
+-
+ #ifdef CONFIG_ARCH_OMAP4
+
+ /*
+@@ -266,7 +260,9 @@ ENTRY(omap4_cpu_resume)
+ beq skip_ns_smp_enable
+ ppa_actrl_retry:
+ mov r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
+- adr r3, ppa_zero_params @ Pointer to parameters
++ adr r1, ppa_zero_params_offset
++ ldr r3, [r1]
++ add r3, r3, r1 @ Pointer to ppa_zero_params
+ mov r1, #0x0 @ Process ID
+ mov r2, #0x4 @ Flag
+ mov r6, #0xff
+@@ -303,7 +299,9 @@ skip_ns_smp_enable:
+ ldr r0, =OMAP4_PPA_L2_POR_INDEX
+ ldr r1, =OMAP44XX_SAR_RAM_BASE
+ ldr r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
+- adr r3, ppa_por_params
++ adr r1, ppa_por_params_offset
++ ldr r3, [r1]
++ add r3, r3, r1 @ Pointer to ppa_por_params
+ str r4, [r3, #0x04]
+ mov r1, #0x0 @ Process ID
+ mov r2, #0x4 @ Flag
+@@ -328,6 +326,8 @@ skip_l2en:
+ #endif
+
+ b cpu_resume @ Jump to generic resume
++ppa_por_params_offset:
++ .long ppa_por_params - .
+ ENDPROC(omap4_cpu_resume)
+ #endif /* CONFIG_ARCH_OMAP4 */
+
+@@ -382,4 +382,13 @@ ENTRY(omap_do_wfi)
+ nop
+
+ ldmfd sp!, {pc}
++ppa_zero_params_offset:
++ .long ppa_zero_params - .
+ ENDPROC(omap_do_wfi)
++
++ .data
++ppa_zero_params:
++ .word 0
++
++ppa_por_params:
++ .word 1, 0
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 36aa31ff2c06..cc7435c9676e 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -566,9 +566,14 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
+ #endif
+
+ /* EL2 debug */
++ mrs x0, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
++ sbfx x0, x0, #8, #4
++ cmp x0, #1
++ b.lt 4f // Skip if no PMU present
+ mrs x0, pmcr_el0 // Disable debug access traps
+ ubfx x0, x0, #11, #5 // to EL2 and allow access to
+ msr mdcr_el2, x0 // all PMU counters from EL1
++4:
+
+ /* Stage-2 translation */
+ msr vttbr_el2, xzr
+diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
+index 7778453762d8..b67b01cb5109 100644
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -1242,9 +1242,6 @@ static void armv8pmu_reset(void *info)
+
+ /* Initialize & Reset PMNC: C and P bits. */
+ armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
+-
+- /* Disable access from userspace. */
+- asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
+ }
+
+ static int armv8_pmuv3_map_event(struct perf_event *event)
+diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
+index e47ed1c5dce1..545710f854f8 100644
+--- a/arch/arm64/mm/pageattr.c
++++ b/arch/arm64/mm/pageattr.c
+@@ -57,6 +57,9 @@ static int change_memory_common(unsigned long addr, int numpages,
+ if (end < MODULES_VADDR || end >= MODULES_END)
+ return -EINVAL;
+
++ if (!numpages)
++ return 0;
++
+ data.set_mask = set_mask;
+ data.clear_mask = clear_mask;
+
+diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
+index 4c4d93c4bf65..d69dffffaa89 100644
+--- a/arch/arm64/mm/proc-macros.S
++++ b/arch/arm64/mm/proc-macros.S
+@@ -62,3 +62,15 @@
+ bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+ #endif
+ .endm
++
++/*
++ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
++ */
++ .macro reset_pmuserenr_el0, tmpreg
++ mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
++ sbfx \tmpreg, \tmpreg, #8, #4
++ cmp \tmpreg, #1 // Skip if no PMU present
++ b.lt 9000f
++ msr pmuserenr_el0, xzr // Disable PMU access from EL0
++9000:
++ .endm
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index cdd754e19b9b..d253908a988d 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -165,6 +165,7 @@ ENTRY(cpu_do_resume)
+ */
+ ubfx x11, x11, #1, #1
+ msr oslar_el1, x11
++ reset_pmuserenr_el0 x0 // Disable PMU access from EL0
+ mov x0, x12
+ dsb nsh // Make sure local tlb invalidation completed
+ isb
+@@ -202,7 +203,9 @@ ENTRY(__cpu_setup)
+
+ mov x0, #3 << 20
+ msr cpacr_el1, x0 // Enable FP/ASIMD
+- msr mdscr_el1, xzr // Reset mdscr_el1
++ mov x0, #1 << 12 // Reset mdscr_el1 and disable
++ msr mdscr_el1, x0 // access to the DCC from EL0
++ reset_pmuserenr_el0 x0 // Disable PMU access from EL0
+ /*
+ * Memory region attributes for LPAE:
+ *
+diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
+index 0392112a5d70..a5ecef7188ba 100644
+--- a/arch/m32r/kernel/setup.c
++++ b/arch/m32r/kernel/setup.c
+@@ -81,7 +81,10 @@ static struct resource code_resource = {
+ };
+
+ unsigned long memory_start;
++EXPORT_SYMBOL(memory_start);
++
+ unsigned long memory_end;
++EXPORT_SYMBOL(memory_end);
+
+ void __init setup_arch(char **);
+ int get_cpuinfo(char *);
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index 70f6e7f073b0..7fe24aef7fdc 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -353,7 +353,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
+ static inline pte_t pte_mkyoung(pte_t pte)
+ {
+ pte_val(pte) |= _PAGE_ACCESSED;
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ if (!(pte_val(pte) & _PAGE_NO_READ))
+ pte_val(pte) |= _PAGE_SILENT_READ;
+ else
+@@ -558,7 +558,7 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
+ {
+ pmd_val(pmd) |= _PAGE_ACCESSED;
+
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ if (!(pmd_val(pmd) & _PAGE_NO_READ))
+ pmd_val(pmd) |= _PAGE_SILENT_READ;
+ else
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 97c87027c17f..90b0e8316790 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -242,7 +242,7 @@ static void output_pgtable_bits_defines(void)
+ pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
+ pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
+ #endif
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ if (cpu_has_rixi) {
+ #ifdef _PAGE_NO_EXEC_SHIFT
+ pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
+diff --git a/arch/parisc/include/uapi/asm/siginfo.h b/arch/parisc/include/uapi/asm/siginfo.h
+index d7034728f377..1c75565d984b 100644
+--- a/arch/parisc/include/uapi/asm/siginfo.h
++++ b/arch/parisc/include/uapi/asm/siginfo.h
+@@ -1,6 +1,10 @@
+ #ifndef _PARISC_SIGINFO_H
+ #define _PARISC_SIGINFO_H
+
++#if defined(__LP64__)
++#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
++#endif
++
+ #include <asm-generic/siginfo.h>
+
+ #undef NSIGTRAP
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index 35f0b62259bb..22f6d954ef89 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -861,32 +861,29 @@ void eeh_pe_restore_bars(struct eeh_pe *pe)
+ const char *eeh_pe_loc_get(struct eeh_pe *pe)
+ {
+ struct pci_bus *bus = eeh_pe_bus_get(pe);
+- struct device_node *dn = pci_bus_to_OF_node(bus);
++ struct device_node *dn;
+ const char *loc = NULL;
+
+- if (!dn)
+- goto out;
++ while (bus) {
++ dn = pci_bus_to_OF_node(bus);
++ if (!dn) {
++ bus = bus->parent;
++ continue;
++ }
+
+- /* PHB PE or root PE ? */
+- if (pci_is_root_bus(bus)) {
+- loc = of_get_property(dn, "ibm,loc-code", NULL);
+- if (!loc)
++ if (pci_is_root_bus(bus))
+ loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
++ else
++ loc = of_get_property(dn, "ibm,slot-location-code",
++ NULL);
++
+ if (loc)
+- goto out;
++ return loc;
+
+- /* Check the root port */
+- dn = dn->child;
+- if (!dn)
+- goto out;
++ bus = bus->parent;
+ }
+
+- loc = of_get_property(dn, "ibm,loc-code", NULL);
+- if (!loc)
+- loc = of_get_property(dn, "ibm,slot-location-code", NULL);
+-
+-out:
+- return loc ? loc : "N/A";
++ return "N/A";
+ }
+
+ /**
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index ffd98b2bfa16..f8338e6d3dd7 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -2047,7 +2047,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
+ /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
+ 2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
+- rlwimi r5, r4, 1, DAWRX_WT
++ rlwimi r5, r4, 2, DAWRX_WT
+ clrrdi r4, r4, 3
+ std r4, VCPU_DAWR(r3)
+ std r5, VCPU_DAWRX(r3)
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index ac3ddf115f3d..c8fe9ab10792 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -915,21 +915,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+ r = -ENXIO;
+ break;
+ }
+- vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
++ val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+ break;
+ case KVM_REG_PPC_VSCR:
+ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ r = -ENXIO;
+ break;
+ }
+- vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
++ val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+ break;
+ case KVM_REG_PPC_VRSAVE:
+- if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+- r = -ENXIO;
+- break;
+- }
+- vcpu->arch.vrsave = set_reg_val(reg->id, val);
++ val = get_reg_val(reg->id, vcpu->arch.vrsave);
+ break;
+ #endif /* CONFIG_ALTIVEC */
+ default:
+@@ -970,17 +966,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+ r = -ENXIO;
+ break;
+ }
+- val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
++ vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+ break;
+ case KVM_REG_PPC_VSCR:
+ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ r = -ENXIO;
+ break;
+ }
+- val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
++ vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+ break;
+ case KVM_REG_PPC_VRSAVE:
+- val = get_reg_val(reg->id, vcpu->arch.vrsave);
++ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
++ r = -ENXIO;
++ break;
++ }
++ vcpu->arch.vrsave = set_reg_val(reg->id, val);
+ break;
+ #endif /* CONFIG_ALTIVEC */
+ default:
+diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
+index 4d1ee88864e8..18c8b819b0aa 100644
+--- a/arch/s390/mm/extable.c
++++ b/arch/s390/mm/extable.c
+@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
+ int i;
+
+ /* Normalize entries to being relative to the start of the section */
+- for (p = start, i = 0; p < finish; p++, i += 8)
++ for (p = start, i = 0; p < finish; p++, i += 8) {
+ p->insn += i;
++ p->fixup += i + 4;
++ }
+ sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
+ /* Denormalize all entries */
+- for (p = start, i = 0; p < finish; p++, i += 8)
++ for (p = start, i = 0; p < finish; p++, i += 8) {
+ p->insn -= i;
++ p->fixup -= i + 4;
++ }
+ }
+
+ #ifdef CONFIG_MODULES
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 30e7ddb27a3a..c690c8e16a96 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -413,7 +413,7 @@ out:
+
+ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
+ {
+- int ret;
++ long ret;
+
+ if (personality(current->personality) == PER_LINUX32 &&
+ personality(personality) == PER_LINUX)
+diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
+index 47f1ff056a54..22a358ef1b0c 100644
+--- a/arch/um/os-Linux/start_up.c
++++ b/arch/um/os-Linux/start_up.c
+@@ -94,6 +94,8 @@ static int start_ptraced_child(void)
+ {
+ int pid, n, status;
+
++ fflush(stdout);
++
+ pid = fork();
+ if (pid == 0)
+ ptrace_child();
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index 78f0c8cbe316..74fcdf3f1534 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -337,20 +337,18 @@ static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
+ }
+ static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
+ {
++ pgprotval_t val = pgprot_val(pgprot);
+ pgprot_t new;
+- unsigned long val;
+
+- val = pgprot_val(pgprot);
+ pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
+ ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
+ return new;
+ }
+ static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
+ {
++ pgprotval_t val = pgprot_val(pgprot);
+ pgprot_t new;
+- unsigned long val;
+
+- val = pgprot_val(pgprot);
+ pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
+ ((val & _PAGE_PAT_LARGE) >>
+ (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index 89af288ec674..2dd9b3ad3bb5 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -33,7 +33,7 @@ struct cpa_data {
+ pgd_t *pgd;
+ pgprot_t mask_set;
+ pgprot_t mask_clr;
+- int numpages;
++ unsigned long numpages;
+ int flags;
+ unsigned long pfn;
+ unsigned force_split : 1;
+@@ -1324,7 +1324,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
+ * CPA operation. Either a large page has been
+ * preserved or a single page update happened.
+ */
+- BUG_ON(cpa->numpages > numpages);
++ BUG_ON(cpa->numpages > numpages || !cpa->numpages);
+ numpages -= cpa->numpages;
+ if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
+ cpa->curpage++;
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index f22cc56fd1b3..9641b74b53ef 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -76,6 +76,8 @@ int af_alg_register_type(const struct af_alg_type *type)
+ goto unlock;
+
+ type->ops->owner = THIS_MODULE;
++ if (type->ops_nokey)
++ type->ops_nokey->owner = THIS_MODULE;
+ node->type = type;
+ list_add(&node->list, &alg_types);
+ err = 0;
+@@ -125,6 +127,26 @@ int af_alg_release(struct socket *sock)
+ }
+ EXPORT_SYMBOL_GPL(af_alg_release);
+
++void af_alg_release_parent(struct sock *sk)
++{
++ struct alg_sock *ask = alg_sk(sk);
++ unsigned int nokey = ask->nokey_refcnt;
++ bool last = nokey && !ask->refcnt;
++
++ sk = ask->parent;
++ ask = alg_sk(sk);
++
++ lock_sock(sk);
++ ask->nokey_refcnt -= nokey;
++ if (!last)
++ last = !--ask->refcnt;
++ release_sock(sk);
++
++ if (last)
++ sock_put(sk);
++}
++EXPORT_SYMBOL_GPL(af_alg_release_parent);
++
+ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ {
+ struct sock *sk = sock->sk;
+@@ -132,6 +154,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ struct sockaddr_alg *sa = (void *)uaddr;
+ const struct af_alg_type *type;
+ void *private;
++ int err;
+
+ if (sock->state == SS_CONNECTED)
+ return -EINVAL;
+@@ -157,16 +180,22 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ return PTR_ERR(private);
+ }
+
++ err = -EBUSY;
+ lock_sock(sk);
++ if (ask->refcnt | ask->nokey_refcnt)
++ goto unlock;
+
+ swap(ask->type, type);
+ swap(ask->private, private);
+
++ err = 0;
++
++unlock:
+ release_sock(sk);
+
+ alg_do_release(type, private);
+
+- return 0;
++ return err;
+ }
+
+ static int alg_setkey(struct sock *sk, char __user *ukey,
+@@ -199,11 +228,15 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ const struct af_alg_type *type;
+- int err = -ENOPROTOOPT;
++ int err = -EBUSY;
+
+ lock_sock(sk);
++ if (ask->refcnt)
++ goto unlock;
++
+ type = ask->type;
+
++ err = -ENOPROTOOPT;
+ if (level != SOL_ALG || !type)
+ goto unlock;
+
+@@ -235,6 +268,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
+ struct alg_sock *ask = alg_sk(sk);
+ const struct af_alg_type *type;
+ struct sock *sk2;
++ unsigned int nokey;
+ int err;
+
+ lock_sock(sk);
+@@ -254,20 +288,29 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
+ security_sk_clone(sk, sk2);
+
+ err = type->accept(ask->private, sk2);
+- if (err) {
+- sk_free(sk2);
++
++ nokey = err == -ENOKEY;
++ if (nokey && type->accept_nokey)
++ err = type->accept_nokey(ask->private, sk2);
++
++ if (err)
+ goto unlock;
+- }
+
+ sk2->sk_family = PF_ALG;
+
+- sock_hold(sk);
++ if (nokey || !ask->refcnt++)
++ sock_hold(sk);
++ ask->nokey_refcnt += nokey;
+ alg_sk(sk2)->parent = sk;
+ alg_sk(sk2)->type = type;
++ alg_sk(sk2)->nokey_refcnt = nokey;
+
+ newsock->ops = type->ops;
+ newsock->state = SS_CONNECTED;
+
++ if (nokey)
++ newsock->ops = type->ops_nokey;
++
+ err = 0;
+
+ unlock:
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index 9c1dc8d6106a..d19b52324cf5 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -451,6 +451,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
+ struct ahash_alg *alg = crypto_ahash_alg(hash);
+
+ hash->setkey = ahash_nosetkey;
++ hash->has_setkey = false;
+ hash->export = ahash_no_export;
+ hash->import = ahash_no_import;
+
+@@ -463,8 +464,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
+ hash->finup = alg->finup ?: ahash_def_finup;
+ hash->digest = alg->digest;
+
+- if (alg->setkey)
++ if (alg->setkey) {
+ hash->setkey = alg->setkey;
++ hash->has_setkey = true;
++ }
+ if (alg->export)
+ hash->export = alg->export;
+ if (alg->import)
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index 1396ad0787fc..d7a3435280d8 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -34,6 +34,11 @@ struct hash_ctx {
+ struct ahash_request req;
+ };
+
++struct algif_hash_tfm {
++ struct crypto_ahash *hash;
++ bool has_key;
++};
++
+ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t ignored)
+ {
+@@ -49,7 +54,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
+
+ lock_sock(sk);
+ if (!ctx->more) {
+- err = crypto_ahash_init(&ctx->req);
++ err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
++ &ctx->completion);
+ if (err)
+ goto unlock;
+ }
+@@ -120,6 +126,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
+ } else {
+ if (!ctx->more) {
+ err = crypto_ahash_init(&ctx->req);
++ err = af_alg_wait_for_completion(err, &ctx->completion);
+ if (err)
+ goto unlock;
+ }
+@@ -227,19 +234,151 @@ static struct proto_ops algif_hash_ops = {
+ .accept = hash_accept,
+ };
+
++static int hash_check_key(struct socket *sock)
++{
++ int err = 0;
++ struct sock *psk;
++ struct alg_sock *pask;
++ struct algif_hash_tfm *tfm;
++ struct sock *sk = sock->sk;
++ struct alg_sock *ask = alg_sk(sk);
++
++ lock_sock(sk);
++ if (ask->refcnt)
++ goto unlock_child;
++
++ psk = ask->parent;
++ pask = alg_sk(ask->parent);
++ tfm = pask->private;
++
++ err = -ENOKEY;
++ lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
++ if (!tfm->has_key)
++ goto unlock;
++
++ if (!pask->refcnt++)
++ sock_hold(psk);
++
++ ask->refcnt = 1;
++ sock_put(psk);
++
++ err = 0;
++
++unlock:
++ release_sock(psk);
++unlock_child:
++ release_sock(sk);
++
++ return err;
++}
++
++static int hash_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
++ size_t size)
++{
++ int err;
++
++ err = hash_check_key(sock);
++ if (err)
++ return err;
++
++ return hash_sendmsg(sock, msg, size);
++}
++
++static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page,
++ int offset, size_t size, int flags)
++{
++ int err;
++
++ err = hash_check_key(sock);
++ if (err)
++ return err;
++
++ return hash_sendpage(sock, page, offset, size, flags);
++}
++
++static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
++ size_t ignored, int flags)
++{
++ int err;
++
++ err = hash_check_key(sock);
++ if (err)
++ return err;
++
++ return hash_recvmsg(sock, msg, ignored, flags);
++}
++
++static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
++ int flags)
++{
++ int err;
++
++ err = hash_check_key(sock);
++ if (err)
++ return err;
++
++ return hash_accept(sock, newsock, flags);
++}
++
++static struct proto_ops algif_hash_ops_nokey = {
++ .family = PF_ALG,
++
++ .connect = sock_no_connect,
++ .socketpair = sock_no_socketpair,
++ .getname = sock_no_getname,
++ .ioctl = sock_no_ioctl,
++ .listen = sock_no_listen,
++ .shutdown = sock_no_shutdown,
++ .getsockopt = sock_no_getsockopt,
++ .mmap = sock_no_mmap,
++ .bind = sock_no_bind,
++ .setsockopt = sock_no_setsockopt,
++ .poll = sock_no_poll,
++
++ .release = af_alg_release,
++ .sendmsg = hash_sendmsg_nokey,
++ .sendpage = hash_sendpage_nokey,
++ .recvmsg = hash_recvmsg_nokey,
++ .accept = hash_accept_nokey,
++};
++
+ static void *hash_bind(const char *name, u32 type, u32 mask)
+ {
+- return crypto_alloc_ahash(name, type, mask);
++ struct algif_hash_tfm *tfm;
++ struct crypto_ahash *hash;
++
++ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
++ if (!tfm)
++ return ERR_PTR(-ENOMEM);
++
++ hash = crypto_alloc_ahash(name, type, mask);
++ if (IS_ERR(hash)) {
++ kfree(tfm);
++ return ERR_CAST(hash);
++ }
++
++ tfm->hash = hash;
++
++ return tfm;
+ }
+
+ static void hash_release(void *private)
+ {
+- crypto_free_ahash(private);
++ struct algif_hash_tfm *tfm = private;
++
++ crypto_free_ahash(tfm->hash);
++ kfree(tfm);
+ }
+
+ static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
+ {
+- return crypto_ahash_setkey(private, key, keylen);
++ struct algif_hash_tfm *tfm = private;
++ int err;
++
++ err = crypto_ahash_setkey(tfm->hash, key, keylen);
++ tfm->has_key = !err;
++
++ return err;
+ }
+
+ static void hash_sock_destruct(struct sock *sk)
+@@ -253,12 +392,14 @@ static void hash_sock_destruct(struct sock *sk)
+ af_alg_release_parent(sk);
+ }
+
+-static int hash_accept_parent(void *private, struct sock *sk)
++static int hash_accept_parent_nokey(void *private, struct sock *sk)
+ {
+ struct hash_ctx *ctx;
+ struct alg_sock *ask = alg_sk(sk);
+- unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
+- unsigned ds = crypto_ahash_digestsize(private);
++ struct algif_hash_tfm *tfm = private;
++ struct crypto_ahash *hash = tfm->hash;
++ unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
++ unsigned ds = crypto_ahash_digestsize(hash);
+
+ ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+ if (!ctx)
+@@ -278,7 +419,7 @@ static int hash_accept_parent(void *private, struct sock *sk)
+
+ ask->private = ctx;
+
+- ahash_request_set_tfm(&ctx->req, private);
++ ahash_request_set_tfm(&ctx->req, hash);
+ ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ af_alg_complete, &ctx->completion);
+
+@@ -287,12 +428,24 @@ static int hash_accept_parent(void *private, struct sock *sk)
+ return 0;
+ }
+
++static int hash_accept_parent(void *private, struct sock *sk)
++{
++ struct algif_hash_tfm *tfm = private;
++
++ if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
++ return -ENOKEY;
++
++ return hash_accept_parent_nokey(private, sk);
++}
++
+ static const struct af_alg_type algif_type_hash = {
+ .bind = hash_bind,
+ .release = hash_release,
+ .setkey = hash_setkey,
+ .accept = hash_accept_parent,
++ .accept_nokey = hash_accept_parent_nokey,
+ .ops = &algif_hash_ops,
++ .ops_nokey = &algif_hash_ops_nokey,
+ .name = "hash",
+ .owner = THIS_MODULE
+ };
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index 945075292bc9..5bc42f9b23f0 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -387,7 +387,8 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
+
+ sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+ sg = sgl->sg;
+- sg_unmark_end(sg + sgl->cur);
++ if (sgl->cur)
++ sg_unmark_end(sg + sgl->cur - 1);
+ do {
+ i = sgl->cur;
+ plen = min_t(int, len, PAGE_SIZE);
+diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
+index 24f17e6c5904..4c850ac474e2 100644
+--- a/crypto/asymmetric_keys/x509_public_key.c
++++ b/crypto/asymmetric_keys/x509_public_key.c
+@@ -307,10 +307,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
+ srlen = cert->raw_serial_size;
+ q = cert->raw_serial;
+ }
+- if (srlen > 1 && *q == 0) {
+- srlen--;
+- q++;
+- }
+
+ ret = -ENOMEM;
+ desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL);
+diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
+index 06f1b60f02b2..4c0a0e271876 100644
+--- a/crypto/crc32c_generic.c
++++ b/crypto/crc32c_generic.c
+@@ -172,4 +172,3 @@ MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
+ MODULE_LICENSE("GPL");
+ MODULE_ALIAS_CRYPTO("crc32c");
+ MODULE_ALIAS_CRYPTO("crc32c-generic");
+-MODULE_SOFTDEP("pre: crc32c");
+diff --git a/crypto/shash.c b/crypto/shash.c
+index 47c713954bf3..03fbcd4a82c4 100644
+--- a/crypto/shash.c
++++ b/crypto/shash.c
+@@ -354,9 +354,10 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
+ crt->final = shash_async_final;
+ crt->finup = shash_async_finup;
+ crt->digest = shash_async_digest;
++ crt->setkey = shash_async_setkey;
++
++ crt->has_setkey = alg->setkey != shash_no_setkey;
+
+- if (alg->setkey)
+- crt->setkey = shash_async_setkey;
+ if (alg->export)
+ crt->export = shash_async_export;
+ if (alg->import)
+diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
+index 54d946a9eee6..6fbb10ca73b1 100644
+--- a/drivers/block/zram/zcomp.c
++++ b/drivers/block/zram/zcomp.c
+@@ -76,7 +76,7 @@ static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
+ */
+ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
+ {
+- struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
++ struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_NOIO);
+ if (!zstrm)
+ return NULL;
+
+@@ -85,7 +85,7 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
+ * allocate 2 pages. 1 for compressed data, plus 1 extra for the
+ * case when compressed size is larger than the original one
+ */
+- zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
++ zstrm->buffer = (void *)__get_free_pages(GFP_NOIO | __GFP_ZERO, 1);
+ if (!zstrm->private || !zstrm->buffer) {
+ zcomp_strm_free(comp, zstrm);
+ zstrm = NULL;
+diff --git a/drivers/block/zram/zcomp_lz4.c b/drivers/block/zram/zcomp_lz4.c
+index f2afb7e988c3..dd6083124276 100644
+--- a/drivers/block/zram/zcomp_lz4.c
++++ b/drivers/block/zram/zcomp_lz4.c
+@@ -10,17 +10,36 @@
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+ #include <linux/lz4.h>
++#include <linux/vmalloc.h>
++#include <linux/mm.h>
+
+ #include "zcomp_lz4.h"
+
+ static void *zcomp_lz4_create(void)
+ {
+- return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
++ void *ret;
++
++ /*
++ * This function can be called in swapout/fs write path
++ * so we can't use GFP_FS|IO. And it assumes we already
++ * have at least one stream in zram initialization so we
++ * don't do best effort to allocate more stream in here.
++ * A default stream will work well without further multiple
++ * streams. That's why we use NORETRY | NOWARN.
++ */
++ ret = kzalloc(LZ4_MEM_COMPRESS, GFP_NOIO | __GFP_NORETRY |
++ __GFP_NOWARN);
++ if (!ret)
++ ret = __vmalloc(LZ4_MEM_COMPRESS,
++ GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
++ __GFP_ZERO | __GFP_HIGHMEM,
++ PAGE_KERNEL);
++ return ret;
+ }
+
+ static void zcomp_lz4_destroy(void *private)
+ {
+- kfree(private);
++ kvfree(private);
+ }
+
+ static int zcomp_lz4_compress(const unsigned char *src, unsigned char *dst,
+diff --git a/drivers/block/zram/zcomp_lzo.c b/drivers/block/zram/zcomp_lzo.c
+index da1bc47d588e..edc549920fa0 100644
+--- a/drivers/block/zram/zcomp_lzo.c
++++ b/drivers/block/zram/zcomp_lzo.c
+@@ -10,17 +10,36 @@
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+ #include <linux/lzo.h>
++#include <linux/vmalloc.h>
++#include <linux/mm.h>
+
+ #include "zcomp_lzo.h"
+
+ static void *lzo_create(void)
+ {
+- return kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
++ void *ret;
++
++ /*
++ * This function can be called in swapout/fs write path
++ * so we can't use GFP_FS|IO. And it assumes we already
++ * have at least one stream in zram initialization so we
++ * don't do best effort to allocate more stream in here.
++ * A default stream will work well without further multiple
++ * streams. That's why we use NORETRY | NOWARN.
++ */
++ ret = kzalloc(LZO1X_MEM_COMPRESS, GFP_NOIO | __GFP_NORETRY |
++ __GFP_NOWARN);
++ if (!ret)
++ ret = __vmalloc(LZO1X_MEM_COMPRESS,
++ GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
++ __GFP_ZERO | __GFP_HIGHMEM,
++ PAGE_KERNEL);
++ return ret;
+ }
+
+ static void lzo_destroy(void *private)
+ {
+- kfree(private);
++ kvfree(private);
+ }
+
+ static int lzo_compress(const unsigned char *src, unsigned char *dst,
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 7bf87d9bfd7d..fdba79c3877c 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -144,6 +144,10 @@ static const struct usb_device_id btusb_table[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM },
+
++ /* Toshiba Corp - Broadcom based */
++ { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
++ .driver_info = BTUSB_BCM_PATCHRAM },
++
+ /* Intel Bluetooth USB Bootloader (RAM module) */
+ { USB_DEVICE(0x8087, 0x0a5a),
+ .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
+diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
+index 1098ed3b9b89..dc45ddb36117 100644
+--- a/drivers/clocksource/vt8500_timer.c
++++ b/drivers/clocksource/vt8500_timer.c
+@@ -50,6 +50,8 @@
+
+ #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+
++#define MIN_OSCR_DELTA 16
++
+ static void __iomem *regbase;
+
+ static cycle_t vt8500_timer_read(struct clocksource *cs)
+@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
+ cpu_relax();
+ writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
+
+- if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
++ if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
+ return -ETIME;
+
+ writel(1, regbase + TIMER_IER_VAL);
+@@ -160,7 +162,7 @@ static void __init vt8500_timer_init(struct device_node *np)
+ pr_err("%s: setup_irq failed for %s\n", __func__,
+ clockevent.name);
+ clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
+- 4, 0xf0000000);
++ MIN_OSCR_DELTA * 2, 0xf0000000);
+ }
+
+ CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index c89a7abb523f..8d8c35623f2a 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -1230,6 +1230,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
+ list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
+ at_xdmac_remove_xfer(atchan, desc);
+
++ clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+ clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+
+@@ -1362,6 +1363,8 @@ static int atmel_xdmac_resume(struct device *dev)
+ atchan = to_at_xdmac_chan(chan);
+ at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
+ if (at_xdmac_chan_is_cyclic(atchan)) {
++ if (at_xdmac_chan_is_paused(atchan))
++ at_xdmac_device_resume(chan);
+ at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
+ at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
+ at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 9e504d3b0d4f..303d937d63c7 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -156,6 +156,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
+
+ /* Enable interrupts */
+ channel_set_bit(dw, MASK.XFER, dwc->mask);
++ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+ channel_set_bit(dw, MASK.ERROR, dwc->mask);
+
+ dwc->initialized = true;
+@@ -536,16 +537,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
+
+ /* Called with dwc->lock held and all DMAC interrupts disabled */
+ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+- u32 status_err, u32 status_xfer)
++ u32 status_block, u32 status_err, u32 status_xfer)
+ {
+ unsigned long flags;
+
+- if (dwc->mask) {
++ if (status_block & dwc->mask) {
+ void (*callback)(void *param);
+ void *callback_param;
+
+ dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+ channel_readl(dwc, LLP));
++ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+
+ callback = dwc->cdesc->period_callback;
+ callback_param = dwc->cdesc->period_callback_param;
+@@ -577,6 +579,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+ channel_writel(dwc, CTL_LO, 0);
+ channel_writel(dwc, CTL_HI, 0);
+
++ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+@@ -593,10 +596,12 @@ static void dw_dma_tasklet(unsigned long data)
+ {
+ struct dw_dma *dw = (struct dw_dma *)data;
+ struct dw_dma_chan *dwc;
++ u32 status_block;
+ u32 status_xfer;
+ u32 status_err;
+ int i;
+
++ status_block = dma_readl(dw, RAW.BLOCK);
+ status_xfer = dma_readl(dw, RAW.XFER);
+ status_err = dma_readl(dw, RAW.ERROR);
+
+@@ -605,7 +610,8 @@ static void dw_dma_tasklet(unsigned long data)
+ for (i = 0; i < dw->dma.chancnt; i++) {
+ dwc = &dw->chan[i];
+ if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+- dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
++ dwc_handle_cyclic(dw, dwc, status_block, status_err,
++ status_xfer);
+ else if (status_err & (1 << i))
+ dwc_handle_error(dw, dwc);
+ else if (status_xfer & (1 << i))
+@@ -616,6 +622,7 @@ static void dw_dma_tasklet(unsigned long data)
+ * Re-enable interrupts.
+ */
+ channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
++ channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
+ }
+
+@@ -635,6 +642,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
+ * softirq handler.
+ */
+ channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
++ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+
+ status = dma_readl(dw, STATUS_INT);
+@@ -645,6 +653,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
+
+ /* Try to recover */
+ channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
++ channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
+ channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
+ channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
+ channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
+@@ -1111,6 +1120,7 @@ static void dw_dma_off(struct dw_dma *dw)
+ dma_writel(dw, CFG, 0);
+
+ channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
++ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+@@ -1216,6 +1226,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+
+ /* Disable interrupts */
+ channel_clear_bit(dw, MASK.XFER, dwc->mask);
++ channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
+ channel_clear_bit(dw, MASK.ERROR, dwc->mask);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+@@ -1245,7 +1256,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+ int dw_dma_cyclic_start(struct dma_chan *chan)
+ {
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ unsigned long flags;
+
+ if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+@@ -1254,27 +1264,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
+ }
+
+ spin_lock_irqsave(&dwc->lock, flags);
+-
+- /* Assert channel is idle */
+- if (dma_readl(dw, CH_EN) & dwc->mask) {
+- dev_err(chan2dev(&dwc->chan),
+- "%s: BUG: Attempted to start non-idle channel\n",
+- __func__);
+- dwc_dump_chan_regs(dwc);
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- return -EBUSY;
+- }
+-
+- dma_writel(dw, CLEAR.ERROR, dwc->mask);
+- dma_writel(dw, CLEAR.XFER, dwc->mask);
+-
+- /* Setup DMAC channel registers */
+- channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+- channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+- channel_writel(dwc, CTL_HI, 0);
+-
+- channel_set_bit(dw, CH_EN, dwc->mask);
+-
++ dwc_dostart(dwc, dwc->cdesc->desc[0]);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+@@ -1479,6 +1469,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
+
+ dwc_chan_disable(dw, dwc);
+
++ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+@@ -1569,9 +1560,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+ /* Force dma off, just in case */
+ dw_dma_off(dw);
+
+- /* Disable BLOCK interrupts as well */
+- channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+-
+ /* Create a pool of consistent memory blocks for hardware descriptors */
+ dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
+ sizeof(struct dw_desc), 4, 0);
+diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
+index 592af5f0cf39..53587377e672 100644
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -435,16 +435,13 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+ */
+ void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
+ {
+- int status;
+-
+ if (!edac_dev->edac_check)
+ return;
+
+- status = cancel_delayed_work(&edac_dev->work);
+- if (status == 0) {
+- /* workq instance might be running, wait for it */
+- flush_workqueue(edac_workqueue);
+- }
++ edac_dev->op_state = OP_OFFLINE;
++
++ cancel_delayed_work_sync(&edac_dev->work);
++ flush_workqueue(edac_workqueue);
+ }
+
+ /*
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index af3be1914dbb..63ceb2d98565 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -581,18 +581,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
+ */
+ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
+ {
+- int status;
+-
+- if (mci->op_state != OP_RUNNING_POLL)
+- return;
+-
+- status = cancel_delayed_work(&mci->work);
+- if (status == 0) {
+- edac_dbg(0, "not canceled, flush the queue\n");
++ mci->op_state = OP_OFFLINE;
+
+- /* workq instance might be running, wait for it */
+- flush_workqueue(edac_workqueue);
+- }
++ cancel_delayed_work_sync(&mci->work);
++ flush_workqueue(edac_workqueue);
+ }
+
+ /*
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index 112d63ad1154..67dc90365389 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -977,21 +977,26 @@ nomem:
+ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
+ const struct attribute_group **groups)
+ {
++ char *name;
+ int i, err;
+
+ /*
+ * The memory controller needs its own bus, in order to avoid
+ * namespace conflicts at /sys/bus/edac.
+ */
+- mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
+- if (!mci->bus->name)
++ name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
++ if (!name)
+ return -ENOMEM;
+
++ mci->bus->name = name;
++
+ edac_dbg(0, "creating bus %s\n", mci->bus->name);
+
+ err = bus_register(mci->bus);
+- if (err < 0)
+- goto fail_free_name;
++ if (err < 0) {
++ kfree(name);
++ return err;
++ }
+
+ /* get the /sys/devices/system/edac subsys reference */
+ mci->dev.type = &mci_attr_type;
+@@ -1060,8 +1065,8 @@ fail_unregister_dimm:
+ device_unregister(&mci->dev);
+ fail_unregister_bus:
+ bus_unregister(mci->bus);
+-fail_free_name:
+- kfree(mci->bus->name);
++ kfree(name);
++
+ return err;
+ }
+
+@@ -1092,10 +1097,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
+
+ void edac_unregister_sysfs(struct mem_ctl_info *mci)
+ {
++ const char *name = mci->bus->name;
++
+ edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
+ device_unregister(&mci->dev);
+ bus_unregister(mci->bus);
+- kfree(mci->bus->name);
++ kfree(name);
+ }
+
+ static void mc_attr_release(struct device *dev)
+diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
+index 2cf44b4db80c..b4b38603b804 100644
+--- a/drivers/edac/edac_pci.c
++++ b/drivers/edac/edac_pci.c
+@@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
+ */
+ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
+ {
+- int status;
+-
+ edac_dbg(0, "\n");
+
+- status = cancel_delayed_work(&pci->work);
+- if (status == 0)
+- flush_workqueue(edac_workqueue);
++ pci->op_state = OP_OFFLINE;
++
++ cancel_delayed_work_sync(&pci->work);
++ flush_workqueue(edac_workqueue);
+ }
+
+ /*
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 109e776345d3..0ec9ad50ba7c 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -861,28 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref)
+ {
+ struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
++
+ if (!port->input) {
+ port->vcpi.num_slots = 0;
+
+ kfree(port->cached_edid);
+
+- /* we can't destroy the connector here, as
+- we might be holding the mode_config.mutex
+- from an EDID retrieval */
++ /*
++ * The only time we don't have a connector
++ * on an output port is if the connector init
++ * fails.
++ */
+ if (port->connector) {
++ /* we can't destroy the connector here, as
++ * we might be holding the mode_config.mutex
++ * from an EDID retrieval */
++
+ mutex_lock(&mgr->destroy_connector_lock);
+- list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
++ list_add(&port->next, &mgr->destroy_connector_list);
+ mutex_unlock(&mgr->destroy_connector_lock);
+ schedule_work(&mgr->destroy_connector_work);
++ return;
+ }
++ /* no need to clean up vcpi
++ * as if we have no connector we never setup a vcpi */
+ drm_dp_port_teardown_pdt(port, port->pdt);
+-
+- if (!port->input && port->vcpi.vcpi > 0)
+- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ }
+ kfree(port);
+-
+- (*mgr->cbs->hotplug)(mgr);
+ }
+
+ static void drm_dp_put_port(struct drm_dp_mst_port *port)
+@@ -968,17 +973,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u
+ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
+ u8 *rad)
+ {
+- int lct = port->parent->lct;
++ int parent_lct = port->parent->lct;
+ int shift = 4;
+- int idx = lct / 2;
+- if (lct > 1) {
+- memcpy(rad, port->parent->rad, idx);
+- shift = (lct % 2) ? 4 : 0;
++ int idx = (parent_lct - 1) / 2;
++ if (parent_lct > 1) {
++ memcpy(rad, port->parent->rad, idx + 1);
++ shift = (parent_lct % 2) ? 4 : 0;
+ } else
+ rad[0] = 0;
+
+ rad[idx] |= port->port_num << shift;
+- return lct + 1;
++ return parent_lct + 1;
+ }
+
+ /*
+@@ -1034,7 +1039,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port,
+ snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
+ for (i = 0; i < (mstb->lct - 1); i++) {
+ int shift = (i % 2) ? 0 : 4;
+- int port_num = mstb->rad[i / 2] >> shift;
++ int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
+ snprintf(temp, sizeof(temp), "-%d", port_num);
+ strlcat(proppath, temp, proppath_size);
+ }
+@@ -1112,12 +1117,21 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
+ char proppath[255];
+ build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
+ port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+-
++ if (!port->connector) {
++ /* remove it from the port list */
++ mutex_lock(&mstb->mgr->lock);
++ list_del(&port->next);
++ mutex_unlock(&mstb->mgr->lock);
++ /* drop port list reference */
++ drm_dp_put_port(port);
++ goto out;
++ }
+ if (port->port_num >= 8) {
+ port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+ }
+ }
+
++out:
+ /* put reference to this port */
+ drm_dp_put_port(port);
+ }
+@@ -1175,7 +1189,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
+
+ for (i = 0; i < lct - 1; i++) {
+ int shift = (i % 2) ? 0 : 4;
+- int port_num = rad[i / 2] >> shift;
++ int port_num = (rad[i / 2] >> shift) & 0xf;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port->port_num == port_num) {
+@@ -1195,6 +1209,50 @@ out:
+ return mstb;
+ }
+
++static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
++ struct drm_dp_mst_branch *mstb,
++ uint8_t *guid)
++{
++ struct drm_dp_mst_branch *found_mstb;
++ struct drm_dp_mst_port *port;
++
++ list_for_each_entry(port, &mstb->ports, next) {
++ if (!port->mstb)
++ continue;
++
++ if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
++ return port->mstb;
++
++ found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
++
++ if (found_mstb)
++ return found_mstb;
++ }
++
++ return NULL;
++}
++
++static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
++ struct drm_dp_mst_topology_mgr *mgr,
++ uint8_t *guid)
++{
++ struct drm_dp_mst_branch *mstb;
++
++ /* find the port by iterating down */
++ mutex_lock(&mgr->lock);
++
++ if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0)
++ mstb = mgr->mst_primary;
++ else
++ mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
++
++ if (mstb)
++ kref_get(&mstb->kref);
++
++ mutex_unlock(&mgr->lock);
++ return mstb;
++}
++
+ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
+ {
+@@ -1306,6 +1364,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
+ struct drm_dp_sideband_msg_tx *txmsg)
+ {
+ struct drm_dp_mst_branch *mstb = txmsg->dst;
++ u8 req_type;
+
+ /* both msg slots are full */
+ if (txmsg->seqno == -1) {
+@@ -1322,7 +1381,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
+ txmsg->seqno = 1;
+ mstb->tx_slots[txmsg->seqno] = txmsg;
+ }
+- hdr->broadcast = 0;
++
++ req_type = txmsg->msg[0] & 0x7f;
++ if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
++ req_type == DP_RESOURCE_STATUS_NOTIFY)
++ hdr->broadcast = 1;
++ else
++ hdr->broadcast = 0;
+ hdr->path_msg = txmsg->path_msg;
+ hdr->lct = mstb->lct;
+ hdr->lcr = mstb->lct - 1;
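The request type sits in the low seven bits of the first sideband body byte, and replies acknowledging the two broadcast notification types must themselves go out with the broadcast bit set. A compact restatement of that rule (reply_must_broadcast is an illustrative name):

	/* Illustrative only: mirrors the req_type test added above. */
	static bool reply_must_broadcast(const u8 *body)
	{
		u8 req_type = body[0] & 0x7f;	/* bit 7 is not part of the type */

		return req_type == DP_CONNECTION_STATUS_NOTIFY ||
		       req_type == DP_RESOURCE_STATUS_NOTIFY;
	}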
+@@ -1424,26 +1489,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+ }
+
+ /* called holding qlock */
+-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
++static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_sideband_msg_tx *txmsg)
+ {
+- struct drm_dp_sideband_msg_tx *txmsg;
+ int ret;
+
+ /* construct a chunk from the first msg in the tx_msg queue */
+- if (list_empty(&mgr->tx_msg_upq)) {
+- mgr->tx_up_in_progress = false;
+- return;
+- }
+-
+- txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
+ ret = process_single_tx_qlock(mgr, txmsg, true);
+- if (ret == 1) {
+- /* up txmsgs aren't put in slots - so free after we send it */
+- list_del(&txmsg->next);
+- kfree(txmsg);
+- } else if (ret)
++
++ if (ret != 1)
+ DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+- mgr->tx_up_in_progress = true;
++
++ txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+ }
+
+ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
+@@ -1828,11 +1885,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
+ drm_dp_encode_up_ack_reply(txmsg, req_type);
+
+ mutex_lock(&mgr->qlock);
+- list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
+- if (!mgr->tx_up_in_progress) {
+- process_single_up_tx_qlock(mgr);
+- }
++
++ process_single_up_tx_qlock(mgr, txmsg);
++
+ mutex_unlock(&mgr->qlock);
++
++ kfree(txmsg);
+ return 0;
+ }
+
+@@ -2129,28 +2187,50 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+
+ if (mgr->up_req_recv.have_eomt) {
+ struct drm_dp_sideband_msg_req_body msg;
+- struct drm_dp_mst_branch *mstb;
++ struct drm_dp_mst_branch *mstb = NULL;
+ bool seqno;
+- mstb = drm_dp_get_mst_branch_device(mgr,
+- mgr->up_req_recv.initial_hdr.lct,
+- mgr->up_req_recv.initial_hdr.rad);
+- if (!mstb) {
+- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+- return 0;
++
++ if (!mgr->up_req_recv.initial_hdr.broadcast) {
++ mstb = drm_dp_get_mst_branch_device(mgr,
++ mgr->up_req_recv.initial_hdr.lct,
++ mgr->up_req_recv.initial_hdr.rad);
++ if (!mstb) {
++ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
++ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++ return 0;
++ }
+ }
+
+ seqno = mgr->up_req_recv.initial_hdr.seqno;
+ drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
+
+ if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
++ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
++
++ if (!mstb)
++ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
++
++ if (!mstb) {
++ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
++ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++ return 0;
++ }
++
+ drm_dp_update_port(mstb, &msg.u.conn_stat);
+ DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
+ (*mgr->cbs->hotplug)(mgr);
+
+ } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
++ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
++ if (!mstb)
++ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
++
++ if (!mstb) {
++ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
++ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++ return 0;
++ }
++
+ DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
+ }
+
+@@ -2330,6 +2410,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
+ DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
+ if (pbn == port->vcpi.pbn) {
+ *slots = port->vcpi.num_slots;
++ drm_dp_put_port(port);
+ return true;
+ }
+ }
+@@ -2489,32 +2570,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
+ */
+ int drm_dp_calc_pbn_mode(int clock, int bpp)
+ {
+- fixed20_12 pix_bw;
+- fixed20_12 fbpp;
+- fixed20_12 result;
+- fixed20_12 margin, tmp;
+- u32 res;
+-
+- pix_bw.full = dfixed_const(clock);
+- fbpp.full = dfixed_const(bpp);
+- tmp.full = dfixed_const(8);
+- fbpp.full = dfixed_div(fbpp, tmp);
+-
+- result.full = dfixed_mul(pix_bw, fbpp);
+- margin.full = dfixed_const(54);
+- tmp.full = dfixed_const(64);
+- margin.full = dfixed_div(margin, tmp);
+- result.full = dfixed_div(result, margin);
+-
+- margin.full = dfixed_const(1006);
+- tmp.full = dfixed_const(1000);
+- margin.full = dfixed_div(margin, tmp);
+- result.full = dfixed_mul(result, margin);
+-
+- result.full = dfixed_div(result, tmp);
+- result.full = dfixed_ceil(result);
+- res = dfixed_trunc(result);
+- return res;
++ u64 kbps;
++ s64 peak_kbps;
++ u32 numerator;
++ u32 denominator;
++
++ kbps = clock * bpp;
++
++ /*
++ * margin 5300ppm + 300ppm ~ 0.6% as per spec, so the factor is 1.006.
++ * The unit of 54/64 MBytes/sec is an arbitrary unit chosen as a common
++ * multiplier that yields an integer PBN for all link rate/lane count
++ * combinations. The calculation is:
++ * peak_kbps *= (1006/1000)
++ * peak_kbps *= (64/54)
++ * peak_kbps /= (8 * 1000) to convert kbits to MBytes
++ */
++
++ numerator = 64 * 1006;
++ denominator = 54 * 8 * 1000 * 1000;
++
++ kbps *= numerator;
++ peak_kbps = drm_fixp_from_fraction(kbps, denominator);
++
++ return drm_fixp2int_ceil(peak_kbps);
+ }
+ EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
+
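The rewritten helper evaluates PBN = ceil(clock_khz * bpp * 64 * 1006 / (54 * 8 * 1000 * 1000)) in 32.32 fixed point, avoiding the overflow the old fixed20_12 math hit on large modes. A standalone sketch of essentially the same arithmetic with plain 64-bit integers (pbn_sketch is an illustrative name; only drm_fixp_from_fraction()/drm_fixp2int_ceil() appear in the patch):

	#include <linux/math64.h>

	/* Illustrative only: plain-integer version of the PBN formula above. */
	static int pbn_sketch(u64 clock_khz, u64 bpp)
	{
		u64 num = clock_khz * bpp * 64 * 1006;
		u64 den = 54ULL * 8 * 1000 * 1000;

		return div64_u64(num + den - 1, den);	/* round up */
	}

	/* pbn_sketch(154000, 30) == 689 and pbn_sketch(297000, 24) == 1063,
	 * matching the expected values in test_calc_pbn_mode() below. */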
+@@ -2522,11 +2602,23 @@ static int test_calc_pbn_mode(void)
+ {
+ int ret;
+ ret = drm_dp_calc_pbn_mode(154000, 30);
+- if (ret != 689)
++ if (ret != 689) {
++ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
++ 154000, 30, 689, ret);
+ return -EINVAL;
++ }
+ ret = drm_dp_calc_pbn_mode(234000, 30);
+- if (ret != 1047)
++ if (ret != 1047) {
++ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
++ 234000, 30, 1047, ret);
+ return -EINVAL;
++ }
++ ret = drm_dp_calc_pbn_mode(297000, 24);
++ if (ret != 1063) {
++ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
++ 297000, 24, 1063, ret);
++ return -EINVAL;
++ }
+ return 0;
+ }
+
+@@ -2660,8 +2752,8 @@ static void drm_dp_tx_work(struct work_struct *work)
+ static void drm_dp_destroy_connector_work(struct work_struct *work)
+ {
+ struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
+- struct drm_connector *connector;
+-
++ struct drm_dp_mst_port *port;
++ bool send_hotplug = false;
+ /*
+ * Not a regular list traverse as we have to drop the destroy
+ * connector lock before destroying the connector, to avoid AB->BA
+@@ -2669,16 +2761,25 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
+ */
+ for (;;) {
+ mutex_lock(&mgr->destroy_connector_lock);
+- connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
+- if (!connector) {
++ port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
++ if (!port) {
+ mutex_unlock(&mgr->destroy_connector_lock);
+ break;
+ }
+- list_del(&connector->destroy_list);
++ list_del(&port->next);
+ mutex_unlock(&mgr->destroy_connector_lock);
+
+- mgr->cbs->destroy_connector(mgr, connector);
++ mgr->cbs->destroy_connector(mgr, port->connector);
++
++ drm_dp_port_teardown_pdt(port, port->pdt);
++
++ if (!port->input && port->vcpi.vcpi > 0)
++ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
++ kfree(port);
++ send_hotplug = true;
+ }
++ if (send_hotplug)
++ (*mgr->cbs->hotplug)(mgr);
+ }
+
+ /**
+@@ -2701,7 +2802,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+ mutex_init(&mgr->qlock);
+ mutex_init(&mgr->payload_lock);
+ mutex_init(&mgr->destroy_connector_lock);
+- INIT_LIST_HEAD(&mgr->tx_msg_upq);
+ INIT_LIST_HEAD(&mgr->tx_msg_downq);
+ INIT_LIST_HEAD(&mgr->destroy_connector_list);
+ INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
+diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
+index 63503879a676..0d75e75b1da3 100644
+--- a/drivers/gpu/drm/drm_probe_helper.c
++++ b/drivers/gpu/drm/drm_probe_helper.c
+@@ -195,7 +195,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
+ mode_flags |= DRM_MODE_FLAG_3D_MASK;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+- mode->status = drm_mode_validate_basic(mode);
++ if (mode->status == MODE_OK)
++ mode->status = drm_mode_validate_basic(mode);
+
+ if (mode->status == MODE_OK)
+ mode->status = drm_mode_validate_size(mode, maxX, maxY);
+diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
+index f3e84c44d009..4decf518d106 100644
+--- a/drivers/gpu/drm/i915/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/i915_gem_context.c
+@@ -317,6 +317,10 @@ void i915_gem_context_reset(struct drm_device *dev)
+ i915_gem_context_unreference(lctx);
+ ring->last_context = NULL;
+ }
++
++ /* Force the GPU state to be reinitialised on enabling */
++ if (ring->default_context)
++ ring->default_context->legacy_hw_ctx.initialized = false;
+ }
+ }
+
+@@ -704,7 +708,7 @@ static int do_switch(struct intel_engine_cs *ring,
+ goto unpin_out;
+ }
+
+- if (!to->legacy_hw_ctx.initialized) {
++ if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
+ hw_flags |= MI_RESTORE_INHIBIT;
+ /* NB: If we inhibit the restore, the context is not allowed to
+ * die because future work may end up depending on valid address
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 7b27a114b030..b103773df2a3 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -10391,11 +10391,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
+ pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
+ }
+
+- /* Clamp bpp to 8 on screens without EDID 1.4 */
+- if (connector->base.display_info.bpc == 0 && bpp > 24) {
+- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+- bpp);
+- pipe_config->pipe_bpp = 24;
++ /* Clamp bpp to default limit on screens without EDID 1.4 */
++ if (connector->base.display_info.bpc == 0) {
++ int type = connector->base.connector_type;
++ int clamp_bpp = 24;
++
++ /* Fall back to 18 bpp when DP sink capability is unknown. */
++ if (type == DRM_MODE_CONNECTOR_DisplayPort ||
++ type == DRM_MODE_CONNECTOR_eDP)
++ clamp_bpp = 18;
++
++ if (bpp > clamp_bpp) {
++ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
++ bpp, clamp_bpp);
++ pipe_config->pipe_bpp = clamp_bpp;
++ }
+ }
+ }
+
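The effect of this hunk: when a sink reports no EDID bpc at all, DP and eDP connectors now fall back to 18 bpp (6 bpc, the minimum every DisplayPort sink must accept) rather than the generic 24 bpp default that other connector types keep. For example (hypothetical values):

	/* display_info.bpc == 0 on a DRM_MODE_CONNECTOR_DisplayPort sink with
	 * bpp == 36 -> pipe_bpp is clamped to 18; on HDMI it would clamp to 24. */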
+diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
+index 5cb47482d29f..88c557551b89 100644
+--- a/drivers/gpu/drm/i915/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
+@@ -439,9 +439,9 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
+
+ drm_mode_connector_set_path_property(connector, pathprop);
+ drm_reinit_primary_mode_group(dev);
+- mutex_lock(&dev->mode_config.mutex);
++ drm_modeset_lock_all(dev);
+ intel_connector_add_to_fbdev(intel_connector);
+- mutex_unlock(&dev->mode_config.mutex);
++ drm_modeset_unlock_all(dev);
+ drm_connector_register(&intel_connector->base);
+ return connector;
+ }
+@@ -452,16 +452,16 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
+ /* need to nuke the connector */
+- mutex_lock(&dev->mode_config.mutex);
++ drm_modeset_lock_all(dev);
+ intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+- mutex_unlock(&dev->mode_config.mutex);
++ drm_modeset_unlock_all(dev);
+
+ intel_connector->unregister(intel_connector);
+
+- mutex_lock(&dev->mode_config.mutex);
++ drm_modeset_lock_all(dev);
+ intel_connector_remove_from_fbdev(intel_connector);
+ drm_connector_cleanup(connector);
+- mutex_unlock(&dev->mode_config.mutex);
++ drm_modeset_unlock_all(dev);
+
+ drm_reinit_primary_mode_group(dev);
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 3162040bc314..05490ef5a2aa 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -969,10 +969,13 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
+
+ NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
+
++ mutex_lock(&drm->dev->mode_config.mutex);
+ if (plugged)
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ else
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
++ mutex_unlock(&drm->dev->mode_config.mutex);
++
+ drm_helper_hpd_irq_event(connector->dev);
+ }
+
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 5be50ef2b30e..bb292143997e 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -2310,8 +2310,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ encoder_mode = atombios_get_encoder_mode(encoder);
+ if (connector && (radeon_audio != 0) &&
+ ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+- (ENCODER_MODE_IS_DP(encoder_mode) &&
+- drm_detect_monitor_audio(radeon_connector_edid(connector)))))
++ ENCODER_MODE_IS_DP(encoder_mode)))
+ radeon_audio_mode_set(encoder, adjusted_mode);
+ }
+
+diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
+index 44480c1b9738..848b1ffd5cc4 100644
+--- a/drivers/gpu/drm/radeon/dce6_afmt.c
++++ b/drivers/gpu/drm/radeon/dce6_afmt.c
+@@ -282,6 +282,14 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ */
+ if (ASIC_IS_DCE8(rdev)) {
++ unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
++ DENTIST_DPREFCLK_WDIVIDER_MASK) >>
++ DENTIST_DPREFCLK_WDIVIDER_SHIFT;
++ div = radeon_audio_decode_dfs_div(div);
++
++ if (div)
++ clock = clock * 100 / div;
++
+ WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
+ WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
+ } else {
+diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+index 9953356fe263..3cf04a2f44bb 100644
+--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
++++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+@@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
+ * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ */
++ if (ASIC_IS_DCE41(rdev)) {
++ unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
++ DENTIST_DPREFCLK_WDIVIDER_MASK) >>
++ DENTIST_DPREFCLK_WDIVIDER_SHIFT;
++ div = radeon_audio_decode_dfs_div(div);
++
++ if (div)
++ clock = 100 * clock / div;
++ }
++
+ WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+ }
+diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
+index 4aa5f755572b..13b6029d65cc 100644
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -511,6 +511,11 @@
+ #define DCCG_AUDIO_DTO1_CNTL 0x05cc
+ # define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
+
++#define DCE41_DENTIST_DISPCLK_CNTL 0x049c
++# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
++# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
++# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
++
+ /* DCE 4.0 AFMT */
+ #define HDMI_CONTROL 0x7030
+ # define HDMI_KEEPOUT_MODE (1 << 0)
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 91c3f60f8bac..4bca29c5abfa 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -268,6 +268,7 @@ struct radeon_clock {
+ uint32_t current_dispclk;
+ uint32_t dp_extclk;
+ uint32_t max_pixel_clock;
++ uint32_t vco_freq;
+ };
+
+ /*
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 8f285244c839..de9a2ffcf5f7 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -437,7 +437,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ }
+
+ /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
+- if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
++ if (((dev->pdev->device == 0x9802) ||
++ (dev->pdev->device == 0x9805) ||
++ (dev->pdev->device == 0x9806)) &&
+ (dev->pdev->subsystem_vendor == 0x1734) &&
+ (dev->pdev->subsystem_device == 0x11bd)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
+@@ -448,14 +450,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ }
+ }
+
+- /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
+- if ((dev->pdev->device == 0x9805) &&
+- (dev->pdev->subsystem_vendor == 0x1734) &&
+- (dev->pdev->subsystem_device == 0x11bd)) {
+- if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+- return false;
+- }
+-
+ return true;
+ }
+
+@@ -1112,6 +1106,31 @@ union firmware_info {
+ ATOM_FIRMWARE_INFO_V2_2 info_22;
+ };
+
++union igp_info {
++ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
++};
++
++static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
++{
++ struct radeon_mode_info *mode_info = &rdev->mode_info;
++ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
++ union igp_info *igp_info;
++ u8 frev, crev;
++ u16 data_offset;
++
++ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
++ &frev, &crev, &data_offset)) {
++ igp_info = (union igp_info *)(mode_info->atom_context->bios +
++ data_offset);
++ rdev->clock.vco_freq =
++ le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
++ }
++}
++
+ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ {
+ struct radeon_device *rdev = dev->dev_private;
+@@ -1263,20 +1282,25 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ rdev->mode_info.firmware_flags =
+ le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
+
++ if (ASIC_IS_DCE8(rdev))
++ rdev->clock.vco_freq =
++ le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
++ else if (ASIC_IS_DCE5(rdev))
++ rdev->clock.vco_freq = rdev->clock.current_dispclk;
++ else if (ASIC_IS_DCE41(rdev))
++ radeon_atombios_get_dentist_vco_freq(rdev);
++ else
++ rdev->clock.vco_freq = rdev->clock.current_dispclk;
++
++ if (rdev->clock.vco_freq == 0)
++ rdev->clock.vco_freq = 360000; /* 3.6 GHz */
++
+ return true;
+ }
+
+ return false;
+ }
+
+-union igp_info {
+- struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+- struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+- struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+-};
+-
+ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
+ {
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
+index d77dd1430d58..b214663b370d 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.c
++++ b/drivers/gpu/drm/radeon/radeon_audio.c
+@@ -698,26 +698,37 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
+ {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
++ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+ if (!dig || !dig->afmt)
+ return;
+
+- radeon_audio_set_mute(encoder, true);
++ if (!connector)
++ return;
++
++ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
++ radeon_audio_set_mute(encoder, true);
+
+- radeon_audio_write_speaker_allocation(encoder);
+- radeon_audio_write_sad_regs(encoder);
+- radeon_audio_write_latency_fields(encoder, mode);
+- radeon_audio_set_dto(encoder, mode->clock);
+- radeon_audio_set_vbi_packet(encoder);
+- radeon_hdmi_set_color_depth(encoder);
+- radeon_audio_update_acr(encoder, mode->clock);
+- radeon_audio_set_audio_packet(encoder);
+- radeon_audio_select_pin(encoder);
++ radeon_audio_write_speaker_allocation(encoder);
++ radeon_audio_write_sad_regs(encoder);
++ radeon_audio_write_latency_fields(encoder, mode);
++ radeon_audio_set_dto(encoder, mode->clock);
++ radeon_audio_set_vbi_packet(encoder);
++ radeon_hdmi_set_color_depth(encoder);
++ radeon_audio_update_acr(encoder, mode->clock);
++ radeon_audio_set_audio_packet(encoder);
++ radeon_audio_select_pin(encoder);
+
+- if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+- return;
++ if (radeon_audio_set_avi_packet(encoder, mode) < 0)
++ return;
+
+- radeon_audio_set_mute(encoder, false);
++ radeon_audio_set_mute(encoder, false);
++ } else {
++ radeon_hdmi_set_color_depth(encoder);
++
++ if (radeon_audio_set_avi_packet(encoder, mode) < 0)
++ return;
++ }
+ }
+
+ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+@@ -728,28 +739,24 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+- struct radeon_connector_atom_dig *dig_connector =
+- radeon_connector->con_priv;
+
+- if (!connector)
++ if (!dig || !dig->afmt)
+ return;
+
+- if (!dig || !dig->afmt)
++ if (!connector)
+ return;
+
+- radeon_audio_write_speaker_allocation(encoder);
+- radeon_audio_write_sad_regs(encoder);
+- radeon_audio_write_latency_fields(encoder, mode);
+- if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
+- radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+- else
+- radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+- radeon_audio_set_audio_packet(encoder);
+- radeon_audio_select_pin(encoder);
++ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
++ radeon_audio_write_speaker_allocation(encoder);
++ radeon_audio_write_sad_regs(encoder);
++ radeon_audio_write_latency_fields(encoder, mode);
++ radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
++ radeon_audio_set_audio_packet(encoder);
++ radeon_audio_select_pin(encoder);
+
+- if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+- return;
++ if (radeon_audio_set_avi_packet(encoder, mode) < 0)
++ return;
++ }
+ }
+
+ void radeon_audio_mode_set(struct drm_encoder *encoder,
+@@ -768,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
+ if (radeon_encoder->audio && radeon_encoder->audio->dpms)
+ radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
+ }
++
++unsigned int radeon_audio_decode_dfs_div(unsigned int div)
++{
++ if (div >= 8 && div < 64)
++ return (div - 8) * 25 + 200;
++ else if (div >= 64 && div < 96)
++ return (div - 64) * 50 + 1600;
++ else if (div >= 96 && div < 128)
++ return (div - 96) * 100 + 3200;
++ else
++ return 0;
++}
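radeon_audio_decode_dfs_div() maps the raw 7-bit DENTIST divider field onto a divider scaled by 100: values 8-63 decode to 2.00-15.75 in 0.25 steps, 64-95 to 16.00-31.50 in 0.50 steps, 96-127 to 32.00-63.00 in 1.00 steps, and anything else decodes to 0 and is ignored by the callers. A worked example (the register value 40 is hypothetical):

	unsigned int div = radeon_audio_decode_dfs_div(40); /* (40-8)*25+200 = 1000 */

	if (div)
		clock = clock * 100 / div;	/* i.e. clock / 10.00 */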
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
+index 059cc3012062..5c70cceaa4a6 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.h
++++ b/drivers/gpu/drm/radeon/radeon_audio.h
+@@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
+ void radeon_audio_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+ void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
++unsigned int radeon_audio_decode_dfs_div(unsigned int div);
+
+ #endif
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 604c44d88e7a..ccab94ed9d94 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1734,6 +1734,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+ }
+
+ drm_kms_helper_poll_enable(dev);
++ drm_helper_hpd_irq_event(dev);
+
+ /* set the power state here in case we are a PX system or headless */
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+index 42986130cc63..c9ff4cf4c4e7 100644
+--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+@@ -287,9 +287,9 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
+ drm_mode_connector_set_path_property(connector, pathprop);
+ drm_reinit_primary_mode_group(dev);
+
+- mutex_lock(&dev->mode_config.mutex);
++ drm_modeset_lock_all(dev);
+ radeon_fb_add_connector(rdev, connector);
+- mutex_unlock(&dev->mode_config.mutex);
++ drm_modeset_unlock_all(dev);
+
+ drm_connector_register(connector);
+ return connector;
+@@ -304,12 +304,12 @@ static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+
+ drm_connector_unregister(connector);
+ /* need to nuke the connector */
+- mutex_lock(&dev->mode_config.mutex);
++ drm_modeset_lock_all(dev);
+ /* dpms off */
+ radeon_fb_remove_connector(rdev, connector);
+
+ drm_connector_cleanup(connector);
+- mutex_unlock(&dev->mode_config.mutex);
++ drm_modeset_unlock_all(dev);
+ drm_reinit_primary_mode_group(dev);
+
+
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 676362769b8d..741065bd14b3 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -33,6 +33,7 @@
+ #include <linux/slab.h>
+ #include <drm/drmP.h>
+ #include <drm/radeon_drm.h>
++#include <drm/drm_cache.h>
+ #include "radeon.h"
+ #include "radeon_trace.h"
+
+@@ -225,7 +226,7 @@ int radeon_bo_create(struct radeon_device *rdev,
+ /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+ * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+ */
+- bo->flags &= ~RADEON_GEM_GTT_WC;
++ bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+ #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
+ /* Don't try to enable write-combining when it can't work, or things
+ * may be slow
+@@ -237,7 +238,13 @@ int radeon_bo_create(struct radeon_device *rdev,
+
+ DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+ "better performance thanks to write-combining\n");
+- bo->flags &= ~RADEON_GEM_GTT_WC;
++ bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
++#else
++ /* For architectures that don't support WC memory,
++ * mask out the WC flag from the BO
++ */
++ if (!drm_arch_can_wc_memory())
++ bo->flags &= ~RADEON_GEM_GTT_WC;
+ #endif
+
+ radeon_ttm_placement_from_domain(bo, domain);
+diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
+index 9c3377ca17b7..8ec4e4591756 100644
+--- a/drivers/gpu/drm/radeon/radeon_vm.c
++++ b/drivers/gpu/drm/radeon/radeon_vm.c
+@@ -456,15 +456,15 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+
+ if (soffset) {
+ /* make sure object fit at this offset */
+- eoffset = soffset + size;
++ eoffset = soffset + size - 1;
+ if (soffset >= eoffset) {
+ r = -EINVAL;
+ goto error_unreserve;
+ }
+
+ last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+- if (last_pfn > rdev->vm_manager.max_pfn) {
+- dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
++ if (last_pfn >= rdev->vm_manager.max_pfn) {
++ dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
+ last_pfn, rdev->vm_manager.max_pfn);
+ r = -EINVAL;
+ goto error_unreserve;
+@@ -479,7 +479,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ eoffset /= RADEON_GPU_PAGE_SIZE;
+ if (soffset || eoffset) {
+ struct interval_tree_node *it;
+- it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
++ it = interval_tree_iter_first(&vm->va, soffset, eoffset);
+ if (it && it != &bo_va->it) {
+ struct radeon_bo_va *tmp;
+ tmp = container_of(it, struct radeon_bo_va, it);
+@@ -522,7 +522,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+
+ if (soffset || eoffset) {
+ bo_va->it.start = soffset;
+- bo_va->it.last = eoffset - 1;
++ bo_va->it.last = eoffset;
+ interval_tree_insert(&bo_va->it, &vm->va);
+ }
+
+@@ -891,7 +891,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
+ unsigned i;
+
+ start >>= radeon_vm_block_size;
+- end >>= radeon_vm_block_size;
++ end = (end - 1) >> radeon_vm_block_size;
+
+ for (i = start; i <= end; ++i)
+ radeon_bo_fence(vm->page_tables[i].bo, fence, true);
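These hunks switch the VA bookkeeping to an inclusive end offset, so two mappings that abut no longer look like an overlap in the interval tree, and the fence loop no longer touches one page table past the mapping. Worked numbers (hypothetical 1 MiB mapping, 4 KiB GPU pages):

	uint64_t soffset = 0, size = 1 << 20;
	uint64_t eoffset = soffset + size - 1;	/* 0xfffff, inclusive */
	uint64_t last_pfn = eoffset / 4096;	/* 255, so the last_pfn >= max_pfn
						 * check still admits a 256-page VM */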
+diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
+index 3afac3013983..c126f6bfbed1 100644
+--- a/drivers/gpu/drm/radeon/sid.h
++++ b/drivers/gpu/drm/radeon/sid.h
+@@ -915,6 +915,11 @@
+ #define DCCG_AUDIO_DTO1_PHASE 0x05c0
+ #define DCCG_AUDIO_DTO1_MODULE 0x05c4
+
++#define DENTIST_DISPCLK_CNTL 0x0490
++# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
++# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
++# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
++
+ #define AFMT_AUDIO_SRC_CONTROL 0x713c
+ #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
+ /* AFMT_AUDIO_SRC_SELECT
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 15a8d7746fd2..2aa0e927d490 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -25,6 +25,7 @@
+ *
+ **************************************************************************/
+ #include <linux/module.h>
++#include <linux/console.h>
+
+ #include <drm/drmP.h>
+ #include "vmwgfx_drv.h"
+@@ -1447,6 +1448,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ static int __init vmwgfx_init(void)
+ {
+ int ret;
++
++#ifdef CONFIG_VGA_CONSOLE
++ if (vgacon_text_force())
++ return -EINVAL;
++#endif
++
+ ret = drm_pci_init(&driver, &vmw_pci_driver);
+ if (ret)
+ DRM_ERROR("Failed initializing DRM.\n");
+diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
+index 894531d315b8..046144fc5aff 100644
+--- a/drivers/hwtracing/coresight/coresight.c
++++ b/drivers/hwtracing/coresight/coresight.c
+@@ -543,7 +543,7 @@ static int coresight_name_match(struct device *dev, void *data)
+ to_match = data;
+ i_csdev = to_coresight_device(dev);
+
+- if (!strcmp(to_match, dev_name(&i_csdev->dev)))
++ if (to_match && !strcmp(to_match, dev_name(&i_csdev->dev)))
+ return 1;
+
+ return 0;
+diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
+index 4fa88ba2963e..131994382b22 100644
+--- a/drivers/infiniband/hw/qib/qib_qp.c
++++ b/drivers/infiniband/hw/qib/qib_qp.c
+@@ -100,9 +100,10 @@ static u32 credit_table[31] = {
+ 32768 /* 1E */
+ };
+
+-static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
++static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
++ gfp_t gfp)
+ {
+- unsigned long page = get_zeroed_page(GFP_KERNEL);
++ unsigned long page = get_zeroed_page(gfp);
+
+ /*
+ * Free the page if someone raced with us installing it.
+@@ -121,7 +122,7 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
+ * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
+ */
+ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
+- enum ib_qp_type type, u8 port)
++ enum ib_qp_type type, u8 port, gfp_t gfp)
+ {
+ u32 i, offset, max_scan, qpn;
+ struct qpn_map *map;
+@@ -151,7 +152,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
+ max_scan = qpt->nmaps - !offset;
+ for (i = 0;;) {
+ if (unlikely(!map->page)) {
+- get_map_page(qpt, map);
++ get_map_page(qpt, map, gfp);
+ if (unlikely(!map->page))
+ break;
+ }
+@@ -983,13 +984,21 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ size_t sz;
+ size_t sg_list_sz;
+ struct ib_qp *ret;
++ gfp_t gfp;
++
+
+ if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
+ init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
+- init_attr->create_flags) {
+- ret = ERR_PTR(-EINVAL);
+- goto bail;
+- }
++ init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
++ return ERR_PTR(-EINVAL);
++
++ /* GFP_NOIO is applicable in RC QPs only */
++ if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
++ init_attr->qp_type != IB_QPT_RC)
++ return ERR_PTR(-EINVAL);
++
++ gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
++ GFP_NOIO : GFP_KERNEL;
+
+ /* Check receive queue parameters if no SRQ is specified. */
+ if (!init_attr->srq) {
+@@ -1021,7 +1030,8 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ sz = sizeof(struct qib_sge) *
+ init_attr->cap.max_send_sge +
+ sizeof(struct qib_swqe);
+- swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
++ swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
++ gfp, PAGE_KERNEL);
+ if (swq == NULL) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+@@ -1037,13 +1047,13 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ } else if (init_attr->cap.max_recv_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (init_attr->cap.max_recv_sge - 1);
+- qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
++ qp = kzalloc(sz + sg_list_sz, gfp);
+ if (!qp) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_swq;
+ }
+ RCU_INIT_POINTER(qp->next, NULL);
+- qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
++ qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
+ if (!qp->s_hdr) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_qp;
+@@ -1058,8 +1068,16 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
+ sizeof(struct qib_rwqe);
+- qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
+- qp->r_rq.size * sz);
++ if (gfp != GFP_NOIO)
++ qp->r_rq.wq = vmalloc_user(
++ sizeof(struct qib_rwq) +
++ qp->r_rq.size * sz);
++ else
++ qp->r_rq.wq = __vmalloc(
++ sizeof(struct qib_rwq) +
++ qp->r_rq.size * sz,
++ gfp, PAGE_KERNEL);
++
+ if (!qp->r_rq.wq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_qp;
+@@ -1090,7 +1108,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ dev = to_idev(ibpd->device);
+ dd = dd_from_dev(dev);
+ err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
+- init_attr->port_num);
++ init_attr->port_num, gfp);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ vfree(qp->r_rq.wq);
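After this change a consumer on the block I/O path can ask for reclaim-safe allocations when creating an RC QP; the flag is rejected for every other QP type. A hedged usage sketch (field values are illustrative):

	struct ib_qp_init_attr attr = {
		.qp_type      = IB_QPT_RC,                 /* GFP_NOIO is RC-only here */
		.create_flags = IB_QP_CREATE_USE_GFP_NOIO, /* no I/O recursion in reclaim */
		/* ... queue capacities elided ... */
	};
	struct ib_qp *qp = ib_create_qp(pd, &attr);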
+diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+index f8ea069a3eaf..b2fb5286dbd9 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
++++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ struct qib_ibdev *dev = to_idev(ibqp->device);
+ struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
+ struct qib_mcast *mcast = NULL;
+- struct qib_mcast_qp *p, *tmp;
++ struct qib_mcast_qp *p, *tmp, *delp = NULL;
+ struct rb_node *n;
+ int last = 0;
+ int ret;
+
+- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+- ret = -EINVAL;
+- goto bail;
+- }
++ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
++ return -EINVAL;
+
+ spin_lock_irq(&ibp->lock);
+
+@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ while (1) {
+ if (n == NULL) {
+ spin_unlock_irq(&ibp->lock);
+- ret = -EINVAL;
+- goto bail;
++ return -EINVAL;
+ }
+
+ mcast = rb_entry(n, struct qib_mcast, rb_node);
+@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ */
+ list_del_rcu(&p->list);
+ mcast->n_attached--;
++ delp = p;
+
+ /* If this was the last attached QP, remove the GID too. */
+ if (list_empty(&mcast->qp_list)) {
+@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ }
+
+ spin_unlock_irq(&ibp->lock);
++ /* QP not attached */
++ if (!delp)
++ return -EINVAL;
++ /*
++ * Wait for any list walkers to finish before freeing the
++ * list element.
++ */
++ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
++ qib_mcast_qp_free(delp);
+
+- if (p) {
+- /*
+- * Wait for any list walkers to finish before freeing the
+- * list element.
+- */
+- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+- qib_mcast_qp_free(p);
+- }
+ if (last) {
+ atomic_dec(&mcast->refcount);
+ wait_event(mcast->wait, !atomic_read(&mcast->refcount));
+@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ dev->n_mcast_grps_allocated--;
+ spin_unlock_irq(&dev->n_mcast_grps_lock);
+ }
+-
+- ret = 0;
+-
+-bail:
+- return ret;
++ return 0;
+ }
+
+ int qib_mcast_tree_empty(struct qib_ibport *ibp)
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index ce3d40004458..0f5b400706d7 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1214,7 +1214,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
+ input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2,
+ ETP_WMAX_V2, 0, 0);
+ }
+- input_mt_init_slots(dev, 2, 0);
++ input_mt_init_slots(dev, 2, INPUT_MT_SEMI_MT);
+ input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
+ input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
+ break;
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index c11556563ef0..68f5f4a0f1e7 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -258,6 +258,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+ },
+ },
+ {
++ /* Fujitsu Lifebook U745 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
++ },
++ },
++ {
+ /* Fujitsu T70H */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
+index e29d5d7fe220..937832cfa48e 100644
+--- a/drivers/iommu/io-pgtable-arm.c
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -341,17 +341,18 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
+ arm_lpae_iopte *start, *end;
+ unsigned long table_size;
+
+- /* Only leaf entries at the last level */
+- if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+- return;
+-
+ if (lvl == ARM_LPAE_START_LVL(data))
+ table_size = data->pgd_size;
+ else
+ table_size = 1UL << data->pg_shift;
+
+ start = ptep;
+- end = (void *)ptep + table_size;
++
++ /* Only leaf entries at the last level */
++ if (lvl == ARM_LPAE_MAX_LEVELS - 1)
++ end = ptep;
++ else
++ end = (void *)ptep + table_size;
+
+ while (ptep != end) {
+ arm_lpae_iopte pte = *ptep++;
+diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
+index 63cd031b2c28..869d01dd4063 100644
+--- a/drivers/irqchip/irq-atmel-aic-common.c
++++ b/drivers/irqchip/irq-atmel-aic-common.c
+@@ -86,7 +86,7 @@ int aic_common_set_priority(int priority, unsigned *val)
+ priority > AT91_AIC_IRQ_MAX_PRIORITY)
+ return -EINVAL;
+
+- *val &= AT91_AIC_PRIOR;
++ *val &= ~AT91_AIC_PRIOR;
+ *val |= priority;
+
+ return 0;
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 00cde40db572..43829d9493f7 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1741,6 +1741,7 @@ static void bch_btree_gc(struct cache_set *c)
+ do {
+ ret = btree_root(gc_root, c, &op, &writes, &stats);
+ closure_sync(&writes);
++ cond_resched();
+
+ if (ret && ret != -EAGAIN)
+ pr_warn("gc failed!");
+@@ -2162,8 +2163,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+ rw_lock(true, b, b->level);
+
+ if (b->key.ptr[0] != btree_ptr ||
+- b->seq != seq + 1)
++ b->seq != seq + 1) {
++ op->lock = b->level;
+ goto out;
++ }
+ }
+
+ SET_KEY_PTRS(check_key, 1);
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 4dd2bb7167f0..42522c8f13c6 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -708,6 +708,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
+ WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
+ sysfs_create_link(&c->kobj, &d->kobj, d->name),
+ "Couldn't create device <-> cache set symlinks");
++
++ clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
+ }
+
+ static void bcache_device_detach(struct bcache_device *d)
+@@ -878,8 +880,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
+ buf[SB_LABEL_SIZE] = '\0';
+ env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
+
+- if (atomic_xchg(&dc->running, 1))
++ if (atomic_xchg(&dc->running, 1)) {
++ kfree(env[1]);
++ kfree(env[2]);
+ return;
++ }
+
+ if (!d->c &&
+ BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
+@@ -1967,6 +1972,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ else
+ err = "device busy";
+ mutex_unlock(&bch_register_lock);
++ if (attr == &ksysfs_register_quiet)
++ goto out;
+ }
+ goto err;
+ }
+@@ -2005,8 +2012,7 @@ out:
+ err_close:
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ err:
+- if (attr != &ksysfs_register_quiet)
+- pr_info("error opening %s: %s", path, err);
++ pr_info("error opening %s: %s", path, err);
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -2100,8 +2106,10 @@ static int __init bcache_init(void)
+ closure_debug_init();
+
+ bcache_major = register_blkdev(0, "bcache");
+- if (bcache_major < 0)
++ if (bcache_major < 0) {
++ unregister_reboot_notifier(&reboot);
+ return bcache_major;
++ }
+
+ if (!(bcache_wq = create_workqueue("bcache")) ||
+ !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index f1986bcd1bf0..540256a0df4f 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
+
+ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
+ {
++ struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
++
++ BUG_ON(KEY_INODE(k) != dc->disk.id);
++
+ return KEY_DIRTY(k);
+ }
+
+@@ -372,11 +376,24 @@ next:
+ }
+ }
+
++/*
++ * Returns true if we scanned the entire disk
++ */
+ static bool refill_dirty(struct cached_dev *dc)
+ {
+ struct keybuf *buf = &dc->writeback_keys;
++ struct bkey start = KEY(dc->disk.id, 0, 0);
+ struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
+- bool searched_from_start = false;
++ struct bkey start_pos;
++
++ /*
++ * Make sure the keybuf position is inside the range for this disk; at
++ * bringup we might not be attached yet, so this disk's inode number
++ * won't have been initialized.
++ */
++ if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
++ bkey_cmp(&buf->last_scanned, &end) > 0)
++ buf->last_scanned = start;
+
+ if (dc->partial_stripes_expensive) {
+ refill_full_stripes(dc);
+@@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc)
+ return false;
+ }
+
+- if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
+- buf->last_scanned = KEY(dc->disk.id, 0, 0);
+- searched_from_start = true;
+- }
+-
++ start_pos = buf->last_scanned;
+ bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
+
+- return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
++ if (bkey_cmp(&buf->last_scanned, &end) < 0)
++ return false;
++
++ /*
++ * If we get to the end, start scanning again from the beginning, but
++ * only scan up to where we initially started scanning from:
++ */
++ buf->last_scanned = start;
++ bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
++
++ return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
+ }
+
+ static int bch_writeback_thread(void *arg)
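refill_dirty() now makes at most two bounded passes over this disk's keyspace: from the saved scan position to the end and, if that completes, from the start back up to where the first pass began, so one call never visits a key twice. A control-flow sketch (refill() stands in for bch_refill_keybuf()):

	start_pos = buf->last_scanned;
	refill(buf, &end);			/* pass 1: last_scanned .. end */
	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;			/* keybuf filled before the end */

	buf->last_scanned = start;
	refill(buf, &start_pos);		/* pass 2: wrap, stop at pass 1's origin */
	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;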
+diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
+index 0a9dab187b79..073a042aed24 100644
+--- a/drivers/md/bcache/writeback.h
++++ b/drivers/md/bcache/writeback.h
+@@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
+
+ static inline void bch_writeback_queue(struct cached_dev *dc)
+ {
+- wake_up_process(dc->writeback_thread);
++ if (!IS_ERR_OR_NULL(dc->writeback_thread))
++ wake_up_process(dc->writeback_thread);
+ }
+
+ static inline void bch_writeback_add(struct cached_dev *dc)
+diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
+index 0b2536247cf5..84e27708ad97 100644
+--- a/drivers/md/dm-exception-store.h
++++ b/drivers/md/dm-exception-store.h
+@@ -70,7 +70,7 @@ struct dm_exception_store_type {
+ * Update the metadata with this exception.
+ */
+ void (*commit_exception) (struct dm_exception_store *store,
+- struct dm_exception *e,
++ struct dm_exception *e, int valid,
+ void (*callback) (void *, int success),
+ void *callback_context);
+
+diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
+index 808b8419bc48..9feb894e5565 100644
+--- a/drivers/md/dm-snap-persistent.c
++++ b/drivers/md/dm-snap-persistent.c
+@@ -694,7 +694,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
+ }
+
+ static void persistent_commit_exception(struct dm_exception_store *store,
+- struct dm_exception *e,
++ struct dm_exception *e, int valid,
+ void (*callback) (void *, int success),
+ void *callback_context)
+ {
+@@ -703,6 +703,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
+ struct core_exception ce;
+ struct commit_callback *cb;
+
++ if (!valid)
++ ps->valid = 0;
++
+ ce.old_chunk = e->old_chunk;
+ ce.new_chunk = e->new_chunk;
+ write_exception(ps, ps->current_committed++, &ce);
+diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
+index 1ce9a2586e41..31439d53cf7e 100644
+--- a/drivers/md/dm-snap-transient.c
++++ b/drivers/md/dm-snap-transient.c
+@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
+ }
+
+ static void transient_commit_exception(struct dm_exception_store *store,
+- struct dm_exception *e,
++ struct dm_exception *e, int valid,
+ void (*callback) (void *, int success),
+ void *callback_context)
+ {
+ /* Just succeed */
+- callback(callback_context, 1);
++ callback(callback_context, valid);
+ }
+
+ static void transient_usage(struct dm_exception_store *store,
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index f83a0f3fc365..11ec9d2a27df 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1428,8 +1428,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
+ dm_table_event(s->ti->table);
+ }
+
+-static void pending_complete(struct dm_snap_pending_exception *pe, int success)
++static void pending_complete(void *context, int success)
+ {
++ struct dm_snap_pending_exception *pe = context;
+ struct dm_exception *e;
+ struct dm_snapshot *s = pe->snap;
+ struct bio *origin_bios = NULL;
+@@ -1500,24 +1501,13 @@ out:
+ free_pending_exception(pe);
+ }
+
+-static void commit_callback(void *context, int success)
+-{
+- struct dm_snap_pending_exception *pe = context;
+-
+- pending_complete(pe, success);
+-}
+-
+ static void complete_exception(struct dm_snap_pending_exception *pe)
+ {
+ struct dm_snapshot *s = pe->snap;
+
+- if (unlikely(pe->copy_error))
+- pending_complete(pe, 0);
+-
+- else
+- /* Update the metadata if we are persistent */
+- s->store->type->commit_exception(s->store, &pe->e,
+- commit_callback, pe);
++ /* Update the metadata if we are persistent */
++ s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
++ pending_complete, pe);
+ }
+
+ /*
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 7073b22d4cb4..cb58bb318782 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -3210,8 +3210,8 @@ static void pool_postsuspend(struct dm_target *ti)
+ struct pool_c *pt = ti->private;
+ struct pool *pool = pt->pool;
+
+- cancel_delayed_work(&pool->waker);
+- cancel_delayed_work(&pool->no_space_timeout);
++ cancel_delayed_work_sync(&pool->waker);
++ cancel_delayed_work_sync(&pool->no_space_timeout);
+ flush_workqueue(pool->wq);
+ (void) commit(pool);
+ }
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index 882ca417f328..3ab874703d11 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -2333,9 +2333,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
+ dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
+ __func__, c->delivery_system, fe->ops.info.type);
+
+- /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
+- * do it, it is done for it. */
+- info->caps |= FE_CAN_INVERSION_AUTO;
++ /* Set the CAN_INVERSION_AUTO bit except in one-shot tune mode */
++ if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
++ info->caps |= FE_CAN_INVERSION_AUTO;
+ err = 0;
+ break;
+ }
+diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
+index a2631be7ffac..08e0f0dd8728 100644
+--- a/drivers/media/dvb-frontends/tda1004x.c
++++ b/drivers/media/dvb-frontends/tda1004x.c
+@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
+ {
+ struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
+ struct tda1004x_state* state = fe->demodulator_priv;
++ int status;
+
+ dprintk("%s\n", __func__);
+
++ status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
++ if (status == -1)
++ return -EIO;
++
++ /* Only update the properties cache if device is locked */
++ if (!(status & 8))
++ return 0;
++
+ // inversion status
+ fe_params->inversion = INVERSION_OFF;
+ if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
+diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
+index ac3cd74e824e..067db727e685 100644
+--- a/drivers/media/pci/saa7134/saa7134-alsa.c
++++ b/drivers/media/pci/saa7134/saa7134-alsa.c
+@@ -1218,6 +1218,8 @@ static int alsa_device_init(struct saa7134_dev *dev)
+
+ static int alsa_device_exit(struct saa7134_dev *dev)
+ {
++ if (!snd_saa7134_cards[dev->nr])
++ return 1;
+
+ snd_card_free(snd_saa7134_cards[dev->nr]);
+ snd_saa7134_cards[dev->nr] = NULL;
+@@ -1267,7 +1269,8 @@ static void saa7134_alsa_exit(void)
+ int idx;
+
+ for (idx = 0; idx < SNDRV_CARDS; idx++) {
+- snd_card_free(snd_saa7134_cards[idx]);
++ if (snd_saa7134_cards[idx])
++ snd_card_free(snd_saa7134_cards[idx]);
+ }
+
+ saa7134_dmasound_init = NULL;
+diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
+index 7830aef3db45..40f77685cc4a 100644
+--- a/drivers/media/rc/sunxi-cir.c
++++ b/drivers/media/rc/sunxi-cir.c
+@@ -153,6 +153,8 @@ static int sunxi_ir_probe(struct platform_device *pdev)
+ if (!ir)
+ return -ENOMEM;
+
++ spin_lock_init(&ir->ir_lock);
++
+ if (of_device_is_compatible(dn, "allwinner,sun5i-a13-ir"))
+ ir->fifo_size = 64;
+ else
+diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
+index 146071b8e116..bfff1d1c70ab 100644
+--- a/drivers/media/usb/gspca/ov534.c
++++ b/drivers/media/usb/gspca/ov534.c
+@@ -1491,8 +1491,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
+ struct v4l2_fract *tpf = &cp->timeperframe;
+ struct sd *sd = (struct sd *) gspca_dev;
+
+- /* Set requested framerate */
+- sd->frame_rate = tpf->denominator / tpf->numerator;
++ if (tpf->numerator == 0 || tpf->denominator == 0)
++ /* Set default framerate */
++ sd->frame_rate = 30;
++ else
++ /* Set requested framerate */
++ sd->frame_rate = tpf->denominator / tpf->numerator;
++
+ if (gspca_dev->streaming)
+ set_frame_rate(gspca_dev);
+
+diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
+index c70ff406b07a..c028a5c2438e 100644
+--- a/drivers/media/usb/gspca/topro.c
++++ b/drivers/media/usb/gspca/topro.c
+@@ -4802,7 +4802,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
+ struct v4l2_fract *tpf = &cp->timeperframe;
+ int fr, i;
+
+- sd->framerate = tpf->denominator / tpf->numerator;
++ if (tpf->numerator == 0 || tpf->denominator == 0)
++ sd->framerate = 30;
++ else
++ sd->framerate = tpf->denominator / tpf->numerator;
++
+ if (gspca_dev->streaming)
+ setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
+
+diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
+index cf9d644a8aff..472eaad6fb78 100644
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -2662,10 +2662,10 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+ return res | POLLERR;
+
+ /*
+- * For output streams you can write as long as there are fewer buffers
+- * queued than there are buffers available.
++ * For output streams you can call write() as long as there are fewer
++ * buffers queued than there are buffers available.
+ */
+- if (V4L2_TYPE_IS_OUTPUT(q->type) && q->queued_count < q->num_buffers)
++ if (V4L2_TYPE_IS_OUTPUT(q->type) && q->fileio && q->queued_count < q->num_buffers)
+ return res | POLLOUT | POLLWRNORM;
+
+ if (list_empty(&q->done_list))
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 31a9ef256d06..ce3044883a42 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -661,9 +661,25 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
+ * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+ */
+ if (!mmc_host_is_spi(card->host) &&
+- (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
+- card->sd_bus_speed == UHS_SDR104_BUS_SPEED))
++ (card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
++ card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
++ card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
+ err = mmc_execute_tuning(card);
++
++ /*
++ * As SD Specifications Part 1 (Physical Layer Specification,
++ * Version 3.01) says, CMD19 tuning is available for unlocked
++ * cards in the transfer state of 1.8V signaling mode. The small
++ * difference between the v3.00 and v3.01 specs means that CMD19
++ * tuning is also available for DDR50 mode.
++ */
++ if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50) {
++ pr_warn("%s: ddr50 tuning failed\n",
++ mmc_hostname(card->host));
++ err = 0;
++ }
++ }
++
+ out:
+ kfree(status);
+
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index 5bc6c7dbbd60..941beb3b5fa2 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -566,8 +566,8 @@ static int mmc_sdio_init_uhs_card(struct mmc_card *card)
+ * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+ */
+ if (!mmc_host_is_spi(card->host) &&
+- ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) ||
+- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)))
++ ((card->host->ios.timing == MMC_TIMING_UHS_SDR50) ||
++ (card->host->ios.timing == MMC_TIMING_UHS_SDR104)))
+ err = mmc_execute_tuning(card);
+ out:
+ return err;
+@@ -661,7 +661,7 @@ try_again:
+ */
+ if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
+- ocr);
++ ocr_card);
+ if (err == -EAGAIN) {
+ sdio_reset(host);
+ mmc_go_idle(host);
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index fb266745f824..acece3299756 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -1886,7 +1886,7 @@ static struct amba_id mmci_ids[] = {
+ {
+ .id = 0x00280180,
+ .mask = 0x00ffffff,
+- .data = &variant_u300,
++ .data = &variant_nomadik,
+ },
+ {
+ .id = 0x00480180,
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index cbaf3df3ebd9..f47c4a8370be 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -555,9 +555,12 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+
+ BUG_ON(len > 65536);
+
+- /* tran, valid */
+- sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
+- desc += host->desc_sz;
++ if (len) {
++ /* tran, valid */
++ sdhci_adma_write_desc(host, desc, addr, len,
++ ADMA2_TRAN_VALID);
++ desc += host->desc_sz;
++ }
+
+ /*
+ * If this triggers then we have a calculation bug
+@@ -2790,7 +2793,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
+
+ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+ {
+- if (host->runtime_suspended || host->bus_on)
++ if (host->bus_on)
+ return;
+ host->bus_on = true;
+ pm_runtime_get_noresume(host->mmc->parent);
+@@ -2798,7 +2801,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+
+ static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
+ {
+- if (host->runtime_suspended || !host->bus_on)
++ if (!host->bus_on)
+ return;
+ host->bus_on = false;
+ pm_runtime_put_noidle(host->mmc->parent);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+new file mode 100644
+index 000000000000..d60a467a983c
+--- /dev/null
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -0,0 +1,2717 @@
++/******************************************************************************
++ *
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
++ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
++ * Copyright(c) 2016 Intel Deutschland GmbH
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
++ * USA
++ *
++ * The full GNU General Public License is included in this distribution
++ * in the file called COPYING.
++ *
++ * Contact Information:
++ * Intel Linux Wireless <linuxwifi@intel.com>
++ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
++ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
++ * Copyright(c) 2016 Intel Deutschland GmbH
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ *****************************************************************************/
++#include <linux/pci.h>
++#include <linux/pci-aspm.h>
++#include <linux/interrupt.h>
++#include <linux/debugfs.h>
++#include <linux/sched.h>
++#include <linux/bitops.h>
++#include <linux/gfp.h>
++#include <linux/vmalloc.h>
++
++#include "iwl-drv.h"
++#include "iwl-trans.h"
++#include "iwl-csr.h"
++#include "iwl-prph.h"
++#include "iwl-scd.h"
++#include "iwl-agn-hw.h"
++#include "iwl-fw-error-dump.h"
++#include "internal.h"
++#include "iwl-fh.h"
++
++/* extended range in FW SRAM */
++#define IWL_FW_MEM_EXTENDED_START 0x40000
++#define IWL_FW_MEM_EXTENDED_END 0x57FFF
++
++static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++ if (!trans_pcie->fw_mon_page)
++ return;
++
++ dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
++ trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
++ __free_pages(trans_pcie->fw_mon_page,
++ get_order(trans_pcie->fw_mon_size));
++ trans_pcie->fw_mon_page = NULL;
++ trans_pcie->fw_mon_phys = 0;
++ trans_pcie->fw_mon_size = 0;
++}
++
++static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ struct page *page = NULL;
++ dma_addr_t phys;
++ u32 size = 0;
++ u8 power;
++
++ if (!max_power) {
++ /* default max_power is maximum */
++ max_power = 26;
++ } else {
++ max_power += 11;
++ }
++
++ if (WARN(max_power > 26,
++ "External buffer size for monitor is too big %d, check the FW TLV\n",
++ max_power))
++ return;
++
++ if (trans_pcie->fw_mon_page) {
++ dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
++ trans_pcie->fw_mon_size,
++ DMA_FROM_DEVICE);
++ return;
++ }
++
++ phys = 0;
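++ /*
++ * Try progressively smaller power-of-two buffers until one can be
++ * both allocated and DMA-mapped.
++ */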
++ for (power = max_power; power >= 11; power--) {
++ int order;
++
++ size = BIT(power);
++ order = get_order(size);
++ page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
++ order);
++ if (!page)
++ continue;
++
++ phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(trans->dev, phys)) {
++ __free_pages(page, order);
++ page = NULL;
++ continue;
++ }
++ IWL_INFO(trans,
++ "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
++ size, order);
++ break;
++ }
++
++ if (WARN_ON_ONCE(!page))
++ return;
++
++ if (power != max_power)
++ IWL_ERR(trans,
++ "Sorry - debug buffer is only %luK while you requested %luK\n",
++ (unsigned long)BIT(power - 10),
++ (unsigned long)BIT(max_power - 10));
++
++ trans_pcie->fw_mon_page = page;
++ trans_pcie->fw_mon_phys = phys;
++ trans_pcie->fw_mon_size = size;
++}
++
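++/*
++ * Indirect access to the "shared" (SHR) register block: the helpers
++ * below encode the target register in the low 16 bits of the control
++ * word and the operation in the top nibble (2 == read, 3 == write).
++ */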
++static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
++{
++ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
++ ((reg & 0x0000ffff) | (2 << 28)));
++ return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
++}
++
++static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
++{
++ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
++ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
++ ((reg & 0x0000ffff) | (3 << 28)));
++}
++
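++/*
++ * Select the APMG power source: VAUX if requested and the device can
++ * wake from D3cold, otherwise VMAIN.
++ */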
++static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
++{
++ if (trans->cfg->apmg_not_supported)
++ return;
++
++ if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
++ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
++ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
++ ~APMG_PS_CTRL_MSK_PWR_SRC);
++ else
++ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
++ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
++ ~APMG_PS_CTRL_MSK_PWR_SRC);
++}
++
++/* PCI registers */
++#define PCI_CFG_RETRY_TIMEOUT 0x041
++
++static void iwl_pcie_apm_config(struct iwl_trans *trans)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ u16 lctl;
++ u16 cap;
++
++ /*
++ * HW bug W/A for instability in PCIe bus L0S->L1 transition.
++ * Check if BIOS (or OS) enabled L1-ASPM on this device.
++ * If so (likely), disable L0S, so device moves directly L0->L1;
++ * costs negligible amount of power savings.
++ * If not (unlikely), enable L0S, so there is at least some
++ * power savings, even without L1.
++ */
++ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
++ if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
++ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
++ else
++ iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
++ trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
++
++ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
++ trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
++ dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
++ (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
++ trans->ltr_enabled ? "En" : "Dis");
++}
++
++/*
++ * Start up NIC's basic functionality after it has been reset
++ * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
++ * NOTE: This does not load uCode nor start the embedded processor
++ */
++static int iwl_pcie_apm_init(struct iwl_trans *trans)
++{
++ int ret = 0;
++
++ IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
++
++ /*
++ * Use "set_bit" below rather than "write", to preserve any hardware
++ * bits already set by default after reset.
++ */
++
++ /* Disable L0S exit timer (platform NMI Work/Around) */
++ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
++ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
++ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
++
++ /*
++ * Disable L0s without affecting L1;
++ * don't wait for ICH L0s (ICH bug W/A)
++ */
++ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
++ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
++
++ /* Set FH wait threshold to maximum (HW error during stress W/A) */
++ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
++
++ /*
++ * Enable HAP INTA (interrupt from management bus) to
++ * wake device's PCI Express link L1a -> L0s
++ */
++ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
++ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
++
++ iwl_pcie_apm_config(trans);
++
++ /* Configure analog phase-lock-loop before activating to D0A */
++ if (trans->cfg->base_params->pll_cfg_val)
++ iwl_set_bit(trans, CSR_ANA_PLL_CFG,
++ trans->cfg->base_params->pll_cfg_val);
++
++ /*
++ * Set "initialization complete" bit to move adapter from
++ * D0U* --> D0A* (powered-up active) state.
++ */
++ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
++
++ /*
++ * Wait for clock stabilization; once stabilized, access to
++ * device-internal resources is supported, e.g. iwl_write_prph()
++ * and accesses to uCode SRAM.
++ */
++ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
++ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
++ if (ret < 0) {
++ IWL_DEBUG_INFO(trans, "Failed to init the card\n");
++ goto out;
++ }
++
++ if (trans->cfg->host_interrupt_operation_mode) {
++ /*
++ * This is a bit of an abuse - this is needed for 7260 / 3160
++ * only, so we check host_interrupt_operation_mode even though
++ * it is not otherwise related to host_interrupt_operation_mode.
++ *
++ * Enable the oscillator to count wake up time for L1 exit. This
++ * consumes slightly more power (100uA) - but lets us be sure
++ * that we wake up from L1 on time.
++ *
++ * This looks weird: read twice the same register, discard the
++ * value, set a bit, and yet again, read that same register
++ * just to discard the value. But that's the way the hardware
++ * seems to like it.
++ */
++ iwl_read_prph(trans, OSC_CLK);
++ iwl_read_prph(trans, OSC_CLK);
++ iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
++ iwl_read_prph(trans, OSC_CLK);
++ iwl_read_prph(trans, OSC_CLK);
++ }
++
++ /*
++ * Enable DMA clock and wait for it to stabilize.
++ *
++ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
++ * bits do not disable clocks. This preserves any hardware
++ * bits already set by default in "CLK_CTRL_REG" after reset.
++ */
++ if (!trans->cfg->apmg_not_supported) {
++ iwl_write_prph(trans, APMG_CLK_EN_REG,
++ APMG_CLK_VAL_DMA_CLK_RQT);
++ udelay(20);
++
++ /* Disable L1-Active */
++ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
++ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
++
++ /* Clear the interrupt in APMG if the NIC is in RFKILL */
++ iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
++ APMG_RTC_INT_STT_RFKILL);
++ }
++
++ set_bit(STATUS_DEVICE_ENABLED, &trans->status);
++
++out:
++ return ret;
++}
++
++/*
++ * Enable LP XTAL to avoid HW bug where device may consume much power if
++ * FW is not loaded after device reset. LP XTAL is disabled by default
++ * after device HW reset. Do it only if XTAL is fed by internal source.
++ * Configure device's "persistence" mode to avoid resetting XTAL again when
++ * SHRD_HW_RST occurs in S3.
++ */
++static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
++{
++ int ret;
++ u32 apmg_gp1_reg;
++ u32 apmg_xtal_cfg_reg;
++ u32 dl_cfg_reg;
++
++ /* Force XTAL ON */
++ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
++
++ /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
++ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
++
++ udelay(10);
++
++ /*
++ * Set "initialization complete" bit to move adapter from
++ * D0U* --> D0A* (powered-up active) state.
++ */
++ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
++
++ /*
++ * Wait for clock stabilization; once stabilized, access to
++ * device-internal resources is possible.
++ */
++ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
++ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
++ 25000);
++ if (WARN_ON(ret < 0)) {
++ IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
++ /* Release XTAL ON request */
++ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
++ return;
++ }
++
++ /*
++ * Clear "disable persistence" to avoid LP XTAL resetting when
++ * SHRD_HW_RST is applied in S3.
++ */
++ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
++ APMG_PCIDEV_STT_VAL_PERSIST_DIS);
++
++ /*
++ * Force APMG XTAL to be active to prevent its disabling by HW
++ * caused by APMG idle state.
++ */
++ apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
++ SHR_APMG_XTAL_CFG_REG);
++ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
++ apmg_xtal_cfg_reg |
++ SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
++
++ /*
++ * Reset entire device again - do controller reset (results in
++ * SHRD_HW_RST). Turn MAC off before proceeding.
++ */
++ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
++
++ udelay(10);
++
++ /* Enable LP XTAL by indirect access through CSR */
++ apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
++ iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
++ SHR_APMG_GP1_WF_XTAL_LP_EN |
++ SHR_APMG_GP1_CHICKEN_BIT_SELECT);
++
++ /* Clear delay line clock power up */
++ dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
++ iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
++ ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
++
++ /*
++ * Enable persistence mode to avoid LP XTAL resetting when
++ * SHRD_HW_RST is applied in S3.
++ */
++ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
++ CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
++
++ /*
++ * Clear "initialization complete" bit to move adapter from
++ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
++ */
++ iwl_clear_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
++
++ /* Activates XTAL resources monitor */
++ __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
++ CSR_MONITOR_XTAL_RESOURCES);
++
++ /* Release XTAL ON request */
++ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
++ udelay(10);
++
++ /* Release APMG XTAL */
++ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
++ apmg_xtal_cfg_reg &
++ ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
++}
++
++static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
++{
++ int ret = 0;
++
++ /* stop device's busmaster DMA activity */
++ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
++
++ ret = iwl_poll_bit(trans, CSR_RESET,
++ CSR_RESET_REG_FLAG_MASTER_DISABLED,
++ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
++ if (ret < 0)
++ IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
++
++ IWL_DEBUG_INFO(trans, "stop master\n");
++
++ return ret;
++}
++
++static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
++{
++ IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
++
++ if (op_mode_leave) {
++ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
++ iwl_pcie_apm_init(trans);
++
++ /* inform ME that we are leaving */
++ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
++ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
++ APMG_PCIDEV_STT_VAL_WAKE_ME);
++ else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
++ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
++ CSR_RESET_LINK_PWR_MGMT_DISABLED);
++ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
++ CSR_HW_IF_CONFIG_REG_PREPARE |
++ CSR_HW_IF_CONFIG_REG_ENABLE_PME);
++ mdelay(1);
++ iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
++ CSR_RESET_LINK_PWR_MGMT_DISABLED);
++ }
++ mdelay(5);
++ }
++
++ clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
++
++ /* Stop device's DMA activity */
++ iwl_pcie_apm_stop_master(trans);
++
++ if (trans->cfg->lp_xtal_workaround) {
++ iwl_pcie_apm_lp_xtal_enable(trans);
++ return;
++ }
++
++ /* Reset the entire device */
++ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
++
++ udelay(10);
++
++ /*
++ * Clear "initialization complete" bit to move adapter from
++ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
++ */
++ iwl_clear_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
++}
++
++static int iwl_pcie_nic_init(struct iwl_trans *trans)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++ /* nic_init */
++ spin_lock(&trans_pcie->irq_lock);
++ iwl_pcie_apm_init(trans);
++
++ spin_unlock(&trans_pcie->irq_lock);
++
++ iwl_pcie_set_pwr(trans, false);
++
++ iwl_op_mode_nic_config(trans->op_mode);
++
++ /* Allocate the RX queue, or reset if it is already allocated */
++ iwl_pcie_rx_init(trans);
++
++ /* Allocate or reset and init all Tx and Command queues */
++ if (iwl_pcie_tx_init(trans))
++ return -ENOMEM;
++
++ if (trans->cfg->base_params->shadow_reg_enable) {
++ /* enable shadow regs in HW */
++ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
++ IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
++ }
++
++ return 0;
++}
++
++#define HW_READY_TIMEOUT (50)
++
++/* Note: returns poll_bit return value, which is >= 0 on success */
++static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
++{
++ int ret;
++
++ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
++ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
++
++ /* See if we got it */
++ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
++ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
++ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
++ HW_READY_TIMEOUT);
++
++ if (ret >= 0)
++ iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
++
++ IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
++ return ret;
++}
++
++/* Note: returns standard 0/-ERROR code */
++static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
++{
++ int ret;
++ int t = 0;
++ int iter;
++
++ IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
++
++ ret = iwl_pcie_set_hw_ready(trans);
++ /* If the card is ready, exit 0 */
++ if (ret >= 0)
++ return 0;
++
++ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
++ CSR_RESET_LINK_PWR_MGMT_DISABLED);
++ msleep(1);
++
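++ /* Not ready yet - retry: up to 10 rounds of asserting "prepare",
++ * each followed by a ready-poll budget of roughly 150ms. */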
++ for (iter = 0; iter < 10; iter++) {
++ /* If HW is not ready, prepare the conditions to check again */
++ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
++ CSR_HW_IF_CONFIG_REG_PREPARE);
++
++ do {
++ ret = iwl_pcie_set_hw_ready(trans);
++ if (ret >= 0)
++ return 0;
++
++ usleep_range(200, 1000);
++ t += 200;
++ } while (t < 150000);
++ msleep(25);
++ }
++
++ IWL_ERR(trans, "Couldn't prepare the card\n");
++
++ return ret;
++}
++
++/*
++ * ucode
++ */
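++
++/*
++ * Copy one firmware chunk into device SRAM: program the FH service
++ * DMA channel with the host buffer and SRAM destination, enable the
++ * channel, and wait up to 5 seconds for ucode_write_complete to be
++ * signalled from the interrupt path.
++ */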
++static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
++ dma_addr_t phy_addr, u32 byte_cnt)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ int ret;
++
++ trans_pcie->ucode_write_complete = false;
++
++ iwl_write_direct32(trans,
++ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
++ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
++
++ iwl_write_direct32(trans,
++ FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
++ dst_addr);
++
++ iwl_write_direct32(trans,
++ FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
++ phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
++
++ iwl_write_direct32(trans,
++ FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
++ (iwl_get_dma_hi_addr(phy_addr)
++ << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
++
++ iwl_write_direct32(trans,
++ FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
++ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
++ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
++ FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
++
++ iwl_write_direct32(trans,
++ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
++ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
++ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
++ FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
++
++ ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
++ trans_pcie->ucode_write_complete, 5 * HZ);
++ if (!ret) {
++ IWL_ERR(trans, "Failed to load firmware chunk!\n");
++ return -ETIMEDOUT;
++ }
++
++ return 0;
++}
++
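++/*
++ * Stream one ucode section into SRAM in chunk_sz pieces through a
++ * single DMA bounce buffer, toggling the LMPM_CHICK extended-address
++ * bit around chunks that land in the extended SRAM window.
++ */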
++static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
++ const struct fw_desc *section)
++{
++ u8 *v_addr;
++ dma_addr_t p_addr;
++ u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
++ int ret = 0;
++
++ IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
++ section_num);
++
++ v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
++ GFP_KERNEL | __GFP_NOWARN);
++ if (!v_addr) {
++ IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
++ chunk_sz = PAGE_SIZE;
++ v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
++ &p_addr, GFP_KERNEL);
++ if (!v_addr)
++ return -ENOMEM;
++ }
++
++ for (offset = 0; offset < section->len; offset += chunk_sz) {
++ u32 copy_size, dst_addr;
++ bool extended_addr = false;
++
++ copy_size = min_t(u32, chunk_sz, section->len - offset);
++ dst_addr = section->offset + offset;
++
++ if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
++ dst_addr <= IWL_FW_MEM_EXTENDED_END)
++ extended_addr = true;
++
++ if (extended_addr)
++ iwl_set_bits_prph(trans, LMPM_CHICK,
++ LMPM_CHICK_EXTENDED_ADDR_SPACE);
++
++ memcpy(v_addr, (u8 *)section->data + offset, copy_size);
++ ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
++ copy_size);
++
++ if (extended_addr)
++ iwl_clear_bits_prph(trans, LMPM_CHICK,
++ LMPM_CHICK_EXTENDED_ADDR_SPACE);
++
++ if (ret) {
++ IWL_ERR(trans,
++ "Could not load the [%d] uCode section\n",
++ section_num);
++ break;
++ }
++ }
++
++ dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
++ return ret;
++}
++
++/*
++ * The driver takes ownership of the secure machine before FW load
++ * to prevent a race with the BT load.
++ * W/A for a ROM bug (should be removed in the next Si step).
++ */
++static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
++{
++ u32 val, loop = 1000;
++
++ /*
++ * Check the RSA semaphore is accessible.
++ * If the HW isn't locked and the rsa semaphore isn't accessible,
++ * we are in trouble.
++ */
++ val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
++ if (val & (BIT(1) | BIT(17))) {
++ IWL_INFO(trans,
++ "can't access the RSA semaphore it is write protected\n");
++ return 0;
++ }
++
++ /* take ownership on the AUX IF */
++ iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
++ iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
++
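++ /* poll the semaphore for up to ~10ms (1000 tries, 10us apart) */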
++ do {
++ iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
++ val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
++ if (val == 0x1) {
++ iwl_write_prph(trans, RSA_ENABLE, 0);
++ return 0;
++ }
++
++ udelay(10);
++ loop--;
++ } while (loop > 0);
++
++ IWL_ERR(trans, "Failed to take ownership on secure machine\n");
++ return -EIO;
++}
++
++static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
++ const struct fw_img *image,
++ int cpu,
++ int *first_ucode_section)
++{
++ int shift_param;
++ int i, ret = 0, sec_num = 0x1;
++ u32 val, last_read_idx = 0;
++
++ if (cpu == 1) {
++ shift_param = 0;
++ *first_ucode_section = 0;
++ } else {
++ shift_param = 16;
++ (*first_ucode_section)++;
++ }
++
++ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
++ last_read_idx = i;
++
++ /*
++ * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
++ * sections from the CPU2 sections.
++ * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
++ * non-paged sections from the CPU2 paging sections.
++ */
++ if (!image->sec[i].data ||
++ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
++ image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
++ IWL_DEBUG_FW(trans,
++ "Break since Data not valid or Empty section, sec = %d\n",
++ i);
++ break;
++ }
++
++ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
++ if (ret)
++ return ret;
++
++ /* Notify the ucode of the loaded section number and status */
++ val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
++ val = val | (sec_num << shift_param);
++ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
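++ /* widen the reported mask by one bit: 0x1, 0x3, 0x7, ... */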
++ sec_num = (sec_num << 1) | 0x1;
++ }
++
++ *first_ucode_section = last_read_idx;
++
++ if (cpu == 1)
++ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
++ else
++ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
++
++ return 0;
++}
++
++static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
++ const struct fw_img *image,
++ int cpu,
++ int *first_ucode_section)
++{
++ int shift_param;
++ int i, ret = 0;
++ u32 last_read_idx = 0;
++
++ if (cpu == 1) {
++ shift_param = 0;
++ *first_ucode_section = 0;
++ } else {
++ shift_param = 16;
++ (*first_ucode_section)++;
++ }
++
++ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
++ last_read_idx = i;
++
++ /*
++ * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
++ * sections from the CPU2 sections.
++ * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
++ * non-paged sections from the CPU2 paging sections.
++ */
++ if (!image->sec[i].data ||
++ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
++ image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
++ IWL_DEBUG_FW(trans,
++ "Break since Data not valid or Empty section, sec = %d\n",
++ i);
++ break;
++ }
++
++ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
++ if (ret)
++ return ret;
++ }
++
++ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
++ iwl_set_bits_prph(trans,
++ CSR_UCODE_LOAD_STATUS_ADDR,
++ (LMPM_CPU_UCODE_LOADING_COMPLETED |
++ LMPM_CPU_HDRS_LOADING_COMPLETED |
++ LMPM_CPU_UCODE_LOADING_STARTED) <<
++ shift_param);
++
++ *first_ucode_section = last_read_idx;
++
++ return 0;
++}
++
++static void iwl_pcie_apply_destination(struct iwl_trans *trans)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
++ int i;
++
++ if (dest->version)
++ IWL_ERR(trans,
++ "DBG DEST version is %d - expect issues\n",
++ dest->version);
++
++ IWL_INFO(trans, "Applying debug destination %s\n",
++ get_fw_dbg_mode_string(dest->monitor_mode));
++
++ if (dest->monitor_mode == EXTERNAL_MODE)
++ iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
++ else
++ IWL_WARN(trans, "PCI should have external buffer debug\n");
++
++ for (i = 0; i < trans->dbg_dest_reg_num; i++) {
++ u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
++ u32 val = le32_to_cpu(dest->reg_ops[i].val);
++
++ switch (dest->reg_ops[i].op) {
++ case CSR_ASSIGN:
++ iwl_write32(trans, addr, val);
++ break;
++ case CSR_SETBIT:
++ iwl_set_bit(trans, addr, BIT(val));
++ break;
++ case CSR_CLEARBIT:
++ iwl_clear_bit(trans, addr, BIT(val));
++ break;
++ case PRPH_ASSIGN:
++ iwl_write_prph(trans, addr, val);
++ break;
++ case PRPH_SETBIT:
++ iwl_set_bits_prph(trans, addr, BIT(val));
++ break;
++ case PRPH_CLEARBIT:
++ iwl_clear_bits_prph(trans, addr, BIT(val));
++ break;
++ case PRPH_BLOCKBIT:
++ if (iwl_read_prph(trans, addr) & BIT(val)) {
++ IWL_ERR(trans,
++ "BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
++ val, addr);
++ goto monitor;
++ }
++ break;
++ default:
++ IWL_ERR(trans, "FW debug - unknown OP %d\n",
++ dest->reg_ops[i].op);
++ break;
++ }
++ }
++
++monitor:
++ if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
++ iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
++ trans_pcie->fw_mon_phys >> dest->base_shift);
++ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
++ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
++ (trans_pcie->fw_mon_phys +
++ trans_pcie->fw_mon_size - 256) >>
++ dest->end_shift);
++ else
++ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
++ (trans_pcie->fw_mon_phys +
++ trans_pcie->fw_mon_size) >>
++ dest->end_shift);
++ }
++}
++
++static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
++ const struct fw_img *image)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ int ret = 0;
++ int first_ucode_section;
++
++ IWL_DEBUG_FW(trans, "working with %s CPU\n",
++ image->is_dual_cpus ? "Dual" : "Single");
++
++ /* load to FW the binary non secured sections of CPU1 */
++ ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
++ if (ret)
++ return ret;
++
++ if (image->is_dual_cpus) {
++ /* set CPU2 header address */
++ iwl_write_prph(trans,
++ LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
++ LMPM_SECURE_CPU2_HDR_MEM_SPACE);
++
++ /* load to FW the binary sections of CPU2 */
++ ret = iwl_pcie_load_cpu_sections(trans, image, 2,
++ &first_ucode_section);
++ if (ret)
++ return ret;
++ }
++
++ /* supported for 7000 only for the moment */
++ if (iwlwifi_mod_params.fw_monitor &&
++ trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
++ iwl_pcie_alloc_fw_monitor(trans, 0);
++
++ if (trans_pcie->fw_mon_size) {
++ iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
++ trans_pcie->fw_mon_phys >> 4);
++ iwl_write_prph(trans, MON_BUFF_END_ADDR,
++ (trans_pcie->fw_mon_phys +
++ trans_pcie->fw_mon_size) >> 4);
++ }
++ } else if (trans->dbg_dest_tlv) {
++ iwl_pcie_apply_destination(trans);
++ }
++
++ /* release CPU reset */
++ iwl_write32(trans, CSR_RESET, 0);
++
++ return 0;
++}
++
++static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
++ const struct fw_img *image)
++{
++ int ret = 0;
++ int first_ucode_section;
++
++ IWL_DEBUG_FW(trans, "working with %s CPU\n",
++ image->is_dual_cpus ? "Dual" : "Single");
++
++ if (trans->dbg_dest_tlv)
++ iwl_pcie_apply_destination(trans);
++
++ /* TODO: remove in the next Si step */
++ ret = iwl_pcie_rsa_race_bug_wa(trans);
++ if (ret)
++ return ret;
++
++ /* configure the ucode to be ready to get the secured image */
++ /* release CPU reset */
++ iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
++
++ /* load to FW the binary Secured sections of CPU1 */
++ ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
++ &first_ucode_section);
++ if (ret)
++ return ret;
++
++ /* load to FW the binary sections of CPU2 */
++ return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
++ &first_ucode_section);
++}
++
++static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
++ const struct fw_img *fw, bool run_in_rfkill)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ bool hw_rfkill;
++ int ret;
++
++ mutex_lock(&trans_pcie->mutex);
++
++ /* Someone called stop_device, don't try to start_fw */
++ if (trans_pcie->is_down) {
++ IWL_WARN(trans,
++ "Can't start_fw since the HW hasn't been started\n");
++ ret = -EIO;
++ goto out;
++ }
++
++ /* This may fail if AMT took ownership of the device */
++ if (iwl_pcie_prepare_card_hw(trans)) {
++ IWL_WARN(trans, "Exit HW not ready\n");
++ ret = -EIO;
++ goto out;
++ }
++
++ iwl_enable_rfkill_int(trans);
++
++ /* If platform's RF_KILL switch is NOT set to KILL */
++ hw_rfkill = iwl_is_rfkill_set(trans);
++ if (hw_rfkill)
++ set_bit(STATUS_RFKILL, &trans->status);
++ else
++ clear_bit(STATUS_RFKILL, &trans->status);
++ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
++ if (hw_rfkill && !run_in_rfkill) {
++ ret = -ERFKILL;
++ goto out;
++ }
++
++ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
++
++ ret = iwl_pcie_nic_init(trans);
++ if (ret) {
++ IWL_ERR(trans, "Unable to init nic\n");
++ goto out;
++ }
++
++ /* make sure rfkill handshake bits are cleared */
++ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
++ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
++ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
++
++ /* clear (again), then enable host interrupts */
++ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
++ iwl_enable_interrupts(trans);
++
++ /* really make sure rfkill handshake bits are cleared */
++ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
++ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
++
++ /* Load the given image to the HW */
++ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
++ ret = iwl_pcie_load_given_ucode_8000(trans, fw);
++ else
++ ret = iwl_pcie_load_given_ucode(trans, fw);
++
++out:
++ mutex_unlock(&trans_pcie->mutex);
++ return ret;
++}
++
++static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
++{
++ iwl_pcie_reset_ict(trans);
++ iwl_pcie_tx_start(trans, scd_addr);
++}
++
++static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ bool hw_rfkill, was_hw_rfkill;
++
++ lockdep_assert_held(&trans_pcie->mutex);
++
++ if (trans_pcie->is_down)
++ return;
++
++ trans_pcie->is_down = true;
++
++ was_hw_rfkill = iwl_is_rfkill_set(trans);
++
++ /* tell the device to stop sending interrupts */
++ spin_lock(&trans_pcie->irq_lock);
++ iwl_disable_interrupts(trans);
++ spin_unlock(&trans_pcie->irq_lock);
++
++ /* device going down, Stop using ICT table */
++ iwl_pcie_disable_ict(trans);
++
++ /*
++ * If a HW restart happens during firmware loading,
++ * then the firmware loading might call this function
++ * and later it might be called again due to the
++ * restart. So don't process again if the device is
++ * already dead.
++ */
++ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
++ IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
++ iwl_pcie_tx_stop(trans);
++ iwl_pcie_rx_stop(trans);
++
++ /* Power-down device's busmaster DMA clocks */
++ if (!trans->cfg->apmg_not_supported) {
++ iwl_write_prph(trans, APMG_CLK_DIS_REG,
++ APMG_CLK_VAL_DMA_CLK_RQT);
++ udelay(5);
++ }
++ }
++
++ /* Make sure (redundant) we've released our request to stay awake */
++ iwl_clear_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
++
++ /* Stop the device, and put it in low power state */
++ iwl_pcie_apm_stop(trans, false);
++
++ /* stop and reset the on-board processor */
++ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
++ udelay(20);
++
++ /*
++ * Upon stop, the APM issues an interrupt if HW RF kill is set.
++ * This is a bug in certain versions of the hardware.
++ * Certain devices also keep sending HW RF kill interrupt all
++ * the time, unless the interrupt is ACKed even if the interrupt
++ * should be masked. Re-ACK all the interrupts here.
++ */
++ spin_lock(&trans_pcie->irq_lock);
++ iwl_disable_interrupts(trans);
++ spin_unlock(&trans_pcie->irq_lock);
++
++ /* clear all status bits */
++ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
++ clear_bit(STATUS_INT_ENABLED, &trans->status);
++ clear_bit(STATUS_TPOWER_PMI, &trans->status);
++ clear_bit(STATUS_RFKILL, &trans->status);
++
++ /*
++ * Even if we stop the HW, we still want the RF kill
++ * interrupt
++ */
++ iwl_enable_rfkill_int(trans);
++
++ /*
++ * Check again since the RF kill state may have changed while
++ * all the interrupts were disabled, in this case we couldn't
++ * receive the RF kill interrupt and update the state in the
++ * op_mode.
++ * Don't call the op_mode if the rfkill state hasn't changed.
++ * This allows the op_mode to call stop_device from the rfkill
++ * notification without endless recursion. Under very rare
++ * circumstances, we might have a small recursion if the rfkill
++ * state changed exactly now while we were called from stop_device.
++ * This is very unlikely but can happen and is supported.
++ */
++ hw_rfkill = iwl_is_rfkill_set(trans);
++ if (hw_rfkill)
++ set_bit(STATUS_RFKILL, &trans->status);
++ else
++ clear_bit(STATUS_RFKILL, &trans->status);
++ if (hw_rfkill != was_hw_rfkill)
++ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
++
++ /* re-take ownership to prevent other users from stealing the device */
++ iwl_pcie_prepare_card_hw(trans);
++}
++
++static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++ mutex_lock(&trans_pcie->mutex);
++ _iwl_trans_pcie_stop_device(trans, low_power);
++ mutex_unlock(&trans_pcie->mutex);
++}
++
++void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
++{
++ struct iwl_trans_pcie __maybe_unused *trans_pcie =
++ IWL_TRANS_GET_PCIE_TRANS(trans);
++
++ lockdep_assert_held(&trans_pcie->mutex);
++
++ if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
++ _iwl_trans_pcie_stop_device(trans, true);
++}
++
++static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++ if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
++ /* Enable persistence mode to avoid reset */
++ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
++ CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
++ }
++
++ iwl_disable_interrupts(trans);
++
++ /*
++ * in testing mode, the host stays awake and the
++ * hardware won't be reset (not even partially)
++ */
++ if (test)
++ return;
++
++ iwl_pcie_disable_ict(trans);
++
++ synchronize_irq(trans_pcie->pci_dev->irq);
++
++ iwl_clear_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
++ iwl_clear_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
++
++ if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D3) {
++ /*
++ * reset TX queues -- some of their registers reset during S3
++ * so if we don't reset everything here the D3 image would try
++ * to execute some invalid memory upon resume
++ */
++ iwl_trans_pcie_tx_reset(trans);
++ }
++
++ iwl_pcie_set_pwr(trans, true);
++}
++
++static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
++ enum iwl_d3_status *status,
++ bool test)
++{
++ u32 val;
++ int ret;
++
++ if (test) {
++ iwl_enable_interrupts(trans);
++ *status = IWL_D3_STATUS_ALIVE;
++ return 0;
++ }
++
++ /*
++ * Also enables interrupts - none will happen as the device doesn't
++ * know we're waking it up, only when the opmode actually tells it
++ * after this call.
++ */
++ iwl_pcie_reset_ict(trans);
++
++ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
++ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
++
++ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
++ udelay(2);
++
++ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
++ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
++ 25000);
++ if (ret < 0) {
++ IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
++ return ret;
++ }
++
++ iwl_pcie_set_pwr(trans, false);
++
++ if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
++ iwl_clear_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
++ } else {
++ iwl_trans_pcie_tx_reset(trans);
++
++ ret = iwl_pcie_rx_init(trans);
++ if (ret) {
++ IWL_ERR(trans,
++ "Failed to resume the device (RX reset)\n");
++ return ret;
++ }
++ }
++
++ val = iwl_read32(trans, CSR_RESET);
++ if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
++ *status = IWL_D3_STATUS_RESET;
++ else
++ *status = IWL_D3_STATUS_ALIVE;
++
++ return 0;
++}
++
++static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ bool hw_rfkill;
++ int err;
++
++ lockdep_assert_held(&trans_pcie->mutex);
++
++ err = iwl_pcie_prepare_card_hw(trans);
++ if (err) {
++ IWL_ERR(trans, "Error while preparing HW: %d\n", err);
++ return err;
++ }
++
++ /* Reset the entire device */
++ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
++
++ usleep_range(10, 15);
++
++ iwl_pcie_apm_init(trans);
++
++ /* From now on, the op_mode will be kept updated about RF kill state */
++ iwl_enable_rfkill_int(trans);
++
++ /* Set is_down to false here so that... */
++ trans_pcie->is_down = false;
++
++ hw_rfkill = iwl_is_rfkill_set(trans);
++ if (hw_rfkill)
++ set_bit(STATUS_RFKILL, &trans->status);
++ else
++ clear_bit(STATUS_RFKILL, &trans->status);
++ /* ... rfkill can call stop_device and set it false if needed */
++ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
++
++ return 0;
++}
++
++static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ int ret;
++
++ mutex_lock(&trans_pcie->mutex);
++ ret = _iwl_trans_pcie_start_hw(trans, low_power);
++ mutex_unlock(&trans_pcie->mutex);
++
++ return ret;
++}
++
++static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++ mutex_lock(&trans_pcie->mutex);
++
++ /* disable interrupts - don't enable HW RF kill interrupt */
++ spin_lock(&trans_pcie->irq_lock);
++ iwl_disable_interrupts(trans);
++ spin_unlock(&trans_pcie->irq_lock);
++
++ iwl_pcie_apm_stop(trans, true);
++
++ spin_lock(&trans_pcie->irq_lock);
++ iwl_disable_interrupts(trans);
++ spin_unlock(&trans_pcie->irq_lock);
++
++ iwl_pcie_disable_ict(trans);
++
++ mutex_unlock(&trans_pcie->mutex);
++
++ synchronize_irq(trans_pcie->pci_dev->irq);
++}
++
++static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
++{
++ writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
++}
++
++static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
++{
++ writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
++}
++
++static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
++{
++ return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
++}
++
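++/*
++ * Periphery (PRPH) registers are reached indirectly through the HBUS
++ * target address/data register pairs; the low 20 bits select the
++ * register, and the (3 << 24) bits follow the access pattern used for
++ * PRPH reads and writes elsewhere in the driver.
++ */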
++static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
++{
++ iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
++ ((reg & 0x000FFFFF) | (3 << 24)));
++ return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
++}
++
++static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
++ u32 val)
++{
++ iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
++ ((addr & 0x000FFFFF) | (3 << 24)));
++ iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
++}
++
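++/*
++ * The transport registers a dummy netdev/NAPI context (see
++ * iwl_trans_pcie_configure()); this poll callback is never expected
++ * to run, hence the WARN_ON.
++ */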
++static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
++{
++ WARN_ON(1);
++ return 0;
++}
++
++static void iwl_trans_pcie_configure(struct iwl_trans *trans,
++ const struct iwl_trans_config *trans_cfg)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++ trans_pcie->cmd_queue = trans_cfg->cmd_queue;
++ trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
++ trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
++ if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
++ trans_pcie->n_no_reclaim_cmds = 0;
++ else
++ trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
++ if (trans_pcie->n_no_reclaim_cmds)
++ memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
++ trans_pcie->n_no_reclaim_cmds * sizeof(u8));
++
++ trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
++ trans_pcie->rx_page_order =
++ iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
++
++ trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
++ trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
++ trans_pcie->scd_set_active = trans_cfg->scd_set_active;
++ trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
++
++ trans->command_groups = trans_cfg->command_groups;
++ trans->command_groups_size = trans_cfg->command_groups_size;
++
++ /* init ref_count to 1 (should be cleared when ucode is loaded) */
++ trans_pcie->ref_count = 1;
++
++ /* Initialize NAPI here - it should be before registering to mac80211
++ * in the opmode but after the HW struct is allocated.
++ * As this function may be called again in some corner cases, don't
++ * do anything if NAPI was already initialized.
++ */
++ if (!trans_pcie->napi.poll) {
++ init_dummy_netdev(&trans_pcie->napi_dev);
++ netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
++ iwl_pcie_dummy_napi_poll, 64);
++ }
++}
++
++void iwl_trans_pcie_free(struct iwl_trans *trans)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ int i;
++
++ synchronize_irq(trans_pcie->pci_dev->irq);
++
++ iwl_pcie_tx_free(trans);
++ iwl_pcie_rx_free(trans);
++
++ free_irq(trans_pcie->pci_dev->irq, trans);
++ iwl_pcie_free_ict(trans);
++
++ pci_disable_msi(trans_pcie->pci_dev);
++ iounmap(trans_pcie->hw_base);
++ pci_release_regions(trans_pcie->pci_dev);
++ pci_disable_device(trans_pcie->pci_dev);
++
++ if (trans_pcie->napi.poll)
++ netif_napi_del(&trans_pcie->napi);
++
++ iwl_pcie_free_fw_monitor(trans);
++
++ for_each_possible_cpu(i) {
++ struct iwl_tso_hdr_page *p =
++ per_cpu_ptr(trans_pcie->tso_hdr_page, i);
++
++ if (p->page)
++ __free_page(p->page);
++ }
++
++ free_percpu(trans_pcie->tso_hdr_page);
++ iwl_trans_free(trans);
++}
++
++static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
++{
++ if (state)
++ set_bit(STATUS_TPOWER_PMI, &trans->status);
++ else
++ clear_bit(STATUS_TPOWER_PMI, &trans->status);
++}
++
++static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
++ unsigned long *flags)
++{
++ int ret;
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++ spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
++
++ if (trans_pcie->cmd_hold_nic_awake)
++ goto out;
++
++ /* this bit wakes up the NIC */
++ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
++ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
++ udelay(2);
++
++ /*
++ * These bits say the device is running, and should keep running for
++ * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
++ * but they do not indicate that embedded SRAM is restored yet;
++ * 3945 and 4965 have volatile SRAM, and must save/restore contents
++ * to/from host DRAM when sleeping/waking for power-saving.
++ * Each direction takes approximately 1/4 millisecond; with this
++ * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
++ * series of register accesses are expected (e.g. reading Event Log),
++ * to keep device from sleeping.
++ *
++ * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
++ * SRAM is okay/restored. We don't check that here because this call
++ * is just for hardware register access; but GP1 MAC_SLEEP check is a
++ * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
++ *
++ * 5000 series and later (including 1000 series) have non-volatile SRAM,
++ * and do not save/restore SRAM when power cycling.
++ */
++ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
++ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
++ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
++ if (unlikely(ret < 0)) {
++ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
++ WARN_ONCE(1,
++ "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
++ iwl_read32(trans, CSR_GP_CNTRL));
++ spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
++ return false;
++ }
++
++out:
++ /*
++ * Fool sparse by faking that we release the lock - sparse will
++ * track nic_access anyway.
++ */
++ __release(&trans_pcie->reg_lock);
++ return true;
++}
++
++static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
++ unsigned long *flags)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++ lockdep_assert_held(&trans_pcie->reg_lock);
++
++ /*
++ * Fool sparse by faking that we acquire the lock - sparse will
++ * track nic_access anyway.
++ */
++ __acquire(&trans_pcie->reg_lock);
++
++ if (trans_pcie->cmd_hold_nic_awake)
++ goto out;
++
++ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
++ /*
++ * Above we read the CSR_GP_CNTRL register, which will flush
++ * any previous writes, but we need the write that clears the
++ * MAC_ACCESS_REQ bit to be performed before any other writes
++ * scheduled on different CPUs (after we drop reg_lock).
++ */
++ mmiowb();
++out:
++ spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
++}
++
++static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
++ void *buf, int dwords)
++{
++ unsigned long flags;
++ int offs, ret = 0;
++ u32 *vals = buf;
++
++ if (iwl_trans_grab_nic_access(trans, &flags)) {
++ iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
++ for (offs = 0; offs < dwords; offs++)
++ vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
++ iwl_trans_release_nic_access(trans, &flags);
++ } else {
++ ret = -EBUSY;
++ }
++ return ret;
++}
++
++static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
++ const void *buf, int dwords)
++{
++ unsigned long flags;
++ int offs, ret = 0;
++ const u32 *vals = buf;
++
++ if (iwl_trans_grab_nic_access(trans, &flags)) {
++ iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
++ for (offs = 0; offs < dwords; offs++)
++ iwl_write32(trans, HBUS_TARG_MEM_WDAT,
++ vals ? vals[offs] : 0);
++ iwl_trans_release_nic_access(trans, &flags);
++ } else {
++ ret = -EBUSY;
++ }
++ return ret;
++}
++
++static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
++ unsigned long txqs,
++ bool freeze)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ int queue;
++
++ for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
++ struct iwl_txq *txq = &trans_pcie->txq[queue];
++ unsigned long now;
++
++ spin_lock_bh(&txq->lock);
++
++ now = jiffies;
++
++ if (txq->frozen == freeze)
++ goto next_queue;
++
++ IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
++ freeze ? "Freezing" : "Waking", queue);
++
++ txq->frozen = freeze;
++
++ if (txq->q.read_ptr == txq->q.write_ptr)
++ goto next_queue;
++
++ if (freeze) {
++ if (unlikely(time_after(now,
++ txq->stuck_timer.expires))) {
++ /*
++ * The timer should have fired, maybe it is
++ * spinning right now on the lock.
++ */
++ goto next_queue;
++ }
++ /* remember how long until the timer fires */
++ txq->frozen_expiry_remainder =
++ txq->stuck_timer.expires - now;
++ del_timer(&txq->stuck_timer);
++ goto next_queue;
++ }
++
++ /*
++ * Wake a non-empty queue -> arm timer with the
++ * remainder before it froze
++ */
++ mod_timer(&txq->stuck_timer,
++ now + txq->frozen_expiry_remainder);
++
++next_queue:
++ spin_unlock_bh(&txq->lock);
++ }
++}
++
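++/*
++ * Block or unblock HW write-pointer updates for every TX queue except
++ * the command queue; when the last blocker is released, the stored
++ * write pointer is handed back to the hardware.
++ */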
++static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ int i;
++
++ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
++ struct iwl_txq *txq = &trans_pcie->txq[i];
++
++ if (i == trans_pcie->cmd_queue)
++ continue;
++
++ spin_lock_bh(&txq->lock);
++
++ if (!block && !(WARN_ON_ONCE(!txq->block))) {
++ txq->block--;
++ if (!txq->block) {
++ iwl_write32(trans, HBUS_TARG_WRPTR,
++ txq->q.write_ptr | (i << 8));
++ }
++ } else if (block) {
++ txq->block++;
++ }
++
++ spin_unlock_bh(&txq->lock);
++ }
++}
++
++#define IWL_FLUSH_WAIT_MS 2000
++
++static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ struct iwl_txq *txq;
++ struct iwl_queue *q;
++ int cnt;
++ unsigned long now = jiffies;
++ u32 scd_sram_addr;
++ u8 buf[16];
++ int ret = 0;
++
++ /* waiting for all the tx frames to complete might take a while */
++ for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
++ u8 wr_ptr;
++
++ if (cnt == trans_pcie->cmd_queue)
++ continue;
++ if (!test_bit(cnt, trans_pcie->queue_used))
++ continue;
++ if (!(BIT(cnt) & txq_bm))
++ continue;
++
++ IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
++ txq = &trans_pcie->txq[cnt];
++ q = &txq->q;
++ wr_ptr = ACCESS_ONCE(q->write_ptr);
++
++ while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
++ !time_after(jiffies,
++ now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
++ u8 write_ptr = ACCESS_ONCE(q->write_ptr);
++
++ if (WARN_ONCE(wr_ptr != write_ptr,
++ "WR pointer moved while flushing %d -> %d\n",
++ wr_ptr, write_ptr))
++ return -ETIMEDOUT;
++ msleep(1);
++ }
++
++ if (q->read_ptr != q->write_ptr) {
++ IWL_ERR(trans,
++ "fail to flush all tx fifo queues Q %d\n", cnt);
++ ret = -ETIMEDOUT;
++ break;
++ }
++ IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
++ }
++
++ if (!ret)
++ return 0;
++
++ IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
++ txq->q.read_ptr, txq->q.write_ptr);
++
++ scd_sram_addr = trans_pcie->scd_base_addr +
++ SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
++ iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
++
++ iwl_print_hex_error(trans, buf, sizeof(buf));
++
++ for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
++ IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
++ iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
++
++ for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
++ u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
++ u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
++ bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
++ u32 tbl_dw =
++ iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
++ SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
++
++ if (cnt & 0x1)
++ tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
++ else
++ tbl_dw = tbl_dw & 0x0000FFFF;
++
++ IWL_ERR(trans,
++ "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
++ cnt, active ? "" : "in", fifo, tbl_dw,
++ iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
++ (TFD_QUEUE_SIZE_MAX - 1),
++ iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
++ }
++
++ return ret;
++}
++
++static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
++ u32 mask, u32 value)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ unsigned long flags;
++
++ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
++ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
++ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
++}
++
++void iwl_trans_pcie_ref(struct iwl_trans *trans)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ unsigned long flags;
++
++ if (iwlwifi_mod_params.d0i3_disable)
++ return;
++
++ spin_lock_irqsave(&trans_pcie->ref_lock, flags);
++ IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
++ trans_pcie->ref_count++;
++ spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
++}
++
++void iwl_trans_pcie_unref(struct iwl_trans *trans)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ unsigned long flags;
++
++ if (iwlwifi_mod_params.d0i3_disable)
++ return;
++
++ spin_lock_irqsave(&trans_pcie->ref_lock, flags);
++ IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
++ if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
++ spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
++ return;
++ }
++ trans_pcie->ref_count--;
++ spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
++}
++
++static const char *get_csr_string(int cmd)
++{
++#define IWL_CMD(x) case x: return #x
++ switch (cmd) {
++ IWL_CMD(CSR_HW_IF_CONFIG_REG);
++ IWL_CMD(CSR_INT_COALESCING);
++ IWL_CMD(CSR_INT);
++ IWL_CMD(CSR_INT_MASK);
++ IWL_CMD(CSR_FH_INT_STATUS);
++ IWL_CMD(CSR_GPIO_IN);
++ IWL_CMD(CSR_RESET);
++ IWL_CMD(CSR_GP_CNTRL);
++ IWL_CMD(CSR_HW_REV);
++ IWL_CMD(CSR_EEPROM_REG);
++ IWL_CMD(CSR_EEPROM_GP);
++ IWL_CMD(CSR_OTP_GP_REG);
++ IWL_CMD(CSR_GIO_REG);
++ IWL_CMD(CSR_GP_UCODE_REG);
++ IWL_CMD(CSR_GP_DRIVER_REG);
++ IWL_CMD(CSR_UCODE_DRV_GP1);
++ IWL_CMD(CSR_UCODE_DRV_GP2);
++ IWL_CMD(CSR_LED_REG);
++ IWL_CMD(CSR_DRAM_INT_TBL_REG);
++ IWL_CMD(CSR_GIO_CHICKEN_BITS);
++ IWL_CMD(CSR_ANA_PLL_CFG);
++ IWL_CMD(CSR_HW_REV_WA_REG);
++ IWL_CMD(CSR_MONITOR_STATUS_REG);
++ IWL_CMD(CSR_DBG_HPET_MEM_REG);
++ default:
++ return "UNKNOWN";
++ }
++#undef IWL_CMD
++}
++
++void iwl_pcie_dump_csr(struct iwl_trans *trans)
++{
++ int i;
++ static const u32 csr_tbl[] = {
++ CSR_HW_IF_CONFIG_REG,
++ CSR_INT_COALESCING,
++ CSR_INT,
++ CSR_INT_MASK,
++ CSR_FH_INT_STATUS,
++ CSR_GPIO_IN,
++ CSR_RESET,
++ CSR_GP_CNTRL,
++ CSR_HW_REV,
++ CSR_EEPROM_REG,
++ CSR_EEPROM_GP,
++ CSR_OTP_GP_REG,
++ CSR_GIO_REG,
++ CSR_GP_UCODE_REG,
++ CSR_GP_DRIVER_REG,
++ CSR_UCODE_DRV_GP1,
++ CSR_UCODE_DRV_GP2,
++ CSR_LED_REG,
++ CSR_DRAM_INT_TBL_REG,
++ CSR_GIO_CHICKEN_BITS,
++ CSR_ANA_PLL_CFG,
++ CSR_MONITOR_STATUS_REG,
++ CSR_HW_REV_WA_REG,
++ CSR_DBG_HPET_MEM_REG
++ };
++ IWL_ERR(trans, "CSR values:\n");
++	IWL_ERR(trans,
++		"(2nd byte of CSR_INT_COALESCING is CSR_INT_PERIODIC_REG)\n");
++ for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
++ IWL_ERR(trans, " %25s: 0X%08x\n",
++ get_csr_string(csr_tbl[i]),
++ iwl_read32(trans, csr_tbl[i]));
++ }
++}
++
++#ifdef CONFIG_IWLWIFI_DEBUGFS
++/* creation and removal of debugfs files */
++#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
++ if (!debugfs_create_file(#name, mode, parent, trans, \
++ &iwl_dbgfs_##name##_ops)) \
++ goto err; \
++} while (0)
++
++/* file operations */
++#define DEBUGFS_READ_FILE_OPS(name) \
++static const struct file_operations iwl_dbgfs_##name##_ops = { \
++ .read = iwl_dbgfs_##name##_read, \
++ .open = simple_open, \
++ .llseek = generic_file_llseek, \
++};
++
++#define DEBUGFS_WRITE_FILE_OPS(name) \
++static const struct file_operations iwl_dbgfs_##name##_ops = { \
++ .write = iwl_dbgfs_##name##_write, \
++ .open = simple_open, \
++ .llseek = generic_file_llseek, \
++};
++
++#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
++static const struct file_operations iwl_dbgfs_##name##_ops = { \
++ .write = iwl_dbgfs_##name##_write, \
++ .read = iwl_dbgfs_##name##_read, \
++ .open = simple_open, \
++ .llseek = generic_file_llseek, \
++};
++
++static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct iwl_trans *trans = file->private_data;
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ struct iwl_txq *txq;
++ struct iwl_queue *q;
++ char *buf;
++ int pos = 0;
++ int cnt;
++ int ret;
++ size_t bufsz;
++
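++	/* roughly one 75-character status line per TX queue */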
++ bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
++
++ if (!trans_pcie->txq)
++ return -EAGAIN;
++
++ buf = kzalloc(bufsz, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
++ txq = &trans_pcie->txq[cnt];
++ q = &txq->q;
++ pos += scnprintf(buf + pos, bufsz - pos,
++ "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
++ cnt, q->read_ptr, q->write_ptr,
++ !!test_bit(cnt, trans_pcie->queue_used),
++ !!test_bit(cnt, trans_pcie->queue_stopped),
++ txq->need_update, txq->frozen,
++ (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
++ }
++ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
++ kfree(buf);
++ return ret;
++}
++
++static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct iwl_trans *trans = file->private_data;
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ struct iwl_rxq *rxq = &trans_pcie->rxq;
++ char buf[256];
++ int pos = 0;
++ const size_t bufsz = sizeof(buf);
++
++ pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
++ rxq->read);
++ pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
++ rxq->write);
++ pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
++ rxq->write_actual);
++ pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
++ rxq->need_update);
++ pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
++ rxq->free_count);
++ if (rxq->rb_stts) {
++ pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
++ le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
++ } else {
++ pos += scnprintf(buf + pos, bufsz - pos,
++ "closed_rb_num: Not Allocated\n");
++ }
++ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
++}
++
++static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct iwl_trans *trans = file->private_data;
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
++
++ int pos = 0;
++ char *buf;
++ int bufsz = 24 * 64; /* 24 items * 64 char per item */
++ ssize_t ret;
++
++ buf = kzalloc(bufsz, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ pos += scnprintf(buf + pos, bufsz - pos,
++ "Interrupt Statistics Report:\n");
++
++ pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
++ isr_stats->hw);
++ pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
++ isr_stats->sw);
++ if (isr_stats->sw || isr_stats->hw) {
++ pos += scnprintf(buf + pos, bufsz - pos,
++ "\tLast Restarting Code: 0x%X\n",
++ isr_stats->err_code);
++ }
++#ifdef CONFIG_IWLWIFI_DEBUG
++ pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
++ isr_stats->sch);
++ pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
++ isr_stats->alive);
++#endif
++ pos += scnprintf(buf + pos, bufsz - pos,
++ "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
++
++ pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
++ isr_stats->ctkill);
++
++ pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
++ isr_stats->wakeup);
++
++ pos += scnprintf(buf + pos, bufsz - pos,
++ "Rx command responses:\t\t %u\n", isr_stats->rx);
++
++ pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
++ isr_stats->tx);
++
++ pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
++ isr_stats->unhandled);
++
++ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
++ kfree(buf);
++ return ret;
++}
++
++static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct iwl_trans *trans = file->private_data;
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
++
++ char buf[8];
++ int buf_size;
++ u32 reset_flag;
++
++ memset(buf, 0, sizeof(buf));
++ buf_size = min(count, sizeof(buf) - 1);
++ if (copy_from_user(buf, user_buf, buf_size))
++ return -EFAULT;
++ if (sscanf(buf, "%x", &reset_flag) != 1)
++ return -EFAULT;
++ if (reset_flag == 0)
++ memset(isr_stats, 0, sizeof(*isr_stats));
++
++ return count;
++}
++
++static ssize_t iwl_dbgfs_csr_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct iwl_trans *trans = file->private_data;
++ char buf[8];
++ int buf_size;
++ int csr;
++
++ memset(buf, 0, sizeof(buf));
++ buf_size = min(count, sizeof(buf) - 1);
++ if (copy_from_user(buf, user_buf, buf_size))
++ return -EFAULT;
++ if (sscanf(buf, "%d", &csr) != 1)
++ return -EFAULT;
++
++ iwl_pcie_dump_csr(trans);
++
++ return count;
++}
++
++static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct iwl_trans *trans = file->private_data;
++ char *buf = NULL;
++ ssize_t ret;
++
++ ret = iwl_dump_fh(trans, &buf);
++ if (ret < 0)
++ return ret;
++ if (!buf)
++ return -EINVAL;
++ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
++ kfree(buf);
++ return ret;
++}
++
++DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
++DEBUGFS_READ_FILE_OPS(fh_reg);
++DEBUGFS_READ_FILE_OPS(rx_queue);
++DEBUGFS_READ_FILE_OPS(tx_queue);
++DEBUGFS_WRITE_FILE_OPS(csr);
++
++/* Create the debugfs files and directories */
++int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
++{
++ struct dentry *dir = trans->dbgfs_dir;
++
++ DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
++ DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
++ DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
++ DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
++ DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
++ return 0;
++
++err:
++ IWL_ERR(trans, "failed to create the trans debugfs entry\n");
++ return -ENOMEM;
++}
++#endif /*CONFIG_IWLWIFI_DEBUGFS */
++
++static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
++{
++ u32 cmdlen = 0;
++ int i;
++
++ for (i = 0; i < IWL_NUM_OF_TBS; i++)
++ cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
++
++ return cmdlen;
++}
++
++static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
++ struct iwl_fw_error_dump_data **data,
++ int allocated_rb_nums)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
++ struct iwl_rxq *rxq = &trans_pcie->rxq;
++ u32 i, r, j, rb_len = 0;
++
++ spin_lock(&rxq->lock);
++
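++	/* closed_rb_num is a 12-bit RB index written back by the device */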
++ r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
++
++ for (i = rxq->read, j = 0;
++ i != r && j < allocated_rb_nums;
++ i = (i + 1) & RX_QUEUE_MASK, j++) {
++ struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
++ struct iwl_fw_error_dump_rb *rb;
++
++ dma_unmap_page(trans->dev, rxb->page_dma, max_len,
++ DMA_FROM_DEVICE);
++
++ rb_len += sizeof(**data) + sizeof(*rb) + max_len;
++
++ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
++ (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
++ rb = (void *)(*data)->data;
++ rb->index = cpu_to_le32(i);
++ memcpy(rb->data, page_address(rxb->page), max_len);
++		/* remap the page so the normal free path can unmap it later */
++ rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
++ max_len,
++ DMA_FROM_DEVICE);
++
++ *data = iwl_fw_error_next_data(*data);
++ }
++
++ spin_unlock(&rxq->lock);
++
++ return rb_len;
++}
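++
++/* number of CSR bytes (starting at offset 0) included in the dump */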
++#define IWL_CSR_TO_DUMP (0x250)
++
++static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
++ struct iwl_fw_error_dump_data **data)
++{
++ u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
++ __le32 *val;
++ int i;
++
++ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
++ (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
++ val = (void *)(*data)->data;
++
++ for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
++ *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
++
++ *data = iwl_fw_error_next_data(*data);
++
++ return csr_len;
++}
++
++static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
++ struct iwl_fw_error_dump_data **data)
++{
++ u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
++ unsigned long flags;
++ __le32 *val;
++ int i;
++
++ if (!iwl_trans_grab_nic_access(trans, &flags))
++ return 0;
++
++ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
++ (*data)->len = cpu_to_le32(fh_regs_len);
++ val = (void *)(*data)->data;
++
++ for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
++ *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
++
++ iwl_trans_release_nic_access(trans, &flags);
++
++ *data = iwl_fw_error_next_data(*data);
++
++ return sizeof(**data) + fh_regs_len;
++}
++
++static u32
++iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
++ struct iwl_fw_error_dump_fw_mon *fw_mon_data,
++ u32 monitor_len)
++{
++ u32 buf_size_in_dwords = (monitor_len >> 2);
++ u32 *buffer = (u32 *)fw_mon_data->data;
++ unsigned long flags;
++ u32 i;
++
++ if (!iwl_trans_grab_nic_access(trans, &flags))
++ return 0;
++
++ iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
++ for (i = 0; i < buf_size_in_dwords; i++)
++ buffer[i] = iwl_read_prph_no_grab(trans,
++ MON_DMARB_RD_DATA_ADDR);
++ iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
++
++ iwl_trans_release_nic_access(trans, &flags);
++
++ return monitor_len;
++}
++
++static u32
++iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
++ struct iwl_fw_error_dump_data **data,
++ u32 monitor_len)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ u32 len = 0;
++
++ if ((trans_pcie->fw_mon_page &&
++ trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
++ trans->dbg_dest_tlv) {
++ struct iwl_fw_error_dump_fw_mon *fw_mon_data;
++ u32 base, write_ptr, wrap_cnt;
++
++ /* If there was a dest TLV - use the values from there */
++ if (trans->dbg_dest_tlv) {
++ write_ptr =
++ le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
++ wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
++ base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
++ } else {
++ base = MON_BUFF_BASE_ADDR;
++ write_ptr = MON_BUFF_WRPTR;
++ wrap_cnt = MON_BUFF_CYCLE_CNT;
++ }
++
++ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
++ fw_mon_data = (void *)(*data)->data;
++ fw_mon_data->fw_mon_wr_ptr =
++ cpu_to_le32(iwl_read_prph(trans, write_ptr));
++ fw_mon_data->fw_mon_cycle_cnt =
++ cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
++ fw_mon_data->fw_mon_base_ptr =
++ cpu_to_le32(iwl_read_prph(trans, base));
++
++ len += sizeof(**data) + sizeof(*fw_mon_data);
++ if (trans_pcie->fw_mon_page) {
++ /*
++			 * The firmware has asserted and will not write anything
++			 * more to the buffer, so the CPU can take ownership and
++			 * fetch the data. The buffer is handed back to the
++			 * device before the firmware is restarted.
++ */
++ dma_sync_single_for_cpu(trans->dev,
++ trans_pcie->fw_mon_phys,
++ trans_pcie->fw_mon_size,
++ DMA_FROM_DEVICE);
++ memcpy(fw_mon_data->data,
++ page_address(trans_pcie->fw_mon_page),
++ trans_pcie->fw_mon_size);
++
++ monitor_len = trans_pcie->fw_mon_size;
++ } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
++ /*
++ * Update pointers to reflect actual values after
++ * shifting
++ */
++ base = iwl_read_prph(trans, base) <<
++ trans->dbg_dest_tlv->base_shift;
++ iwl_trans_read_mem(trans, base, fw_mon_data->data,
++ monitor_len / sizeof(u32));
++ } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
++ monitor_len =
++ iwl_trans_pci_dump_marbh_monitor(trans,
++ fw_mon_data,
++ monitor_len);
++ } else {
++ /* Didn't match anything - output no monitor data */
++ monitor_len = 0;
++ }
++
++ len += monitor_len;
++ (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
++ }
++
++ return len;
++}
++
++static struct iwl_trans_dump_data
++*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
++ const struct iwl_fw_dbg_trigger_tlv *trigger)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ struct iwl_fw_error_dump_data *data;
++ struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
++ struct iwl_fw_error_dump_txcmd *txcmd;
++ struct iwl_trans_dump_data *dump_data;
++ u32 len, num_rbs;
++ u32 monitor_len;
++ int i, ptr;
++ bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);
++
++ /* transport dump header */
++ len = sizeof(*dump_data);
++
++ /* host commands */
++ len += sizeof(*data) +
++ cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
++
++ /* FW monitor */
++ if (trans_pcie->fw_mon_page) {
++ len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
++ trans_pcie->fw_mon_size;
++ monitor_len = trans_pcie->fw_mon_size;
++ } else if (trans->dbg_dest_tlv) {
++ u32 base, end;
++
++ base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
++ end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
++
++ base = iwl_read_prph(trans, base) <<
++ trans->dbg_dest_tlv->base_shift;
++ end = iwl_read_prph(trans, end) <<
++ trans->dbg_dest_tlv->end_shift;
++
++ /* Make "end" point to the actual end */
++ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
++ trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
++ end += (1 << trans->dbg_dest_tlv->end_shift);
++ monitor_len = end - base;
++ len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
++ monitor_len;
++ } else {
++ monitor_len = 0;
++ }
++
++ if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
++ dump_data = vzalloc(len);
++ if (!dump_data)
++ return NULL;
++
++ data = (void *)dump_data->data;
++ len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
++ dump_data->len = len;
++
++ return dump_data;
++ }
++
++ /* CSR registers */
++ len += sizeof(*data) + IWL_CSR_TO_DUMP;
++
++ /* FH registers */
++ len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
++
++ if (dump_rbs) {
++ /* RBs */
++ num_rbs = le16_to_cpu(ACCESS_ONCE(
++ trans_pcie->rxq.rb_stts->closed_rb_num))
++ & 0x0FFF;
++ num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
++ len += num_rbs * (sizeof(*data) +
++ sizeof(struct iwl_fw_error_dump_rb) +
++ (PAGE_SIZE << trans_pcie->rx_page_order));
++ }
++
++ dump_data = vzalloc(len);
++ if (!dump_data)
++ return NULL;
++
++ len = 0;
++ data = (void *)dump_data->data;
++ data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
++ txcmd = (void *)data->data;
++ spin_lock_bh(&cmdq->lock);
++ ptr = cmdq->q.write_ptr;
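++	/* capture host commands, walking back from the write pointer */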
++ for (i = 0; i < cmdq->q.n_window; i++) {
++ u8 idx = get_cmd_index(&cmdq->q, ptr);
++ u32 caplen, cmdlen;
++
++ cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
++ caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
++
++ if (cmdlen) {
++ len += sizeof(*txcmd) + caplen;
++ txcmd->cmdlen = cpu_to_le32(cmdlen);
++ txcmd->caplen = cpu_to_le32(caplen);
++ memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
++ txcmd = (void *)((u8 *)txcmd->data + caplen);
++ }
++
++ ptr = iwl_queue_dec_wrap(ptr);
++ }
++ spin_unlock_bh(&cmdq->lock);
++
++ data->len = cpu_to_le32(len);
++ len += sizeof(*data);
++ data = iwl_fw_error_next_data(data);
++
++ len += iwl_trans_pcie_dump_csr(trans, &data);
++ len += iwl_trans_pcie_fh_regs_dump(trans, &data);
++ if (dump_rbs)
++ len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
++
++ len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
++
++ dump_data->len = len;
++
++ return dump_data;
++}
++
++static const struct iwl_trans_ops trans_ops_pcie = {
++ .start_hw = iwl_trans_pcie_start_hw,
++ .op_mode_leave = iwl_trans_pcie_op_mode_leave,
++ .fw_alive = iwl_trans_pcie_fw_alive,
++ .start_fw = iwl_trans_pcie_start_fw,
++ .stop_device = iwl_trans_pcie_stop_device,
++
++ .d3_suspend = iwl_trans_pcie_d3_suspend,
++ .d3_resume = iwl_trans_pcie_d3_resume,
++
++ .send_cmd = iwl_trans_pcie_send_hcmd,
++
++ .tx = iwl_trans_pcie_tx,
++ .reclaim = iwl_trans_pcie_reclaim,
++
++ .txq_disable = iwl_trans_pcie_txq_disable,
++ .txq_enable = iwl_trans_pcie_txq_enable,
++
++ .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
++ .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
++ .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
++
++ .write8 = iwl_trans_pcie_write8,
++ .write32 = iwl_trans_pcie_write32,
++ .read32 = iwl_trans_pcie_read32,
++ .read_prph = iwl_trans_pcie_read_prph,
++ .write_prph = iwl_trans_pcie_write_prph,
++ .read_mem = iwl_trans_pcie_read_mem,
++ .write_mem = iwl_trans_pcie_write_mem,
++ .configure = iwl_trans_pcie_configure,
++ .set_pmi = iwl_trans_pcie_set_pmi,
++ .grab_nic_access = iwl_trans_pcie_grab_nic_access,
++ .release_nic_access = iwl_trans_pcie_release_nic_access,
++ .set_bits_mask = iwl_trans_pcie_set_bits_mask,
++
++ .ref = iwl_trans_pcie_ref,
++ .unref = iwl_trans_pcie_unref,
++
++ .dump_data = iwl_trans_pcie_dump_data,
++};
++
++struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
++ const struct pci_device_id *ent,
++ const struct iwl_cfg *cfg)
++{
++ struct iwl_trans_pcie *trans_pcie;
++ struct iwl_trans *trans;
++ u16 pci_cmd;
++ int ret;
++
++ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
++ &pdev->dev, cfg, &trans_ops_pcie, 0);
++ if (!trans)
++ return ERR_PTR(-ENOMEM);
++
++ trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
++
++ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++ trans_pcie->trans = trans;
++ spin_lock_init(&trans_pcie->irq_lock);
++ spin_lock_init(&trans_pcie->reg_lock);
++ spin_lock_init(&trans_pcie->ref_lock);
++ mutex_init(&trans_pcie->mutex);
++ init_waitqueue_head(&trans_pcie->ucode_write_waitq);
++ trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
++ if (!trans_pcie->tso_hdr_page) {
++ ret = -ENOMEM;
++ goto out_no_pci;
++ }
++
++ ret = pci_enable_device(pdev);
++ if (ret)
++ goto out_no_pci;
++
++ if (!cfg->base_params->pcie_l1_allowed) {
++		/*
++		 * W/A - disabling ASPM seems to solve the weird behavior, but
++		 * it keeps the link out of the L1 power-saving state all the
++		 * time, which wastes a lot of power; remove it when possible.
++		 */
++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
++ PCIE_LINK_STATE_L1 |
++ PCIE_LINK_STATE_CLKPM);
++ }
++
++ pci_set_master(pdev);
++
++ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
++ if (!ret)
++ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
++ if (ret) {
++ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
++ if (!ret)
++ ret = pci_set_consistent_dma_mask(pdev,
++ DMA_BIT_MASK(32));
++ /* both attempts failed: */
++ if (ret) {
++ dev_err(&pdev->dev, "No suitable DMA available\n");
++ goto out_pci_disable_device;
++ }
++ }
++
++ ret = pci_request_regions(pdev, DRV_NAME);
++ if (ret) {
++ dev_err(&pdev->dev, "pci_request_regions failed\n");
++ goto out_pci_disable_device;
++ }
++
++ trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
++ if (!trans_pcie->hw_base) {
++ dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
++ ret = -ENODEV;
++ goto out_pci_release_regions;
++ }
++
++ /* We disable the RETRY_TIMEOUT register (0x41) to keep
++ * PCI Tx retries from interfering with C3 CPU state */
++ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
++
++ trans->dev = &pdev->dev;
++ trans_pcie->pci_dev = pdev;
++ iwl_disable_interrupts(trans);
++
++ ret = pci_enable_msi(pdev);
++ if (ret) {
++ dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
++ /* enable rfkill interrupt: hw bug w/a */
++ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
++ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
++ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
++ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
++ }
++ }
++
++ trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
++ /*
++	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
++	 * changed: the revision step now also includes bits 0-1 (there is no
++	 * "dash" value any more). To keep hw_rev backwards compatible, we
++	 * store it in the old format.
++ */
++ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
++ unsigned long flags;
++
++ trans->hw_rev = (trans->hw_rev & 0xfff0) |
++ (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
++
++ ret = iwl_pcie_prepare_card_hw(trans);
++ if (ret) {
++ IWL_WARN(trans, "Exit HW not ready\n");
++ goto out_pci_disable_msi;
++ }
++
++ /*
++		 * In order to recognize a C-step device, the driver must read
++		 * the chip version id located in the AUX bus MISC address space.
++ */
++ iwl_set_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
++ udelay(2);
++
++ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
++ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
++ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
++ 25000);
++ if (ret < 0) {
++ IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
++ goto out_pci_disable_msi;
++ }
++
++ if (iwl_trans_grab_nic_access(trans, &flags)) {
++ u32 hw_step;
++
++ hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG);
++ hw_step |= ENABLE_WFPM;
++ iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, hw_step);
++ hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG);
++ hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
++ if (hw_step == 0x3)
++ trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
++ (SILICON_C_STEP << 2);
++ iwl_trans_release_nic_access(trans, &flags);
++ }
++ }
++
++ trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
++ snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
++ "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
++
++ /* Initialize the wait queue for commands */
++ init_waitqueue_head(&trans_pcie->wait_command_queue);
++
++ ret = iwl_pcie_alloc_ict(trans);
++ if (ret)
++ goto out_pci_disable_msi;
++
++ ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
++ iwl_pcie_irq_handler,
++ IRQF_SHARED, DRV_NAME, trans);
++ if (ret) {
++ IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
++ goto out_free_ict;
++ }
++
++ trans_pcie->inta_mask = CSR_INI_SET_MASK;
++
++ return trans;
++
++out_free_ict:
++ iwl_pcie_free_ict(trans);
++out_pci_disable_msi:
++ pci_disable_msi(pdev);
++out_pci_release_regions:
++ pci_release_regions(pdev);
++out_pci_disable_device:
++ pci_disable_device(pdev);
++out_no_pci:
++ free_percpu(trans_pcie->tso_hdr_page);
++ iwl_trans_free(trans);
++ return ERR_PTR(ret);
++}
+diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
+index 88bf80a942b4..9faf69875fab 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
+@@ -382,6 +382,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+@@ -399,10 +400,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
+- {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+- {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 1de80a8e357a..840c47d8e2ce 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -7,6 +7,7 @@
+ *
+ * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
++ * Copyright(c) 2016 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+@@ -33,6 +34,7 @@
+ *
+ * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
++ * Copyright(c) 2016 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+@@ -881,9 +883,16 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans)
+ if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
+ iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
+ trans_pcie->fw_mon_phys >> dest->base_shift);
+- iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+- (trans_pcie->fw_mon_phys +
+- trans_pcie->fw_mon_size) >> dest->end_shift);
++ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
++ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
++ (trans_pcie->fw_mon_phys +
++ trans_pcie->fw_mon_size - 256) >>
++ dest->end_shift);
++ else
++ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
++ (trans_pcie->fw_mon_phys +
++ trans_pcie->fw_mon_size) >>
++ dest->end_shift);
+ }
+ }
+
+diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
+index f46c9d7f6528..7f471bff435c 100644
+--- a/drivers/net/wireless/rtlwifi/pci.c
++++ b/drivers/net/wireless/rtlwifi/pci.c
+@@ -801,7 +801,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ hw_queue);
+ if (rx_remained_cnt == 0)
+ return;
+-
++ buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
++ rtlpci->rx_ring[rxring_idx].idx];
++ pdesc = (struct rtl_rx_desc *)skb->data;
+ } else { /* rx descriptor */
+ pdesc = &rtlpci->rx_ring[rxring_idx].desc[
+ rtlpci->rx_ring[rxring_idx].idx];
+@@ -824,13 +826,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ if (unlikely(!new_skb))
+ goto no_new;
+- if (rtlpriv->use_new_trx_flow) {
+- buffer_desc =
+- &rtlpci->rx_ring[rxring_idx].buffer_desc
+- [rtlpci->rx_ring[rxring_idx].idx];
+- /*means rx wifi info*/
+- pdesc = (struct rtl_rx_desc *)skb->data;
+- }
+ memset(&rx_status , 0 , sizeof(rx_status));
+ rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
+ &rx_status, (u8 *)pdesc, skb);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+index 11344121c55e..47e32cb0ec1a 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+@@ -88,8 +88,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
+ u8 tid;
+
+ rtl8188ee_bt_reg_init(hw);
+- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+-
+ rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_flag = 0;
+ rtlpriv->dm.disable_framebursting = 0;
+@@ -138,6 +136,11 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
+ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
++ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
++ rtlpriv->cfg->mod_params->sw_crypto =
++ rtlpriv->cfg->mod_params->sw_crypto;
++ rtlpriv->cfg->mod_params->disable_watchdog =
++ rtlpriv->cfg->mod_params->disable_watchdog;
+ if (rtlpriv->cfg->mod_params->disable_watchdog)
+ pr_info("watchdog disabled\n");
+ if (!rtlpriv->psc.inactiveps)
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+index de6cb6c3a48c..4780bdc63b2b 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+@@ -139,6 +139,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
+ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
++ rtlpriv->cfg->mod_params->sw_crypto =
++ rtlpriv->cfg->mod_params->sw_crypto;
+ if (!rtlpriv->psc.inactiveps)
+ pr_info("rtl8192ce: Power Save off (module option)\n");
+ if (!rtlpriv->psc.fwctrl_lps)
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index fd4a5353d216..7c6f7f0d18c6 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -65,6 +65,8 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
+ rtlpriv->dm.disable_framebursting = false;
+ rtlpriv->dm.thermalvalue = 0;
+ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
++ rtlpriv->cfg->mod_params->sw_crypto =
++ rtlpriv->cfg->mod_params->sw_crypto;
+
+ /* for firmware buf */
+ rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+index b19d0398215f..c6e09a19de1a 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+@@ -376,8 +376,8 @@ module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444);
+ module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444);
+ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
++MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
++MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
+ MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+
+ static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+index e1fd27c888bf..31baca41ac2f 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+@@ -187,6 +187,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
+ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
++ rtlpriv->cfg->mod_params->sw_crypto =
++ rtlpriv->cfg->mod_params->sw_crypto;
+ if (!rtlpriv->psc.inactiveps)
+ pr_info("Power Save off (module option)\n");
+ if (!rtlpriv->psc.fwctrl_lps)
+@@ -425,8 +427,8 @@ module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444);
+ module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444);
+ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
++MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
++MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
+ MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+
+ static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index 2721cf89fb16..aac1ed3f7bb4 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -531,6 +531,8 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
+ ieee80211_rx(hw, skb);
+ else
+ dev_kfree_skb_any(skb);
++ } else {
++ dev_kfree_skb_any(skb);
+ }
+ }
+
+diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
+index 0305729d0986..10cf3747694d 100644
+--- a/drivers/net/wireless/ti/wlcore/io.h
++++ b/drivers/net/wireless/ti/wlcore/io.h
+@@ -207,19 +207,23 @@ static inline int __must_check wlcore_write_reg(struct wl1271 *wl, int reg,
+
+ static inline void wl1271_power_off(struct wl1271 *wl)
+ {
+- int ret;
++ int ret = 0;
+
+ if (!test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags))
+ return;
+
+- ret = wl->if_ops->power(wl->dev, false);
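++	/* not every bus glue layer provides a power() hook */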
++ if (wl->if_ops->power)
++ ret = wl->if_ops->power(wl->dev, false);
+ if (!ret)
+ clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+ }
+
+ static inline int wl1271_power_on(struct wl1271 *wl)
+ {
+- int ret = wl->if_ops->power(wl->dev, true);
++ int ret = 0;
++
++ if (wl->if_ops->power)
++ ret = wl->if_ops->power(wl->dev, true);
+ if (ret == 0)
+ set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+
+diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
+index f1ac2839d97c..720e4e4b5a3c 100644
+--- a/drivers/net/wireless/ti/wlcore/spi.c
++++ b/drivers/net/wireless/ti/wlcore/spi.c
+@@ -73,7 +73,10 @@
+ */
+ #define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
+
+-#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
++/* Maximum number of SPI write chunks */
++#define WSPI_MAX_NUM_OF_CHUNKS \
++ ((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1)
++
+
+ struct wl12xx_spi_glue {
+ struct device *dev;
+@@ -268,9 +271,10 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
+ void *buf, size_t len, bool fixed)
+ {
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+- struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)];
++ /* SPI write buffers - 2 for each chunk */
++ struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
+ struct spi_message m;
+- u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
++ u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; /* 1 command per chunk */
+ u32 *cmd;
+ u32 chunk_len;
+ int i;
+diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
+index d3346d23963b..89b3befc7155 100644
+--- a/drivers/pci/bus.c
++++ b/drivers/pci/bus.c
+@@ -140,6 +140,8 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
+ type_mask |= IORESOURCE_TYPE_BITS;
+
+ pci_bus_for_each_resource(bus, r, i) {
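++		/* use a local copy so the caller's "min" survives later iterations */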
++ resource_size_t min_used = min;
++
+ if (!r)
+ continue;
+
+@@ -163,12 +165,12 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
+ * overrides "min".
+ */
+ if (avail.start)
+- min = avail.start;
++ min_used = avail.start;
+
+ max = avail.end;
+
+ /* Ok, try it out.. */
+- ret = allocate_resource(r, res, size, min, max,
++ ret = allocate_resource(r, res, size, min_used, max,
+ align, alignf, alignf_data);
+ if (ret == 0)
+ return 0;
+diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
+index 2d57e19a2cd4..b5ae685aec61 100644
+--- a/drivers/pci/host/pci-dra7xx.c
++++ b/drivers/pci/host/pci-dra7xx.c
+@@ -289,7 +289,8 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
+ }
+
+ ret = devm_request_irq(&pdev->dev, pp->irq,
+- dra7xx_pcie_msi_irq_handler, IRQF_SHARED,
++ dra7xx_pcie_msi_irq_handler,
++ IRQF_SHARED | IRQF_NO_THREAD,
+ "dra7-pcie-msi", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
+index c139237e0e52..5b2b83cb67ad 100644
+--- a/drivers/pci/host/pci-exynos.c
++++ b/drivers/pci/host/pci-exynos.c
+@@ -527,7 +527,8 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp,
+
+ ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ exynos_pcie_msi_irq_handler,
+- IRQF_SHARED, "exynos-pcie", pp);
++ IRQF_SHARED | IRQF_NO_THREAD,
++ "exynos-pcie", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request msi irq\n");
+ return ret;
+diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
+index fdb95367721e..ebcb0ac8512b 100644
+--- a/drivers/pci/host/pci-imx6.c
++++ b/drivers/pci/host/pci-imx6.c
+@@ -534,7 +534,8 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
+
+ ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ imx6_pcie_msi_handler,
+- IRQF_SHARED, "mx6-pcie-msi", pp);
++ IRQF_SHARED | IRQF_NO_THREAD,
++ "mx6-pcie-msi", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request MSI irq\n");
+ return -ENODEV;
+diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
+index 00e92720d7f7..d9789d6ba47d 100644
+--- a/drivers/pci/host/pci-tegra.c
++++ b/drivers/pci/host/pci-tegra.c
+@@ -1304,7 +1304,7 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
+
+ msi->irq = err;
+
+- err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
++ err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
+ tegra_msi_irq_chip.name, pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
+index c086210f2ffd..56ce5640d91a 100644
+--- a/drivers/pci/host/pcie-rcar.c
++++ b/drivers/pci/host/pcie-rcar.c
+@@ -695,14 +695,16 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
+
+ /* Two irqs are for MSI, but they are also used for non-MSI irqs */
+ err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
+- IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
++ IRQF_SHARED | IRQF_NO_THREAD,
++ rcar_msi_irq_chip.name, pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ goto err;
+ }
+
+ err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
+- IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
++ IRQF_SHARED | IRQF_NO_THREAD,
++ rcar_msi_irq_chip.name, pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ goto err;
+diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
+index 020d78890719..4ea793eaa2bd 100644
+--- a/drivers/pci/host/pcie-spear13xx.c
++++ b/drivers/pci/host/pcie-spear13xx.c
+@@ -281,7 +281,8 @@ static int spear13xx_add_pcie_port(struct pcie_port *pp,
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
+- IRQF_SHARED, "spear1340-pcie", pp);
++ IRQF_SHARED | IRQF_NO_THREAD,
++ "spear1340-pcie", pp);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d\n", pp->irq);
+ return ret;
+diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
+index f1a06a091ccb..577fe5b2f617 100644
+--- a/drivers/pci/host/pcie-xilinx.c
++++ b/drivers/pci/host/pcie-xilinx.c
+@@ -776,7 +776,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
+
+ port->irq = irq_of_parse_and_map(node, 0);
+ err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
+- IRQF_SHARED, "xilinx-pcie", port);
++ IRQF_SHARED | IRQF_NO_THREAD,
++ "xilinx-pcie", port);
+ if (err) {
+ dev_err(dev, "unable to request irq %d\n", port->irq);
+ return err;
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index bcb90e4888dd..b60309ee80ed 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -954,8 +954,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
+ {
+ pci_lock_rescan_remove();
+
+- if (slot->flags & SLOT_IS_GOING_AWAY)
++ if (slot->flags & SLOT_IS_GOING_AWAY) {
++ pci_unlock_rescan_remove();
+ return -ENODEV;
++ }
+
+ /* configure all functions */
+ if (!(slot->flags & SLOT_ENABLED))
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index cd78f1166b33..9a92d13e3917 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -845,6 +845,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
+ },
+ },
+ {
++ .ident = "Lenovo ideapad Y700-17ISK",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-17ISK"),
++ },
++ },
++ {
+ .ident = "Lenovo Yoga 2 11 / 13 / Pro",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+@@ -865,6 +872,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3 Pro-1370"),
+ },
+ },
++ {
++ .ident = "Lenovo Yoga 700",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"),
++ },
++ },
++ {
++ .ident = "Lenovo Yoga 900",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"),
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index 9f77d23239a2..64ed88a67e6e 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -227,6 +227,7 @@ static struct {
+ {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
+ {"Promise", "", NULL, BLIST_SPARSELUN},
+ {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
++ {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
+ {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
+ {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
+ {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 11ea52b2c36b..c66fd23b3c13 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3141,8 +3141,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ int ret = 0;
+
+- if (!sdkp)
+- return 0; /* this can happen */
++ if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
++ return 0;
+
+ if (sdkp->WCE && sdkp->media_present) {
+ sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
+@@ -3181,6 +3181,9 @@ static int sd_resume(struct device *dev)
+ {
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+
++ if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
++ return 0;
++
+ if (!sdkp->device->manage_start_stop)
+ return 0;
+
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 9d7b7db75e4b..3bbf4853733c 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1255,7 +1255,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
+ }
+
+ sfp->mmap_called = 1;
+- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
++ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = sfp;
+ vma->vm_ops = &sg_mmap_vm_ops;
+ return 0;
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index 8bd54a64efd6..64c867405ad4 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -144,6 +144,9 @@ static int sr_runtime_suspend(struct device *dev)
+ {
+ struct scsi_cd *cd = dev_get_drvdata(dev);
+
++ if (!cd) /* E.g.: runtime suspend following sr_remove() */
++ return 0;
++
+ if (cd->media_present)
+ return -EBUSY;
+ else
+@@ -985,6 +988,7 @@ static int sr_remove(struct device *dev)
+ scsi_autopm_get_device(cd->device);
+
+ del_gendisk(cd->disk);
++ dev_set_drvdata(dev, NULL);
+
+ mutex_lock(&sr_ref_mutex);
+ kref_put(&cd->kref, sr_kref_release);
+diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
+index a0315701c7d9..ed68b2cfe031 100644
+--- a/drivers/staging/speakup/selection.c
++++ b/drivers/staging/speakup/selection.c
+@@ -141,7 +141,9 @@ static void __speakup_paste_selection(struct work_struct *work)
+ struct tty_ldisc *ld;
+ DECLARE_WAITQUEUE(wait, current);
+
+- ld = tty_ldisc_ref_wait(tty);
++ ld = tty_ldisc_ref(tty);
++ if (!ld)
++ goto tty_unref;
+ tty_buffer_lock_exclusive(&vc->port);
+
+ add_wait_queue(&vc->paste_wait, &wait);
+@@ -161,6 +163,7 @@ static void __speakup_paste_selection(struct work_struct *work)
+
+ tty_buffer_unlock_exclusive(&vc->port);
+ tty_ldisc_deref(ld);
++tty_unref:
+ tty_kref_put(tty);
+ }
+
+diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
+index 1d9d51bdf517..f41a7da1949d 100644
+--- a/drivers/staging/speakup/serialio.c
++++ b/drivers/staging/speakup/serialio.c
+@@ -6,6 +6,11 @@
+ #include "spk_priv.h"
+ #include "serialio.h"
+
++#include <linux/serial_core.h>
++/* WARNING: Do not change this to <linux/serial.h> without testing that
++ * SERIAL_PORT_DFNS does get defined to the appropriate value. */
++#include <asm/serial.h>
++
+ #ifndef SERIAL_PORT_DFNS
+ #define SERIAL_PORT_DFNS
+ #endif
+@@ -23,9 +28,15 @@ const struct old_serial_port *spk_serial_init(int index)
+ int baud = 9600, quot = 0;
+ unsigned int cval = 0;
+ int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
+- const struct old_serial_port *ser = rs_table + index;
++ const struct old_serial_port *ser;
+ int err;
+
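++	/* rs_table only covers the ports described by SERIAL_PORT_DFNS */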
++ if (index >= ARRAY_SIZE(rs_table)) {
++ pr_info("no port info for ttyS%d\n", index);
++ return NULL;
++ }
++ ser = rs_table + index;
++
+ /* Divisor, bytesize and parity */
+ quot = ser->baud_base / baud;
+ cval = cflag & (CSIZE | CSTOPB);
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index 6f2fb546477e..5a8add721741 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -1907,7 +1907,8 @@ static void lio_tpg_release_fabric_acl(
+ }
+
+ /*
+- * Called with spin_lock_bh(struct se_portal_group->session_lock) held..
++ * Called with spin_lock_irq(struct se_portal_group->session_lock) held
++ * or not held.
+ *
+ * Also, this function calls iscsit_inc_session_usage_count() on the
+ * struct iscsi_session in question.
+@@ -1915,19 +1916,32 @@ static void lio_tpg_release_fabric_acl(
+ static int lio_tpg_shutdown_session(struct se_session *se_sess)
+ {
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
++ struct se_portal_group *se_tpg = se_sess->se_tpg;
++ bool local_lock = false;
++
++ if (!spin_is_locked(&se_tpg->session_lock)) {
++ spin_lock_irq(&se_tpg->session_lock);
++ local_lock = true;
++ }
+
+ spin_lock(&sess->conn_lock);
+ if (atomic_read(&sess->session_fall_back_to_erl0) ||
+ atomic_read(&sess->session_logout) ||
+ (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess->conn_lock);
++ if (local_lock)
++			spin_unlock_irq(&se_tpg->session_lock);
+ return 0;
+ }
+ atomic_set(&sess->session_reinstatement, 1);
+ spin_unlock(&sess->conn_lock);
+
+ iscsit_stop_time2retain_timer(sess);
++ spin_unlock_irq(&se_tpg->session_lock);
++
+ iscsit_stop_session(sess, 1, 1);
++ if (!local_lock)
++ spin_lock_irq(&se_tpg->session_lock);
+
+ return 1;
+ }
+diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
+index 5a0f12d08e8b..ec4ea5940bf7 100644
+--- a/drivers/thermal/step_wise.c
++++ b/drivers/thermal/step_wise.c
+@@ -63,6 +63,19 @@ static unsigned long get_target_state(struct thermal_instance *instance,
+ next_target = instance->target;
+ dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state);
+
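++	/* pick a sane initial state the first time this instance is evaluated */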
++ if (!instance->initialized) {
++ if (throttle) {
++ next_target = (cur_state + 1) >= instance->upper ?
++ instance->upper :
++ ((cur_state + 1) < instance->lower ?
++ instance->lower : (cur_state + 1));
++ } else {
++ next_target = THERMAL_NO_TARGET;
++ }
++
++ return next_target;
++ }
++
+ switch (trend) {
+ case THERMAL_TREND_RAISING:
+ if (throttle) {
+@@ -149,7 +162,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
+ dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
+ old_target, (int)instance->target);
+
+- if (old_target == instance->target)
++ if (instance->initialized && old_target == instance->target)
+ continue;
+
+ /* Activate a passive thermal instance */
+@@ -161,7 +174,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
+ instance->target == THERMAL_NO_TARGET)
+ update_passive_instance(tz, trip_type, -1);
+
+-
++ instance->initialized = true;
+ instance->cdev->updated = false; /* cdev needs update */
+ }
+
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 4108db7e10c1..a3282bfb343d 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -37,6 +37,7 @@
+ #include <linux/of.h>
+ #include <net/netlink.h>
+ #include <net/genetlink.h>
++#include <linux/suspend.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/thermal.h>
+@@ -59,6 +60,8 @@ static LIST_HEAD(thermal_governor_list);
+ static DEFINE_MUTEX(thermal_list_lock);
+ static DEFINE_MUTEX(thermal_governor_lock);
+
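++/* nonzero while a system-wide PM transition is in progress */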
++static atomic_t in_suspend;
++
+ static struct thermal_governor *def_governor;
+
+ static struct thermal_governor *__find_governor(const char *name)
+@@ -471,14 +474,31 @@ static void update_temperature(struct thermal_zone_device *tz)
+ mutex_unlock(&tz->lock);
+
+ trace_thermal_temperature(tz);
+- dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
+- tz->last_temperature, tz->temperature);
++ if (tz->last_temperature == THERMAL_TEMP_INVALID)
++ dev_dbg(&tz->device, "last_temperature N/A, current_temperature=%d\n",
++ tz->temperature);
++ else
++ dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
++ tz->last_temperature, tz->temperature);
++}
++
++static void thermal_zone_device_reset(struct thermal_zone_device *tz)
++{
++ struct thermal_instance *pos;
++
++ tz->temperature = THERMAL_TEMP_INVALID;
++ tz->passive = 0;
++ list_for_each_entry(pos, &tz->thermal_instances, tz_node)
++ pos->initialized = false;
+ }
+
+ void thermal_zone_device_update(struct thermal_zone_device *tz)
+ {
+ int count;
+
++ if (atomic_read(&in_suspend))
++ return;
++
+ if (!tz->ops->get_temp)
+ return;
+
+@@ -1016,6 +1036,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ if (!result) {
+ list_add_tail(&dev->tz_node, &tz->thermal_instances);
+ list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
++ atomic_set(&tz->need_update, 1);
+ }
+ mutex_unlock(&cdev->lock);
+ mutex_unlock(&tz->lock);
+@@ -1122,6 +1143,7 @@ __thermal_cooling_device_register(struct device_node *np,
+ const struct thermal_cooling_device_ops *ops)
+ {
+ struct thermal_cooling_device *cdev;
++ struct thermal_zone_device *pos = NULL;
+ int result;
+
+ if (type && strlen(type) >= THERMAL_NAME_LENGTH)
+@@ -1166,6 +1188,12 @@ __thermal_cooling_device_register(struct device_node *np,
+ /* Update binding information for 'this' new cdev */
+ bind_cdev(cdev);
+
++ mutex_lock(&thermal_list_lock);
++ list_for_each_entry(pos, &thermal_tz_list, node)
++ if (atomic_cmpxchg(&pos->need_update, 1, 0))
++ thermal_zone_device_update(pos);
++ mutex_unlock(&thermal_list_lock);
++
+ return cdev;
+ }
+
+@@ -1496,6 +1524,8 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
+ tz->trips = trips;
+ tz->passive_delay = passive_delay;
+ tz->polling_delay = polling_delay;
++ /* A new thermal zone needs to be updated anyway. */
++ atomic_set(&tz->need_update, 1);
+
+ dev_set_name(&tz->device, "thermal_zone%d", tz->id);
+ result = device_register(&tz->device);
+@@ -1576,7 +1606,10 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
+ if (!tz->ops->get_temp)
+ thermal_zone_device_set_polling(tz, 0);
+
+- thermal_zone_device_update(tz);
++ thermal_zone_device_reset(tz);
++ /* Update the new thermal zone and mark it as already updated. */
++ if (atomic_cmpxchg(&tz->need_update, 1, 0))
++ thermal_zone_device_update(tz);
+
+ return tz;
+
+@@ -1810,6 +1843,36 @@ static void thermal_unregister_governors(void)
+ thermal_gov_user_space_unregister();
+ }
+
++static int thermal_pm_notify(struct notifier_block *nb,
++ unsigned long mode, void *_unused)
++{
++ struct thermal_zone_device *tz;
++
++ switch (mode) {
++ case PM_HIBERNATION_PREPARE:
++ case PM_RESTORE_PREPARE:
++ case PM_SUSPEND_PREPARE:
++ atomic_set(&in_suspend, 1);
++ break;
++ case PM_POST_HIBERNATION:
++ case PM_POST_RESTORE:
++ case PM_POST_SUSPEND:
++ atomic_set(&in_suspend, 0);
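++		/* reset and re-evaluate every zone once the system is back up */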
++ list_for_each_entry(tz, &thermal_tz_list, node) {
++ thermal_zone_device_reset(tz);
++ thermal_zone_device_update(tz);
++ }
++ break;
++ default:
++ break;
++ }
++ return 0;
++}
++
++static struct notifier_block thermal_pm_nb = {
++ .notifier_call = thermal_pm_notify,
++};
++
+ static int __init thermal_init(void)
+ {
+ int result;
+@@ -1830,6 +1893,11 @@ static int __init thermal_init(void)
+ if (result)
+ goto exit_netlink;
+
++ result = register_pm_notifier(&thermal_pm_nb);
++ if (result)
++ pr_warn("Thermal: Can not register suspend notifier, return %d\n",
++ result);
++
+ return 0;
+
+ exit_netlink:
+@@ -1849,6 +1917,7 @@ error:
+
+ static void __exit thermal_exit(void)
+ {
++ unregister_pm_notifier(&thermal_pm_nb);
+ of_thermal_destroy_zones();
+ genetlink_exit();
+ class_unregister(&thermal_class);
+diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
+index 8e391812e503..dce86ee8e9d7 100644
+--- a/drivers/thermal/thermal_core.h
++++ b/drivers/thermal/thermal_core.h
+@@ -41,6 +41,7 @@ struct thermal_instance {
+ struct thermal_zone_device *tz;
+ struct thermal_cooling_device *cdev;
+ int trip;
++ bool initialized;
+ unsigned long upper; /* Highest cooling state for this trip point */
+ unsigned long lower; /* Lowest cooling state for this trip point */
+ unsigned long target; /* expected cooling state */
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index e5edf45e9d4c..33088c70ef3b 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -258,16 +258,13 @@ static void n_tty_check_throttle(struct tty_struct *tty)
+
+ static void n_tty_check_unthrottle(struct tty_struct *tty)
+ {
+- if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+- tty->link->ldisc->ops->write_wakeup == n_tty_write_wakeup) {
++ if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
+ if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
+ return;
+ if (!tty->count)
+ return;
+ n_tty_kick_worker(tty);
+- n_tty_write_wakeup(tty->link);
+- if (waitqueue_active(&tty->link->write_wait))
+- wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
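++		/* tty_wakeup() handles both the ldisc callback and the waitqueue */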
++ tty_wakeup(tty->link);
+ return;
+ }
+
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index eb8adc2e68c1..2fd163b75665 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1380,6 +1380,9 @@ ce4100_serial_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_INTEL_BSW_UART1 0x228a
+ #define PCI_DEVICE_ID_INTEL_BSW_UART2 0x228c
+
++#define PCI_DEVICE_ID_INTEL_BDW_UART1 0x9ce3
++#define PCI_DEVICE_ID_INTEL_BDW_UART2 0x9ce4
++
+ #define BYT_PRV_CLK 0x800
+ #define BYT_PRV_CLK_EN (1 << 0)
+ #define BYT_PRV_CLK_M_VAL_SHIFT 1
+@@ -1458,11 +1461,13 @@ byt_serial_setup(struct serial_private *priv,
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_BYT_UART1:
+ case PCI_DEVICE_ID_INTEL_BSW_UART1:
++ case PCI_DEVICE_ID_INTEL_BDW_UART1:
+ rx_param->src_id = 3;
+ tx_param->dst_id = 2;
+ break;
+ case PCI_DEVICE_ID_INTEL_BYT_UART2:
+ case PCI_DEVICE_ID_INTEL_BSW_UART2:
++ case PCI_DEVICE_ID_INTEL_BDW_UART2:
+ rx_param->src_id = 5;
+ tx_param->dst_id = 4;
+ break;
+@@ -2154,6 +2159,20 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ .subdevice = PCI_ANY_ID,
+ .setup = byt_serial_setup,
+ },
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_INTEL_BDW_UART1,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = byt_serial_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_INTEL_BDW_UART2,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = byt_serial_setup,
++ },
+ /*
+ * ITE
+ */
+@@ -5603,6 +5622,16 @@ static struct pci_device_id serial_pci_tbl[] = {
+ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+ pbn_byt },
+
++ /* Intel Broadwell */
++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART1,
++ PCI_ANY_ID, PCI_ANY_ID,
++ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
++ pbn_byt },
++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART2,
++ PCI_ANY_ID, PCI_ANY_ID,
++ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
++ pbn_byt },
++
+ /*
+ * Intel Penwell
+ */
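The new Broadwell IDs are matched twice: once in the quirk table above (routing the device through byt_serial_setup()) and once in serial_pci_tbl further down. Both rely on ordinary pci_device_id matching; a minimal sketch of such a table, with a hypothetical driver:

    static const struct pci_device_id example_ids[] = {
            { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9ce3) },  /* e.g. BDW UART1 */
            { }     /* all-zero sentinel terminates the table */
    };
    MODULE_DEVICE_TABLE(pci, example_ids);

The serial_pci_tbl entries additionally match on the PCI class with a 0xff0000 mask, so only the base class byte (communication controller) has to agree.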
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 5a5c1ab5a375..be96970646a9 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -2670,6 +2670,28 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
+ }
+
+ /**
++ * tiocgetd - get line discipline
++ * @tty: tty device
++ * @p: pointer to user data
++ *
++ * Retrieves the line discipline id directly from the ldisc.
++ *
++ * Locking: waits for ldisc reference (in case the line discipline
++ * is changing or the tty is being hungup)
++ */
++
++static int tiocgetd(struct tty_struct *tty, int __user *p)
++{
++ struct tty_ldisc *ld;
++ int ret;
++
++ ld = tty_ldisc_ref_wait(tty);
++ ret = put_user(ld->ops->num, p);
++ tty_ldisc_deref(ld);
++ return ret;
++}
++
++/**
+ * send_break - perform a timed break
+ * @tty: device to break on
+ * @duration: timeout in ms
+@@ -2895,7 +2917,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ case TIOCGSID:
+ return tiocgsid(tty, real_tty, p);
+ case TIOCGETD:
+- return put_user(tty->ldisc->ops->num, (int __user *)p);
++ return tiocgetd(tty, p);
+ case TIOCSETD:
+ return tiocsetd(tty, p);
+ case TIOCVHANGUP:
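From user space the fixed path is reached through the TIOCGETD ioctl. A short sketch of a caller, with error handling mostly elided:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            int ldisc;

            /* A concurrent TIOCSETD may be swapping the ldisc; the
             * tty_ldisc_ref_wait() above is what makes this read safe. */
            if (ioctl(STDIN_FILENO, TIOCGETD, &ldisc) == 0)
                    printf("line discipline: %d\n", ldisc);
            return 0;
    }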
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 0fe15aec7ed0..df3deb000a80 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -432,7 +432,8 @@ static void acm_read_bulk_callback(struct urb *urb)
+ set_bit(rb->index, &acm->read_urbs_free);
+ dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n",
+ __func__, status);
+- return;
++ if ((status != -ENOENT) || (urb->actual_length == 0))
++ return;
+ }
+
+ usb_mark_last_busy(acm->dev);
+@@ -1414,6 +1415,8 @@ made_compressed_probe:
+ usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
+ NULL, acm->writesize, acm_write_bulk, snd);
+ snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
++ if (quirks & SEND_ZERO_PACKET)
++ snd->urb->transfer_flags |= URB_ZERO_PACKET;
+ snd->instance = acm;
+ }
+
+@@ -1848,6 +1851,11 @@ static const struct usb_device_id acm_ids[] = {
+ },
+ #endif
+
++ /* Samsung phone in firmware update mode */
++ { USB_DEVICE(0x04e8, 0x685d),
++ .driver_info = IGNORE_DEVICE,
++ },
++
+ /* Exclude Infineon Flash Loader utility */
+ { USB_DEVICE(0x058b, 0x0041),
+ .driver_info = IGNORE_DEVICE,
+@@ -1871,6 +1879,10 @@ static const struct usb_device_id acm_ids[] = {
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+ USB_CDC_ACM_PROTO_AT_CDMA) },
+
++ { USB_DEVICE(0x1519, 0x0452), /* Intel 7260 modem */
++ .driver_info = SEND_ZERO_PACKET,
++ },
++
+ { }
+ };
+
+diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
+index b3b6c9db6fe5..ac830e0ae38b 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -134,3 +134,4 @@ struct acm {
+ #define IGNORE_DEVICE BIT(5)
+ #define QUIRK_CONTROL_LINE_STATE BIT(6)
+ #define CLEAR_HALT_CONDITIONS BIT(7)
++#define SEND_ZERO_PACKET BIT(8)
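SEND_ZERO_PACKET matters when a bulk OUT transfer length is an exact multiple of the endpoint's wMaxPacketSize: some device firmware waits for a short or zero-length packet to learn that the transfer is complete. The quirk is applied per write URB, as the cdc-acm hunk above does unconditionally; the host controller only actually emits the ZLP when the length is an exact multiple. As a sketch (names hypothetical):

    usb_fill_bulk_urb(urb, usb_dev, pipe, buf, len, complete_fn, ctx);
    urb->transfer_flags |= URB_ZERO_PACKET;  /* append ZLP on exact multiples */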
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index ee11b301f3da..e56ad83b35a4 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -5346,7 +5346,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ }
+
+ bos = udev->bos;
+- udev->bos = NULL;
+
+ for (i = 0; i < SET_CONFIG_TRIES; ++i) {
+
+@@ -5439,8 +5438,11 @@ done:
+ usb_set_usb2_hardware_lpm(udev, 1);
+ usb_unlocked_enable_lpm(udev);
+ usb_enable_ltm(udev);
+- usb_release_bos_descriptor(udev);
+- udev->bos = bos;
++ /* release the new BOS descriptor allocated by hub_port_init() */
++ if (udev->bos != bos) {
++ usb_release_bos_descriptor(udev);
++ udev->bos = bos;
++ }
+ return 0;
+
+ re_enumerate:
+diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
+index 65b0b6a58599..da03d8b258dd 100644
+--- a/drivers/usb/host/Makefile
++++ b/drivers/usb/host/Makefile
+@@ -26,9 +26,6 @@ obj-$(CONFIG_USB_WHCI_HCD) += whci/
+
+ obj-$(CONFIG_PCI) += pci-quirks.o
+
+-obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o
+-obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
+-
+ obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
+ obj-$(CONFIG_USB_EHCI_PCI) += ehci-pci.o
+ obj-$(CONFIG_USB_EHCI_HCD_PLATFORM) += ehci-platform.o
+@@ -63,6 +60,8 @@ obj-$(CONFIG_USB_OHCI_HCD_PXA27X) += ohci-pxa27x.o
+ obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
+ obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
+ obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o
++obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o
++obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
+ obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
+ obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
+ obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 7e5c90eebb9c..3ff5fcc7c94b 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -23,10 +23,17 @@
+ #include <linux/pci.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
++#include <linux/acpi.h>
+
+ #include "xhci.h"
+ #include "xhci-trace.h"
+
++#define SSIC_PORT_NUM 2
++#define SSIC_PORT_CFG2 0x880c
++#define SSIC_PORT_CFG2_OFFSET 0x30
++#define PROG_DONE (1 << 30)
++#define SSIC_PORT_UNUSED (1 << 31)
++
+ /* Device for a quirk */
+ #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
+ #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
+@@ -40,6 +47,7 @@
+ #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
++#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
+
+ static const char hcd_name[] = "xhci_hcd";
+
+@@ -140,9 +148,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
+- pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
++ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
+ xhci->quirks |= XHCI_PME_STUCK_QUIRK;
+ }
++ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
++ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
++ xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
++ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_EJ168) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+@@ -169,20 +182,18 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ "QUIRK: Resetting on resume");
+ }
+
+-/*
+- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
+- * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
+- */
+-static void xhci_pme_quirk(struct xhci_hcd *xhci)
++#ifdef CONFIG_ACPI
++static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
+ {
+- u32 val;
+- void __iomem *reg;
+-
+- reg = (void __iomem *) xhci->cap_regs + 0x80a4;
+- val = readl(reg);
+- writel(val | BIT(28), reg);
+- readl(reg);
++ static const u8 intel_dsm_uuid[] = {
++ 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
++ 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
++ };
++ acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL);
+ }
++#else
++static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
++#endif /* CONFIG_ACPI */
+
+ /* called during probe() after chip reset completes */
+ static int xhci_pci_setup(struct usb_hcd *hcd)
+@@ -263,6 +274,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ xhci->shared_hcd->can_do_streams = 1;
+
++ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
++ xhci_pme_acpi_rtd3_enable(dev);
++
+ /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+ pm_runtime_put_noidle(&dev->dev);
+
+@@ -296,10 +310,65 @@ static void xhci_pci_remove(struct pci_dev *dev)
+ }
+
+ #ifdef CONFIG_PM
++/*
++ * On some Intel xHCI controllers, in order to get D3 working, the
++ * SSIC ports need to be marked as "unused" through a vendor-specific
++ * SSIC CONFIG register at offset 0x883c before putting xHCI into D3.
++ * After D3 exit, the SSIC ports need to be marked as "used" again.
++ * Without this change, xHCI might not enter D3 state.
++ */
++static void xhci_ssic_port_unused_quirk(struct usb_hcd *hcd, bool suspend)
++{
++ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
++ u32 val;
++ void __iomem *reg;
++ int i;
++
++ for (i = 0; i < SSIC_PORT_NUM; i++) {
++ reg = (void __iomem *) xhci->cap_regs +
++ SSIC_PORT_CFG2 +
++ i * SSIC_PORT_CFG2_OFFSET;
++
++ /* Notify SSIC that SSIC profile programming is not done. */
++ val = readl(reg) & ~PROG_DONE;
++ writel(val, reg);
++
++ /* Mark SSIC port as unused (suspend) or used (resume) */
++ val = readl(reg);
++ if (suspend)
++ val |= SSIC_PORT_UNUSED;
++ else
++ val &= ~SSIC_PORT_UNUSED;
++ writel(val, reg);
++
++ /* Notify SSIC that SSIC profile programming is done */
++ val = readl(reg) | PROG_DONE;
++ writel(val, reg);
++ readl(reg);
++ }
++}
++
++/*
++ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
++ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
++ */
++static void xhci_pme_quirk(struct usb_hcd *hcd)
++{
++ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
++ void __iomem *reg;
++ u32 val;
++
++ reg = (void __iomem *) xhci->cap_regs + 0x80a4;
++ val = readl(reg);
++ writel(val | BIT(28), reg);
++ readl(reg);
++}
++
+ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+ {
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
++ int ret;
+
+ /*
+ * Systems with the TI redriver that loses port status change events
+@@ -309,9 +378,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+ pdev->no_d3cold = true;
+
+ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+- xhci_pme_quirk(xhci);
++ xhci_pme_quirk(hcd);
++
++ if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
++ xhci_ssic_port_unused_quirk(hcd, true);
+
+- return xhci_suspend(xhci, do_wakeup);
++ ret = xhci_suspend(xhci, do_wakeup);
++ if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED))
++ xhci_ssic_port_unused_quirk(hcd, false);
++
++ return ret;
+ }
+
+ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+@@ -341,8 +417,11 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ usb_enable_intel_xhci_ports(pdev);
+
++ if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
++ xhci_ssic_port_unused_quirk(hcd, false);
++
+ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+- xhci_pme_quirk(xhci);
++ xhci_pme_quirk(hcd);
+
+ retval = xhci_resume(xhci, hibernated);
+ return retval;
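The SSIC quirk above repeats the same read-modify-write-flush sequence on each port register. In isolation the pattern looks like this (a hypothetical helper, not part of the driver):

    static void mmio_update_bits(void __iomem *reg, u32 clear, u32 set)
    {
            u32 val = readl(reg);

            val &= ~clear;
            val |= set;
            writel(val, reg);
            readl(reg);     /* read back to flush the posted write */
    }

The trailing readl() matters on PCI: writes are posted, so the read-back guarantees the device has observed the new value before suspend proceeds.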
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 41d7a05f8af4..e6d858a49d04 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -3001,21 +3001,6 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ }
+
+ /*
+- * The TD size is the number of bytes remaining in the TD (including this TRB),
+- * right shifted by 10.
+- * It must fit in bits 21:17, so it can't be bigger than 31.
+- */
+-static u32 xhci_td_remainder(unsigned int remainder)
+-{
+- u32 max = (1 << (21 - 17 + 1)) - 1;
+-
+- if ((remainder >> 10) >= max)
+- return max << 17;
+- else
+- return (remainder >> 10) << 17;
+-}
+-
+-/*
+ * For xHCI 1.0 host controllers, TD size is the number of max packet sized
+ * packets remaining in the TD (*not* including this TRB).
+ *
+@@ -3027,30 +3012,36 @@ static u32 xhci_td_remainder(unsigned int remainder)
+ *
+ * TD size = total_packet_count - packets_transferred
+ *
+- * It must fit in bits 21:17, so it can't be bigger than 31.
++ * For xHCI 0.96 and older, TD size field should be the remaining bytes
++ * including this TRB, right shifted by 10
++ *
++ * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
++ * This is taken care of in the TRB_TD_SIZE() macro
++ *
+ * The last TRB in a TD must have the TD size set to zero.
+ */
+-static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
+- unsigned int total_packet_count, struct urb *urb,
+- unsigned int num_trbs_left)
++static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
++ int trb_buff_len, unsigned int td_total_len,
++ struct urb *urb, unsigned int num_trbs_left)
+ {
+- int packets_transferred;
++ u32 maxp, total_packet_count;
++
++ if (xhci->hci_version < 0x100)
++ return ((td_total_len - transferred) >> 10);
++
++ maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
++ total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
+
+ /* One TRB with a zero-length data packet. */
+- if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
++ if (num_trbs_left == 0 || (transferred == 0 && trb_buff_len == 0) ||
++ trb_buff_len == td_total_len)
+ return 0;
+
+- /* All the TRB queueing functions don't count the current TRB in
+- * running_total.
+- */
+- packets_transferred = (running_total + trb_buff_len) /
+- GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+-
+- if ((total_packet_count - packets_transferred) > 31)
+- return 31 << 17;
+- return (total_packet_count - packets_transferred) << 17;
++ /* Queueing functions don't count the current TRB into transferred */
++ return (total_packet_count - ((transferred + trb_buff_len) / maxp));
+ }
+
+ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index)
+ {
+@@ -3172,17 +3163,12 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ }
+
+ /* Set the TRB length, TD size, and interrupter fields. */
+- if (xhci->hci_version < 0x100) {
+- remainder = xhci_td_remainder(
+- urb->transfer_buffer_length -
+- running_total);
+- } else {
+- remainder = xhci_v1_0_td_remainder(running_total,
+- trb_buff_len, total_packet_count, urb,
+- num_trbs - 1);
+- }
++ remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
++ urb->transfer_buffer_length,
++ urb, num_trbs - 1);
++
+ length_field = TRB_LEN(trb_buff_len) |
+- remainder |
++ TRB_TD_SIZE(remainder) |
+ TRB_INTR_TARGET(0);
+
+ if (num_trbs > 1)
+@@ -3345,17 +3331,12 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ field |= TRB_ISP;
+
+ /* Set the TRB length, TD size, and interrupter fields. */
+- if (xhci->hci_version < 0x100) {
+- remainder = xhci_td_remainder(
+- urb->transfer_buffer_length -
+- running_total);
+- } else {
+- remainder = xhci_v1_0_td_remainder(running_total,
+- trb_buff_len, total_packet_count, urb,
+- num_trbs - 1);
+- }
++ remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
++ urb->transfer_buffer_length,
++ urb, num_trbs - 1);
++
+ length_field = TRB_LEN(trb_buff_len) |
+- remainder |
++ TRB_TD_SIZE(remainder) |
+ TRB_INTR_TARGET(0);
+
+ if (num_trbs > 1)
+@@ -3393,7 +3374,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct usb_ctrlrequest *setup;
+ struct xhci_generic_trb *start_trb;
+ int start_cycle;
+- u32 field, length_field;
++ u32 field, length_field, remainder;
+ struct urb_priv *urb_priv;
+ struct xhci_td *td;
+
+@@ -3466,9 +3447,15 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ else
+ field = TRB_TYPE(TRB_DATA);
+
++ remainder = xhci_td_remainder(xhci, 0,
++ urb->transfer_buffer_length,
++ urb->transfer_buffer_length,
++ urb, 1);
++
+ length_field = TRB_LEN(urb->transfer_buffer_length) |
+- xhci_td_remainder(urb->transfer_buffer_length) |
++ TRB_TD_SIZE(remainder) |
+ TRB_INTR_TARGET(0);
++
+ if (urb->transfer_buffer_length > 0) {
+ if (setup->bRequestType & USB_DIR_IN)
+ field |= TRB_DIR_IN;
+@@ -3691,17 +3678,12 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ trb_buff_len = td_remain_len;
+
+ /* Set the TRB length, TD size, & interrupter fields. */
+- if (xhci->hci_version < 0x100) {
+- remainder = xhci_td_remainder(
+- td_len - running_total);
+- } else {
+- remainder = xhci_v1_0_td_remainder(
+- running_total, trb_buff_len,
+- total_packet_count, urb,
+- (trbs_per_td - j - 1));
+- }
++ remainder = xhci_td_remainder(xhci, running_total,
++ trb_buff_len, td_len,
++ urb, trbs_per_td - j - 1);
++
+ length_field = TRB_LEN(trb_buff_len) |
+- remainder |
++ TRB_TD_SIZE(remainder) |
+ TRB_INTR_TARGET(0);
+
+ queue_trb(xhci, ep_ring, more_trbs_coming,
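A worked example of the unified xhci_td_remainder() on an xHCI 1.0 host may help; the numbers below are hypothetical. With 512-byte max packets, a 4096-byte TD, and the first TRB covering 1024 bytes:

    u32 maxp = 512;                          /* endpoint max packet size */
    u32 td_total_len = 4096;                 /* whole TD */
    u32 transferred = 0, trb_buff_len = 1024;
    u32 total_packets = DIV_ROUND_UP(td_total_len, maxp);          /* 8 */
    u32 td_size = total_packets - (transferred + trb_buff_len) / maxp;
                                             /* 8 - 2 = 6 packets left */

TRB_TD_SIZE(td_size) then clamps the value to 31 and shifts it into bits 21:17, while a pre-1.0 host instead gets (td_total_len - transferred) >> 10.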
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index f6bb118e4501..910f7fac031f 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1559,7 +1559,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "HW died, freeing TD.");
+ urb_priv = urb->hcpriv;
+- for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
++ for (i = urb_priv->td_cnt;
++ i < urb_priv->length && xhci->devs[urb->dev->slot_id];
++ i++) {
+ td = urb_priv->td[i];
+ if (!list_empty(&td->td_list))
+ list_del_init(&td->td_list);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 0f26dd2697b6..f18cdf0ec795 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1130,6 +1130,8 @@ enum xhci_setup_dev {
+ /* Normal TRB fields */
+ /* transfer_len bitmasks - bits 0:16 */
+ #define TRB_LEN(p) ((p) & 0x1ffff)
++/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31) */
++#define TRB_TD_SIZE(p) (min((p), (u32)31) << 17)
+ /* Interrupter Target - which MSI-X vector to target the completion event at */
+ #define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
+ #define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
+@@ -1568,6 +1570,7 @@ struct xhci_hcd {
+ /* For controllers with a broken beyond repair streams implementation */
+ #define XHCI_BROKEN_STREAMS (1 << 19)
+ #define XHCI_PME_STUCK_QUIRK (1 << 20)
++#define XHCI_SSIC_PORT_UNUSED (1 << 22)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 59b2126b21a3..1dd9919081f8 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -98,6 +98,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
+ { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
+ { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
++ { USB_DEVICE(0x10C4, 0x81D7) }, /* IAI Corp. RCB-CV-USB USB to RS485 Adaptor */
+ { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
+ { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
+ { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index a5a0376bbd48..8c660ae401d8 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -824,6 +824,7 @@ static const struct usb_device_id id_table_combined[] = {
+ { USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
++ { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) },
+ { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
+
+ /* Papouch devices based on FTDI chip */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 2943b97b2a83..7850071c0ae1 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -615,6 +615,7 @@
+ */
+ #define RATOC_VENDOR_ID 0x0584
+ #define RATOC_PRODUCT_ID_USB60F 0xb020
++#define RATOC_PRODUCT_ID_SCU18 0xb03a
+
+ /*
+ * Infineon Technologies
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 4021846139c9..88540596973f 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -271,6 +271,8 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_CC864_SINGLE 0x1006
+ #define TELIT_PRODUCT_DE910_DUAL 0x1010
+ #define TELIT_PRODUCT_UE910_V2 0x1012
++#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
++#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
+ #define TELIT_PRODUCT_LE920 0x1200
+ #define TELIT_PRODUCT_LE910 0x1201
+
+@@ -623,6 +625,16 @@ static const struct option_blacklist_info sierra_mc73xx_blacklist = {
+ .reserved = BIT(8) | BIT(10) | BIT(11),
+ };
+
++static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
++ .sendsetup = BIT(2),
++ .reserved = BIT(0) | BIT(1) | BIT(3),
++};
++
++static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
++ .sendsetup = BIT(0),
++ .reserved = BIT(1) | BIT(2) | BIT(3),
++};
++
+ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -1172,6 +1184,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+ .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+@@ -1691,7 +1707,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
+index 60afb39eb73c..337a0be89fcf 100644
+--- a/drivers/usb/serial/visor.c
++++ b/drivers/usb/serial/visor.c
+@@ -544,6 +544,11 @@ static int treo_attach(struct usb_serial *serial)
+ (serial->num_interrupt_in == 0))
+ return 0;
+
++ if (serial->num_bulk_in < 2 || serial->num_interrupt_in < 2) {
++ dev_err(&serial->interface->dev, "missing endpoints\n");
++ return -ENODEV;
++ }
++
+ /*
+ * It appears that Treos and Kyoceras want to use the
+ * 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
+@@ -597,8 +602,10 @@ static int clie_5_attach(struct usb_serial *serial)
+ */
+
+ /* some sanity check */
+- if (serial->num_ports < 2)
+- return -1;
++ if (serial->num_bulk_out < 2) {
++ dev_err(&serial->interface->dev, "missing bulk out endpoints\n");
++ return -ENODEV;
++ }
+
+ /* port 0 now uses the modified endpoint Address */
+ port = serial->port[0];
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 82e80e034f25..89bac470f04e 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -166,13 +166,13 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
+ mutex_unlock(&vb->balloon_lock);
+ }
+
+-static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
++static void release_pages_balloon(struct virtio_balloon *vb)
+ {
+ unsigned int i;
+
+ /* Find pfns pointing at start of each page, get pages and free them. */
+- for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+- struct page *page = balloon_pfn_to_page(pfns[i]);
++ for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
++ struct page *page = balloon_pfn_to_page(vb->pfns[i]);
+ adjust_managed_page_count(page, 1);
+ put_page(page); /* balloon reference */
+ }
+@@ -205,8 +205,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
+ */
+ if (vb->num_pfns != 0)
+ tell_host(vb, vb->deflate_vq);
++ release_pages_balloon(vb);
+ mutex_unlock(&vb->balloon_lock);
+- release_pages_by_pfn(vb->pfns, vb->num_pfns);
+ return num_freed_pages;
+ }
+
+diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
+index eba1b7ac7294..14f767e8e5c5 100644
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -554,6 +554,7 @@ err_enable_device:
+ static void virtio_pci_remove(struct pci_dev *pci_dev)
+ {
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
++ struct device *dev = get_device(&vp_dev->vdev.dev);
+
+ unregister_virtio_device(&vp_dev->vdev);
+
+@@ -564,6 +565,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
+
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
++ put_device(dev);
+ }
+
+ static struct pci_driver virtio_pci_driver = {
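The get_device()/put_device() pair added above pins the refcounted struct device across unregister_virtio_device(), so the remaining teardown cannot race with the device being freed. The same shape applies to any driver-model object; sketched generically:

    struct device *dev = get_device(&obj->dev);   /* pin before unregister */

    device_unregister(&obj->dev);   /* may drop the registration reference */
    /* ... release resources that must outlive the unregister ... */
    put_device(dev);                /* release() may now run and free obj */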
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index 0ef5cc13fae2..61205e3bbefa 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -192,6 +192,10 @@ struct btrfs_inode {
+ /* File creation time. */
+ struct timespec i_otime;
+
++ /* Hook into fs_info->delayed_iputs */
++ struct list_head delayed_iput;
++ long delayed_iput_count;
++
+ struct inode vfs_inode;
+ };
+
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 6f364e1d8d3d..699944a07491 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -1544,7 +1544,7 @@ struct btrfs_fs_info {
+
+ spinlock_t delayed_iput_lock;
+ struct list_head delayed_iputs;
+- struct rw_semaphore delayed_iput_sem;
++ struct mutex cleaner_delayed_iput_mutex;
+
+ /* this protects tree_mod_seq_list */
+ spinlock_t tree_mod_seq_lock;
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 2ef9a4b72d06..99e8f60c7962 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1772,8 +1772,11 @@ static int cleaner_kthread(void *arg)
+ goto sleep;
+ }
+
++ mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
+ btrfs_run_delayed_iputs(root);
+ btrfs_delete_unused_bgs(root->fs_info);
++ mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
++
+ again = btrfs_clean_one_deleted_snapshot(root);
+ mutex_unlock(&root->fs_info->cleaner_mutex);
+
+@@ -2491,8 +2494,8 @@ int open_ctree(struct super_block *sb,
+ mutex_init(&fs_info->unused_bg_unpin_mutex);
+ mutex_init(&fs_info->reloc_mutex);
+ mutex_init(&fs_info->delalloc_root_mutex);
++ mutex_init(&fs_info->cleaner_delayed_iput_mutex);
+ seqlock_init(&fs_info->profiles_lock);
+- init_rwsem(&fs_info->delayed_iput_sem);
+
+ init_completion(&fs_info->kobj_unregister);
+ INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 0ec3acd14cbf..3c1938000a5d 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3985,11 +3985,12 @@ commit_trans:
+ if (ret)
+ return ret;
+ /*
+- * make sure that all running delayed iput are
+- * done
++ * The cleaner kthread might still be doing iput
++ * operations. Wait for it to finish so that
++ * more space is released.
+ */
+- down_write(&root->fs_info->delayed_iput_sem);
+- up_write(&root->fs_info->delayed_iput_sem);
++ mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
++ mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
+ goto again;
+ } else {
+ btrfs_end_transaction(trans, root);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 5136c73b3dce..df4e0462976e 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -3080,56 +3080,46 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
+ start, (size_t)(end - start + 1));
+ }
+
+-struct delayed_iput {
+- struct list_head list;
+- struct inode *inode;
+-};
+-
+-/* JDM: If this is fs-wide, why can't we add a pointer to
+- * btrfs_inode instead and avoid the allocation? */
+ void btrfs_add_delayed_iput(struct inode *inode)
+ {
+ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+- struct delayed_iput *delayed;
++ struct btrfs_inode *binode = BTRFS_I(inode);
+
+ if (atomic_add_unless(&inode->i_count, -1, 1))
+ return;
+
+- delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
+- delayed->inode = inode;
+-
+ spin_lock(&fs_info->delayed_iput_lock);
+- list_add_tail(&delayed->list, &fs_info->delayed_iputs);
++ if (binode->delayed_iput_count == 0) {
++ ASSERT(list_empty(&binode->delayed_iput));
++ list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
++ } else {
++ binode->delayed_iput_count++;
++ }
+ spin_unlock(&fs_info->delayed_iput_lock);
+ }
+
+ void btrfs_run_delayed_iputs(struct btrfs_root *root)
+ {
+- LIST_HEAD(list);
+ struct btrfs_fs_info *fs_info = root->fs_info;
+- struct delayed_iput *delayed;
+- int empty;
+-
+- spin_lock(&fs_info->delayed_iput_lock);
+- empty = list_empty(&fs_info->delayed_iputs);
+- spin_unlock(&fs_info->delayed_iput_lock);
+- if (empty)
+- return;
+-
+- down_read(&fs_info->delayed_iput_sem);
+
+ spin_lock(&fs_info->delayed_iput_lock);
+- list_splice_init(&fs_info->delayed_iputs, &list);
+- spin_unlock(&fs_info->delayed_iput_lock);
+-
+- while (!list_empty(&list)) {
+- delayed = list_entry(list.next, struct delayed_iput, list);
+- list_del(&delayed->list);
+- iput(delayed->inode);
+- kfree(delayed);
++ while (!list_empty(&fs_info->delayed_iputs)) {
++ struct btrfs_inode *inode;
++
++ inode = list_first_entry(&fs_info->delayed_iputs,
++ struct btrfs_inode, delayed_iput);
++ if (inode->delayed_iput_count) {
++ inode->delayed_iput_count--;
++ list_move_tail(&inode->delayed_iput,
++ &fs_info->delayed_iputs);
++ } else {
++ list_del_init(&inode->delayed_iput);
++ }
++ spin_unlock(&fs_info->delayed_iput_lock);
++ iput(&inode->vfs_inode);
++ spin_lock(&fs_info->delayed_iput_lock);
+ }
+-
+- up_read(&root->fs_info->delayed_iput_sem);
++ spin_unlock(&fs_info->delayed_iput_lock);
+ }
+
+ /*
+@@ -8890,6 +8880,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
+ ei->dir_index = 0;
+ ei->last_unlink_trans = 0;
+ ei->last_log_commit = 0;
++ ei->delayed_iput_count = 0;
+
+ spin_lock_init(&ei->lock);
+ ei->outstanding_extents = 0;
+@@ -8914,6 +8905,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
+ mutex_init(&ei->delalloc_mutex);
+ btrfs_ordered_inode_tree_init(&ei->ordered_tree);
+ INIT_LIST_HEAD(&ei->delalloc_inodes);
++ INIT_LIST_HEAD(&ei->delayed_iput);
+ RB_CLEAR_NODE(&ei->rb_node);
+
+ return inode;
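The rewritten btrfs_run_delayed_iputs() is the classic drain-under-spinlock loop: the lock is dropped around each iput() because it can sleep, and entries with a pending count are rotated to the tail rather than removed. The generic shape, as a sketch with hypothetical names:

    spin_lock(&lock);
    while (!list_empty(&head)) {
            struct item *it = list_first_entry(&head, struct item, node);

            list_del_init(&it->node);
            spin_unlock(&lock);
            process(it);            /* may sleep, so the lock is dropped */
            spin_lock(&lock);
    }
    spin_unlock(&lock);

Embedding the list_head in btrfs_inode is also what lets btrfs_add_delayed_iput() drop the old GFP_NOFS | __GFP_NOFAIL allocation.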
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 174f5e1e00ab..5113b7257b45 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -6322,6 +6322,14 @@ int btrfs_read_sys_array(struct btrfs_root *root)
+ goto out_short_read;
+
+ num_stripes = btrfs_chunk_num_stripes(sb, chunk);
++ if (!num_stripes) {
++ printk(KERN_ERR
++ "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
++ num_stripes, cur_offset);
++ ret = -EIO;
++ break;
++ }
++
+ len = btrfs_chunk_item_size(num_stripes);
+ if (cur_offset + len > array_size)
+ goto out_short_read;
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index 7febcf2475c5..50b268483302 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -50,7 +50,7 @@ void cifs_vfs_err(const char *fmt, ...)
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+- pr_err("CIFS VFS: %pV", &vaf);
++ pr_err_ratelimited("CIFS VFS: %pV", &vaf);
+
+ va_end(args);
+ }
+diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
+index f40fbaca1b2a..66cf0f9fff89 100644
+--- a/fs/cifs/cifs_debug.h
++++ b/fs/cifs/cifs_debug.h
+@@ -51,14 +51,13 @@ __printf(1, 2) void cifs_vfs_err(const char *fmt, ...);
+ /* information message: e.g., configuration, major event */
+ #define cifs_dbg(type, fmt, ...) \
+ do { \
+- if (type == FYI) { \
+- if (cifsFYI & CIFS_INFO) { \
+- pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__); \
+- } \
++ if (type == FYI && cifsFYI & CIFS_INFO) { \
++ pr_debug_ratelimited("%s: " \
++ fmt, __FILE__, ##__VA_ARGS__); \
+ } else if (type == VFS) { \
+ cifs_vfs_err(fmt, ##__VA_ARGS__); \
+ } else if (type == NOISY && type != 0) { \
+- pr_debug(fmt, ##__VA_ARGS__); \
++ pr_debug_ratelimited(fmt, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 8383d5ea4202..de626b939811 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -357,7 +357,6 @@ cifs_reconnect(struct TCP_Server_Info *server)
+ server->session_key.response = NULL;
+ server->session_key.len = 0;
+ server->lstrp = jiffies;
+- mutex_unlock(&server->srv_mutex);
+
+ /* mark submitted MIDs for retry and issue callback */
+ INIT_LIST_HEAD(&retry_list);
+@@ -370,6 +369,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
+ list_move(&mid_entry->qhead, &retry_list);
+ }
+ spin_unlock(&GlobalMid_Lock);
++ mutex_unlock(&server->srv_mutex);
+
+ cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
+ list_for_each_safe(tmp, tmp2, &retry_list) {
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index b1eede3678a9..3634c7adf7d2 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -847,6 +847,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
+ * if buggy server returns . and .. late do we want to
+ * check for that here?
+ */
++ *tmp_buf = 0;
+ rc = cifs_filldir(current_entry, file, ctx,
+ tmp_buf, max_len);
+ if (rc) {
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 126f46b887cc..66106f6ed7b4 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -576,14 +576,16 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+ cifs_in_send_dec(server);
+ cifs_save_when_sent(mid);
+
+- if (rc < 0)
++ if (rc < 0) {
+ server->sequence_number -= 2;
++ cifs_delete_mid(mid);
++ }
++
+ mutex_unlock(&server->srv_mutex);
+
+ if (rc == 0)
+ return 0;
+
+- cifs_delete_mid(mid);
+ add_credits_and_wake_if(server, credits, optype);
+ return rc;
+ }
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index de2d6245e9fa..f895a85d9304 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -730,15 +730,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+
+ init_special_inode(inode, mode, dev);
+ err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
+- if (!err)
++ if (err)
+ goto out_free;
+
+ err = read_name(inode, name);
+ __putname(name);
+ if (err)
+ goto out_put;
+- if (err)
+- goto out_put;
+
+ d_instantiate(dentry, inode);
+ return 0;
+diff --git a/fs/locks.c b/fs/locks.c
+index d3d558ba4da7..8501eecb2af0 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2154,7 +2154,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ goto out;
+ }
+
+-again:
+ error = flock_to_posix_lock(filp, file_lock, &flock);
+ if (error)
+ goto out;
+@@ -2196,19 +2195,22 @@ again:
+ * Attempt to detect a close/fcntl race and recover by
+ * releasing the lock that was just acquired.
+ */
+- /*
+- * we need that spin_lock here - it prevents reordering between
+- * update of i_flctx->flc_posix and check for it done in close().
+- * rcu_read_lock() wouldn't do.
+- */
+- spin_lock(&current->files->file_lock);
+- f = fcheck(fd);
+- spin_unlock(&current->files->file_lock);
+- if (!error && f != filp && flock.l_type != F_UNLCK) {
+- flock.l_type = F_UNLCK;
+- goto again;
++ if (!error && file_lock->fl_type != F_UNLCK) {
++ /*
++ * We need that spin_lock here - it prevents reordering between
++ * update of i_flctx->flc_posix and check for it done in
++ * close(). rcu_read_lock() wouldn't do.
++ */
++ spin_lock(&current->files->file_lock);
++ f = fcheck(fd);
++ spin_unlock(&current->files->file_lock);
++ if (f != filp) {
++ file_lock->fl_type = F_UNLCK;
++ error = do_lock_file_wait(filp, cmd, file_lock);
++ WARN_ON_ONCE(error);
++ error = -EBADF;
++ }
+ }
+-
+ out:
+ locks_free_lock(file_lock);
+ return error;
+@@ -2294,7 +2296,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ goto out;
+ }
+
+-again:
+ error = flock64_to_posix_lock(filp, file_lock, &flock);
+ if (error)
+ goto out;
+@@ -2336,14 +2337,22 @@ again:
+ * Attempt to detect a close/fcntl race and recover by
+ * releasing the lock that was just acquired.
+ */
+- spin_lock(&current->files->file_lock);
+- f = fcheck(fd);
+- spin_unlock(&current->files->file_lock);
+- if (!error && f != filp && flock.l_type != F_UNLCK) {
+- flock.l_type = F_UNLCK;
+- goto again;
++ if (!error && file_lock->fl_type != F_UNLCK) {
++ /*
++ * We need that spin_lock here - it prevents reordering between
++ * update of i_flctx->flc_posix and check for it done in
++ * close(). rcu_read_lock() wouldn't do.
++ */
++ spin_lock(&current->files->file_lock);
++ f = fcheck(fd);
++ spin_unlock(&current->files->file_lock);
++ if (f != filp) {
++ file_lock->fl_type = F_UNLCK;
++ error = do_lock_file_wait(filp, cmd, file_lock);
++ WARN_ON_ONCE(error);
++ error = -EBADF;
++ }
+ }
+-
+ out:
+ locks_free_lock(file_lock);
+ return error;
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 892aefff3630..fdd234206dff 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -775,7 +775,7 @@ static int nfs_init_server(struct nfs_server *server,
+ server->options = data->options;
+ server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
+ NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
+- NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR;
++ NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
+
+ if (data->rsize)
+ server->rsize = nfs_block_size(data->rsize, NULL);
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index fecd9201dbad..c2abdc7db6c3 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -1484,11 +1484,9 @@ ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
+ start = xdr_reserve_space(xdr, 4);
+ BUG_ON(!start);
+
+- if (ff_layout_encode_ioerr(flo, xdr, args))
+- goto out;
+-
++ ff_layout_encode_ioerr(flo, xdr, args);
+ ff_layout_encode_iostats(flo, xdr, args);
+-out:
++
+ *start = cpu_to_be32((xdr->p - start - 1) * 4);
+ dprintk("%s: Return\n", __func__);
+ }
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 7f22b6c6fb50..723b8922d76b 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -442,7 +442,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
+ if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
+ inode->i_version = fattr->change_attr;
+- else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
++ else
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
+ if (fattr->valid & NFS_ATTR_FATTR_SIZE)
+ inode->i_size = nfs_size_to_loff_t(fattr->size);
+@@ -1627,6 +1627,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ unsigned long invalid = 0;
+ unsigned long now = jiffies;
+ unsigned long save_cache_validity;
++ bool cache_revalidated = true;
+
+ dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n",
+ __func__, inode->i_sb->s_id, inode->i_ino,
+@@ -1688,22 +1689,28 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ nfs_force_lookup_revalidate(inode);
+ inode->i_version = fattr->change_attr;
+ }
+- } else if (server->caps & NFS_CAP_CHANGE_ATTR)
++ } else {
+ nfsi->cache_validity |= save_cache_validity;
++ cache_revalidated = false;
++ }
+
+ if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
+ memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
+- } else if (server->caps & NFS_CAP_MTIME)
++ } else if (server->caps & NFS_CAP_MTIME) {
+ nfsi->cache_validity |= save_cache_validity &
+ (NFS_INO_INVALID_ATTR
+ | NFS_INO_REVAL_FORCED);
++ cache_revalidated = false;
++ }
+
+ if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
+ memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
+- } else if (server->caps & NFS_CAP_CTIME)
++ } else if (server->caps & NFS_CAP_CTIME) {
+ nfsi->cache_validity |= save_cache_validity &
+ (NFS_INO_INVALID_ATTR
+ | NFS_INO_REVAL_FORCED);
++ cache_revalidated = false;
++ }
+
+ /* Check if our cached file size is stale */
+ if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
+@@ -1723,19 +1730,23 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ (long long)cur_isize,
+ (long long)new_isize);
+ }
+- } else
++ } else {
+ nfsi->cache_validity |= save_cache_validity &
+ (NFS_INO_INVALID_ATTR
+ | NFS_INO_REVAL_PAGECACHE
+ | NFS_INO_REVAL_FORCED);
++ cache_revalidated = false;
++ }
+
+
+ if (fattr->valid & NFS_ATTR_FATTR_ATIME)
+ memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
+- else if (server->caps & NFS_CAP_ATIME)
++ else if (server->caps & NFS_CAP_ATIME) {
+ nfsi->cache_validity |= save_cache_validity &
+ (NFS_INO_INVALID_ATIME
+ | NFS_INO_REVAL_FORCED);
++ cache_revalidated = false;
++ }
+
+ if (fattr->valid & NFS_ATTR_FATTR_MODE) {
+ if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
+@@ -1744,36 +1755,42 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ inode->i_mode = newmode;
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ }
+- } else if (server->caps & NFS_CAP_MODE)
++ } else if (server->caps & NFS_CAP_MODE) {
+ nfsi->cache_validity |= save_cache_validity &
+ (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
++ cache_revalidated = false;
++ }
+
+ if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
+ if (!uid_eq(inode->i_uid, fattr->uid)) {
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ inode->i_uid = fattr->uid;
+ }
+- } else if (server->caps & NFS_CAP_OWNER)
++ } else if (server->caps & NFS_CAP_OWNER) {
+ nfsi->cache_validity |= save_cache_validity &
+ (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
++ cache_revalidated = false;
++ }
+
+ if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
+ if (!gid_eq(inode->i_gid, fattr->gid)) {
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ inode->i_gid = fattr->gid;
+ }
+- } else if (server->caps & NFS_CAP_OWNER_GROUP)
++ } else if (server->caps & NFS_CAP_OWNER_GROUP) {
+ nfsi->cache_validity |= save_cache_validity &
+ (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
++ cache_revalidated = false;
++ }
+
+ if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
+ if (inode->i_nlink != fattr->nlink) {
+@@ -1782,19 +1799,22 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ invalid |= NFS_INO_INVALID_DATA;
+ set_nlink(inode, fattr->nlink);
+ }
+- } else if (server->caps & NFS_CAP_NLINK)
++ } else if (server->caps & NFS_CAP_NLINK) {
+ nfsi->cache_validity |= save_cache_validity &
+ (NFS_INO_INVALID_ATTR
+ | NFS_INO_REVAL_FORCED);
++ cache_revalidated = false;
++ }
+
+ if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
+ /*
+ * report the blocks in 512byte units
+ */
+ inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
+- }
+- if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
++ } else if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
+ inode->i_blocks = fattr->du.nfs2.blocks;
++ else
++ cache_revalidated = false;
+
+ /* Update attrtimeo value if we're out of the unstable period */
+ if (invalid & NFS_INO_INVALID_ATTR) {
+@@ -1804,9 +1824,13 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ /* Set barrier to be more recent than all outstanding updates */
+ nfsi->attr_gencount = nfs_inc_attr_generation_counter();
+ } else {
+- if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
+- if ((nfsi->attrtimeo <<= 1) > NFS_MAXATTRTIMEO(inode))
+- nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
++ if (cache_revalidated) {
++ if (!time_in_range_open(now, nfsi->attrtimeo_timestamp,
++ nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
++ nfsi->attrtimeo <<= 1;
++ if (nfsi->attrtimeo > NFS_MAXATTRTIMEO(inode))
++ nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
++ }
+ nfsi->attrtimeo_timestamp = now;
+ }
+ /* Set the barrier to be more recent than this fattr */
+@@ -1815,7 +1839,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ }
+
+ /* Don't declare attrcache up to date if there were no attrs! */
+- if (fattr->valid != 0)
++ if (cache_revalidated)
+ invalid &= ~NFS_INO_INVALID_ATTR;
+
+ /* Don't invalidate the data if we were to blame */
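The net effect of the new cache_revalidated flag is a standard exponential back-off: the attribute cache timeout only doubles when the server returned, and we accepted, the complete attribute set, capped at the mount's maximum. In isolation, using the hunk's field names (max_attrtimeo stands in for NFS_MAXATTRTIMEO(inode)):

    if (cache_revalidated &&
        !time_in_range_open(now, attrtimeo_timestamp,
                            attrtimeo_timestamp + attrtimeo)) {
            attrtimeo <<= 1;                /* double the cache lifetime */
            if (attrtimeo > max_attrtimeo)
                    attrtimeo = max_attrtimeo;
            attrtimeo_timestamp = now;
    }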
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 8f393fcc313b..2c4f41c34366 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1284,6 +1284,7 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s
+ * Protect the call to nfs4_state_set_mode_locked and
+ * serialise the stateid update
+ */
++ spin_lock(&state->owner->so_lock);
+ write_seqlock(&state->seqlock);
+ if (deleg_stateid != NULL) {
+ nfs4_stateid_copy(&state->stateid, deleg_stateid);
+@@ -1292,7 +1293,6 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s
+ if (open_stateid != NULL)
+ nfs_set_open_stateid_locked(state, open_stateid, fmode);
+ write_sequnlock(&state->seqlock);
+- spin_lock(&state->owner->so_lock);
+ update_open_stateflags(state, fmode);
+ spin_unlock(&state->owner->so_lock);
+ }
+@@ -8512,7 +8512,6 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
+ .minor_version = 0,
+ .init_caps = NFS_CAP_READDIRPLUS
+ | NFS_CAP_ATOMIC_OPEN
+- | NFS_CAP_CHANGE_ATTR
+ | NFS_CAP_POSIX_LOCK,
+ .init_client = nfs40_init_client,
+ .shutdown_client = nfs40_shutdown_client,
+@@ -8538,7 +8537,6 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
+ .minor_version = 1,
+ .init_caps = NFS_CAP_READDIRPLUS
+ | NFS_CAP_ATOMIC_OPEN
+- | NFS_CAP_CHANGE_ATTR
+ | NFS_CAP_POSIX_LOCK
+ | NFS_CAP_STATEID_NFSV41
+ | NFS_CAP_ATOMIC_OPEN_V1,
+@@ -8561,7 +8559,6 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
+ .minor_version = 2,
+ .init_caps = NFS_CAP_READDIRPLUS
+ | NFS_CAP_ATOMIC_OPEN
+- | NFS_CAP_CHANGE_ATTR
+ | NFS_CAP_POSIX_LOCK
+ | NFS_CAP_STATEID_NFSV41
+ | NFS_CAP_ATOMIC_OPEN_V1
+diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
+index 482cfd34472d..523e485a11b8 100644
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -2518,6 +2518,11 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
+ spin_lock(&dlm->master_lock);
+ ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
+ namelen, target, dlm->node_num);
++ /* get an extra reference on the mle.
++ * otherwise the assert_master from the new
++ * master will destroy this.
++ */
++ dlm_get_mle_inuse(mle);
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+
+@@ -2553,6 +2558,7 @@ fail:
+ if (mle_added) {
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle(mle);
++ dlm_put_mle_inuse(mle);
+ } else if (mle) {
+ kmem_cache_free(dlm_mle_cache, mle);
+ mle = NULL;
+@@ -2570,17 +2576,6 @@ fail:
+ * ensure that all assert_master work is flushed. */
+ flush_workqueue(dlm->dlm_worker);
+
+- /* get an extra reference on the mle.
+- * otherwise the assert_master from the new
+- * master will destroy this.
+- * also, make sure that all callers of dlm_get_mle
+- * take both dlm->spinlock and dlm->master_lock */
+- spin_lock(&dlm->spinlock);
+- spin_lock(&dlm->master_lock);
+- dlm_get_mle_inuse(mle);
+- spin_unlock(&dlm->master_lock);
+- spin_unlock(&dlm->spinlock);
+-
+ /* notify new node and send all lock state */
+ /* call send_one_lockres with migration flag.
+ * this serves as notice to the target node that a
+@@ -3309,6 +3304,15 @@ top:
+ mle->new_master != dead_node)
+ continue;
+
++ if (mle->new_master == dead_node && mle->inuse) {
++ mlog(ML_NOTICE, "%s: target %u died during "
++ "migration from %u, the MLE is "
++ "still in use, ignore it!\n",
++ dlm->name, dead_node,
++ mle->master);
++ continue;
++ }
++
+ /* If we have reached this point, this mle needs to be
+ * removed from the list and freed. */
+ dlm_clean_migration_mle(dlm, mle);
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index 3d90ad7ff91f..f25ff5d3a2f9 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -2360,6 +2360,8 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
+ break;
+ }
+ }
++ dlm_lockres_clear_refmap_bit(dlm, res,
++ dead_node);
+ spin_unlock(&res->spinlock);
+ continue;
+ }
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 23157e40dd74..3623ab6fa97f 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -1390,6 +1390,7 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
+ unsigned int gen;
+ int noqueue_attempted = 0;
+ int dlm_locked = 0;
++ int kick_dc = 0;
+
+ if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
+ mlog_errno(-EINVAL);
+@@ -1524,7 +1525,12 @@ update_holders:
+ unlock:
+ lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
++ /* ocfs2_unblock_lock requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
++ kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
++
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
++ if (kick_dc)
++ ocfs2_wake_downconvert_thread(osb);
+ out:
+ /*
+ * This is helping work around a lock inversion between the page lock
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 871fcb67be97..758012bfd5f0 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -22,9 +22,9 @@
+
+ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
+ {
+- ssize_t list_size, size;
+- char *buf, *name, *value;
+- int error;
++ ssize_t list_size, size, value_size = 0;
++ char *buf, *name, *value = NULL;
++ int uninitialized_var(error);
+
+ if (!old->d_inode->i_op->getxattr ||
+ !new->d_inode->i_op->getxattr)
+@@ -41,29 +41,40 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
+ if (!buf)
+ return -ENOMEM;
+
+- error = -ENOMEM;
+- value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL);
+- if (!value)
+- goto out;
+-
+ list_size = vfs_listxattr(old, buf, list_size);
+ if (list_size <= 0) {
+ error = list_size;
+- goto out_free_value;
++ goto out;
+ }
+
+ for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
+- size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX);
+- if (size <= 0) {
++retry:
++ size = vfs_getxattr(old, name, value, value_size);
++ if (size == -ERANGE)
++ size = vfs_getxattr(old, name, NULL, 0);
++
++ if (size < 0) {
+ error = size;
+- goto out_free_value;
++ break;
++ }
++
++ if (size > value_size) {
++ void *new;
++
++ new = krealloc(value, size, GFP_KERNEL);
++ if (!new) {
++ error = -ENOMEM;
++ break;
++ }
++ value = new;
++ value_size = size;
++ goto retry;
+ }
++
+ error = vfs_setxattr(new, name, value, size, 0);
+ if (error)
+- goto out_free_value;
++ break;
+ }
+-
+-out_free_value:
+ kfree(value);
+ out:
+ kfree(buf);
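The grow-and-retry loop above mirrors how getxattr(2) is commonly driven from user space when the value size is unknown; a racing writer can grow the attribute between the probe and the read, which is exactly why the kernel loop retries on -ERANGE. A sketch, with hypothetical path and attribute names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/xattr.h>

    int main(void)
    {
            const char *path = "/upper/file";   /* hypothetical */
            const char *name = "user.example";  /* hypothetical */
            ssize_t size = getxattr(path, name, NULL, 0);  /* probe size */
            char *value = size > 0 ? malloc(size) : NULL;

            if (value) {
                    size = getxattr(path, name, value, size); /* may still race */
                    if (size >= 0)
                            printf("got %zd bytes\n", size);
                    free(value);
            }
            return 0;
    }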
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index ba0db2638946..a1b069e5e363 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -45,6 +45,19 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
+ int err;
+ struct dentry *upperdentry;
+
++ /*
++ * Check for permissions before trying to copy-up. This is redundant
++ * since it will be rechecked later by ->setattr() on upper dentry. But
++ * without this, copy-up can be triggered by just about anybody.
++ *
++ * We don't initialize inode->size, which just means that
++ * inode_newsize_ok() will always check against MAX_LFS_FILESIZE and not
++ * check for a swapfile (which this won't be anyway).
++ */
++ err = inode_change_ok(dentry->d_inode, attr);
++ if (err)
++ return err;
++
+ err = ovl_want_write(dentry);
+ if (err)
+ goto out;
+diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
+index 70e9af551600..adcb1398c481 100644
+--- a/fs/overlayfs/readdir.c
++++ b/fs/overlayfs/readdir.c
+@@ -571,7 +571,8 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
+ (int) PTR_ERR(dentry));
+ continue;
+ }
+- ovl_cleanup(upper->d_inode, dentry);
++ if (dentry->d_inode)
++ ovl_cleanup(upper->d_inode, dentry);
+ dput(dentry);
+ }
+ mutex_unlock(&upper->d_inode->i_mutex);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index d74af7f78fec..bd6d5c1e667d 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -9,6 +9,7 @@
+
+ #include <linux/fs.h>
+ #include <linux/namei.h>
++#include <linux/pagemap.h>
+ #include <linux/xattr.h>
+ #include <linux/security.h>
+ #include <linux/mount.h>
+@@ -847,6 +848,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
+ }
+
+ sb->s_stack_depth = 0;
++ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ if (ufs->config.upperdir) {
+ if (!ufs->config.workdir) {
+ pr_err("overlayfs: missing 'workdir'\n");
+@@ -986,6 +988,9 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
+
+ root_dentry->d_fsdata = oe;
+
++ ovl_copyattr(ovl_dentry_real(root_dentry)->d_inode,
++ root_dentry->d_inode);
++
+ sb->s_magic = OVERLAYFS_SUPER_MAGIC;
+ sb->s_op = &ovl_super_operations;
+ sb->s_root = root_dentry;
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 6afac3d561ac..78a40ef0c463 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -2052,14 +2052,29 @@ void udf_write_aext(struct inode *inode, struct extent_position *epos,
+ epos->offset += adsize;
+ }
+
++/*
++ * Only 1 indirect extent in a row really makes sense but allow up to 16 in case
++ * someone does some weird stuff.
++ */
++#define UDF_MAX_INDIR_EXTS 16
++
+ int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
+ struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
+ {
+ int8_t etype;
++ unsigned int indirections = 0;
+
+ while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
+ (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
+ int block;
++
++ if (++indirections > UDF_MAX_INDIR_EXTS) {
++ udf_err(inode->i_sb,
++ "too many indirect extents in inode %lu\n",
++ inode->i_ino);
++ return -1;
++ }
++
+ epos->block = *eloc;
+ epos->offset = sizeof(struct allocExtDesc);
+ brelse(epos->bh);
+diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
+index b84fee372734..2eafe2c4d239 100644
+--- a/fs/udf/unicode.c
++++ b/fs/udf/unicode.c
+@@ -133,11 +133,15 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
+ if (c < 0x80U)
+ utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
+ else if (c < 0x800U) {
++ if (utf_o->u_len > (UDF_NAME_LEN - 4))
++ break;
+ utf_o->u_name[utf_o->u_len++] =
+ (uint8_t)(0xc0 | (c >> 6));
+ utf_o->u_name[utf_o->u_len++] =
+ (uint8_t)(0x80 | (c & 0x3f));
+ } else {
++ if (utf_o->u_len > (UDF_NAME_LEN - 5))
++ break;
+ utf_o->u_name[utf_o->u_len++] =
+ (uint8_t)(0xe0 | (c >> 12));
+ utf_o->u_name[utf_o->u_len++] =
+@@ -178,17 +182,22 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
+ static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
+ {
+ unsigned c, i, max_val, utf_char;
+- int utf_cnt, u_len;
++ int utf_cnt, u_len, u_ch;
+
+ memset(ocu, 0, sizeof(dstring) * length);
+ ocu[0] = 8;
+ max_val = 0xffU;
++ u_ch = 1;
+
+ try_again:
+ u_len = 0U;
+ utf_char = 0U;
+ utf_cnt = 0U;
+ for (i = 0U; i < utf->u_len; i++) {
++ /* Name didn't fit? */
++ if (u_len + 1 + u_ch >= length)
++ return 0;
++
+ c = (uint8_t)utf->u_name[i];
+
+ /* Complete a multi-byte UTF-8 character */
+@@ -230,6 +239,7 @@ try_again:
+ if (max_val == 0xffU) {
+ max_val = 0xffffU;
+ ocu[0] = (uint8_t)0x10U;
++ u_ch = 2;
+ goto try_again;
+ }
+ goto error_out;
+@@ -282,7 +292,7 @@ static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
+ c = (c << 8) | ocu[i++];
+
+ len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
+- UDF_NAME_LEN - utf_o->u_len);
++ UDF_NAME_LEN - 2 - utf_o->u_len);
+ /* Valid character? */
+ if (len >= 0)
+ utf_o->u_len += len;
+@@ -300,15 +310,19 @@ static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni,
+ int len;
+ unsigned i, max_val;
+ uint16_t uni_char;
+- int u_len;
++ int u_len, u_ch;
+
+ memset(ocu, 0, sizeof(dstring) * length);
+ ocu[0] = 8;
+ max_val = 0xffU;
++ u_ch = 1;
+
+ try_again:
+ u_len = 0U;
+ for (i = 0U; i < uni->u_len; i++) {
++ /* Name didn't fit? */
++ if (u_len + 1 + u_ch >= length)
++ return 0;
+ len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
+ if (!len)
+ continue;
+@@ -321,6 +335,7 @@ try_again:
+ if (uni_char > max_val) {
+ max_val = 0xffffU;
+ ocu[0] = (uint8_t)0x10U;
++ u_ch = 2;
+ goto try_again;
+ }
+
+diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
+index 6fbf2d853a54..48aff071591d 100644
+--- a/fs/xfs/libxfs/xfs_dquot_buf.c
++++ b/fs/xfs/libxfs/xfs_dquot_buf.c
+@@ -54,7 +54,7 @@ xfs_dqcheck(
+ xfs_dqid_t id,
+ uint type, /* used only when IO_dorepair is true */
+ uint flags,
+- char *str)
++ const char *str)
+ {
+ xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
+ int errs = 0;
+@@ -207,7 +207,8 @@ xfs_dquot_buf_verify_crc(
+ STATIC bool
+ xfs_dquot_buf_verify(
+ struct xfs_mount *mp,
+- struct xfs_buf *bp)
++ struct xfs_buf *bp,
++ int warn)
+ {
+ struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr;
+ xfs_dqid_t id = 0;
+@@ -240,8 +241,7 @@ xfs_dquot_buf_verify(
+ if (i == 0)
+ id = be32_to_cpu(ddq->d_id);
+
+- error = xfs_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
+- "xfs_dquot_buf_verify");
++ error = xfs_dqcheck(mp, ddq, id + i, 0, warn, __func__);
+ if (error)
+ return false;
+ }
+@@ -256,7 +256,7 @@ xfs_dquot_buf_read_verify(
+
+ if (!xfs_dquot_buf_verify_crc(mp, bp))
+ xfs_buf_ioerror(bp, -EFSBADCRC);
+- else if (!xfs_dquot_buf_verify(mp, bp))
++ else if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN))
+ xfs_buf_ioerror(bp, -EFSCORRUPTED);
+
+ if (bp->b_error)
+@@ -264,6 +264,25 @@ xfs_dquot_buf_read_verify(
+ }
+
+ /*
++ * readahead errors are silent and simply leave the buffer as !done so a real
++ * read will then be run with the xfs_dquot_buf_ops verifier. See
++ * xfs_inode_buf_verify() for why we use EIO and ~XBF_DONE here rather than
++ * reporting the failure.
++ */
++static void
++xfs_dquot_buf_readahead_verify(
++ struct xfs_buf *bp)
++{
++ struct xfs_mount *mp = bp->b_target->bt_mount;
++
++ if (!xfs_dquot_buf_verify_crc(mp, bp) ||
++ !xfs_dquot_buf_verify(mp, bp, 0)) {
++ xfs_buf_ioerror(bp, -EIO);
++ bp->b_flags &= ~XBF_DONE;
++ }
++}
++
++/*
+ * we don't calculate the CRC here as that is done when the dquot is flushed to
+ * the buffer after the update is done. This ensures that the dquot in the
+ * buffer always has an up-to-date CRC value.
+@@ -274,7 +293,7 @@ xfs_dquot_buf_write_verify(
+ {
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+
+- if (!xfs_dquot_buf_verify(mp, bp)) {
++ if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN)) {
+ xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp);
+ return;
+@@ -286,3 +305,7 @@ const struct xfs_buf_ops xfs_dquot_buf_ops = {
+ .verify_write = xfs_dquot_buf_write_verify,
+ };
+
++const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
++ .verify_read = xfs_dquot_buf_readahead_verify,
++ .verify_write = xfs_dquot_buf_write_verify,
++};
+diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
+index 002b6b3a1988..7da6d0b2c2ed 100644
+--- a/fs/xfs/libxfs/xfs_inode_buf.c
++++ b/fs/xfs/libxfs/xfs_inode_buf.c
+@@ -63,11 +63,14 @@ xfs_inobp_check(
+ * has not had the inode cores stamped into it. Hence for readahead, the buffer
+ * may be potentially invalid.
+ *
+- * If the readahead buffer is invalid, we don't want to mark it with an error,
+- * but we do want to clear the DONE status of the buffer so that a followup read
+- * will re-read it from disk. This will ensure that we don't get an unnecessary
+- * warnings during log recovery and we don't get unnecssary panics on debug
+- * kernels.
++ * If the readahead buffer is invalid, we need to mark it with an error and
++ * clear the DONE status of the buffer so that a followup read will re-read it
++ * from disk. We don't otherwise report the error, to avoid warnings during
++ * log recovery and unnecessary panics on debug kernels. We use EIO here
++ * because all we want to do is say readahead failed; there is no-one to report
++ * the error to, so this will distinguish it from a non-ra verifier failure.
++ * Changes to this readahead error behaviour also need to be reflected in
++ * xfs_dquot_buf_readahead_verify().
+ */
+ static void
+ xfs_inode_buf_verify(
+@@ -95,6 +98,7 @@ xfs_inode_buf_verify(
+ XFS_RANDOM_ITOBP_INOTOBP))) {
+ if (readahead) {
+ bp->b_flags &= ~XBF_DONE;
++ xfs_buf_ioerror(bp, -EIO);
+ return;
+ }
+
+diff --git a/fs/xfs/libxfs/xfs_quota_defs.h b/fs/xfs/libxfs/xfs_quota_defs.h
+index 1b0a08379759..f51078f1e92a 100644
+--- a/fs/xfs/libxfs/xfs_quota_defs.h
++++ b/fs/xfs/libxfs/xfs_quota_defs.h
+@@ -153,7 +153,7 @@ typedef __uint16_t xfs_qwarncnt_t;
+ #define XFS_QMOPT_RESBLK_MASK (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS)
+
+ extern int xfs_dqcheck(struct xfs_mount *mp, xfs_disk_dquot_t *ddq,
+- xfs_dqid_t id, uint type, uint flags, char *str);
++ xfs_dqid_t id, uint type, uint flags, const char *str);
+ extern int xfs_calc_dquots_per_chunk(unsigned int nbblks);
+
+ #endif /* __XFS_QUOTA_H__ */
+diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
+index 8dda4b321343..a3472a38efd2 100644
+--- a/fs/xfs/libxfs/xfs_shared.h
++++ b/fs/xfs/libxfs/xfs_shared.h
+@@ -49,6 +49,7 @@ extern const struct xfs_buf_ops xfs_inobt_buf_ops;
+ extern const struct xfs_buf_ops xfs_inode_buf_ops;
+ extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
+ extern const struct xfs_buf_ops xfs_dquot_buf_ops;
++extern const struct xfs_buf_ops xfs_dquot_buf_ra_ops;
+ extern const struct xfs_buf_ops xfs_sb_buf_ops;
+ extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
+ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index 1790b00bea7a..7dd64bf98c56 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -605,6 +605,13 @@ found:
+ }
+ }
+
++ /*
++ * Clear b_error if this is a lookup from a caller that doesn't expect
++ * valid data to be found in the buffer.
++ */
++ if (!(flags & XBF_READ))
++ xfs_buf_ioerror(bp, 0);
++
+ XFS_STATS_INC(xb_get);
+ trace_xfs_buf_get(bp, flags, _RET_IP_);
+ return bp;
+@@ -1522,6 +1529,16 @@ xfs_wait_buftarg(
+ LIST_HEAD(dispose);
+ int loop = 0;
+
++ /*
++ * We need to flush the buffer workqueue to ensure that all IO
++ * completion processing is 100% done. Just waiting on buffer locks is
++ * not sufficient for async IO as the reference count held over IO is
++ * not released until after the buffer lock is dropped. Hence we need to
++ * ensure here that all reference counts have been dropped before we
++ * start walking the LRU list.
++ */
++ drain_workqueue(btp->bt_mount->m_buf_workqueue);
++
+ /* loop until there is nothing left on the lru list. */
+ while (list_lru_count(&btp->bt_lru)) {
+ list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index a5d03396dda0..1114afdd5a6b 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -3154,6 +3154,7 @@ xlog_recover_dquot_ra_pass2(
+ struct xfs_disk_dquot *recddq;
+ struct xfs_dq_logformat *dq_f;
+ uint type;
++ int len;
+
+
+ if (mp->m_qflags == 0)
+@@ -3174,8 +3175,12 @@ xlog_recover_dquot_ra_pass2(
+ ASSERT(dq_f);
+ ASSERT(dq_f->qlf_len == 1);
+
+- xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno,
+- XFS_FSB_TO_BB(mp, dq_f->qlf_len), NULL);
++ len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
++ if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
++ return;
++
++ xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
++ &xfs_dquot_buf_ra_ops);
+ }
+
+ STATIC void
+diff --git a/include/crypto/hash.h b/include/crypto/hash.h
+index 98abda9ed3aa..bbc59bdd6395 100644
+--- a/include/crypto/hash.h
++++ b/include/crypto/hash.h
+@@ -199,6 +199,7 @@ struct crypto_ahash {
+ unsigned int keylen);
+
+ unsigned int reqsize;
++ bool has_setkey;
+ struct crypto_tfm base;
+ };
+
+@@ -356,6 +357,11 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
+ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen);
+
++static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
++{
++ return tfm->has_setkey;
++}
++
+ /**
+ * crypto_ahash_finup() - update and finalize message digest
+ * @req: reference to the ahash_request handle that holds all information
+diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
+index 018afb264ac2..a2bfd7843f18 100644
+--- a/include/crypto/if_alg.h
++++ b/include/crypto/if_alg.h
+@@ -30,6 +30,9 @@ struct alg_sock {
+
+ struct sock *parent;
+
++ unsigned int refcnt;
++ unsigned int nokey_refcnt;
++
+ const struct af_alg_type *type;
+ void *private;
+ };
+@@ -50,9 +53,11 @@ struct af_alg_type {
+ void (*release)(void *private);
+ int (*setkey)(void *private, const u8 *key, unsigned int keylen);
+ int (*accept)(void *private, struct sock *sk);
++ int (*accept_nokey)(void *private, struct sock *sk);
+ int (*setauthsize)(void *private, unsigned int authsize);
+
+ struct proto_ops *ops;
++ struct proto_ops *ops_nokey;
+ struct module *owner;
+ char name[14];
+ };
+@@ -67,6 +72,7 @@ int af_alg_register_type(const struct af_alg_type *type);
+ int af_alg_unregister_type(const struct af_alg_type *type);
+
+ int af_alg_release(struct socket *sock);
++void af_alg_release_parent(struct sock *sk);
+ int af_alg_accept(struct sock *sk, struct socket *newsock);
+
+ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
+@@ -83,11 +89,6 @@ static inline struct alg_sock *alg_sk(struct sock *sk)
+ return (struct alg_sock *)sk;
+ }
+
+-static inline void af_alg_release_parent(struct sock *sk)
+-{
+- sock_put(alg_sk(sk)->parent);
+-}
+-
+ static inline void af_alg_init_completion(struct af_alg_completion *completion)
+ {
+ init_completion(&completion->completion);
+diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
+index 7bfb063029d8..461a0558bca4 100644
+--- a/include/drm/drm_cache.h
++++ b/include/drm/drm_cache.h
+@@ -35,4 +35,13 @@
+
+ void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+
++static inline bool drm_arch_can_wc_memory(void)
++{
++#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
++ return false;
++#else
++ return true;
++#endif
++}
++
+ #endif
+diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
+index 54233583c6cb..ca71c03143d1 100644
+--- a/include/drm/drm_crtc.h
++++ b/include/drm/drm_crtc.h
+@@ -731,8 +731,6 @@ struct drm_connector {
+ uint8_t num_h_tile, num_v_tile;
+ uint8_t tile_h_loc, tile_v_loc;
+ uint16_t tile_h_size, tile_v_size;
+-
+- struct list_head destroy_list;
+ };
+
+ /**
+diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
+index a89f505c856b..c7f01d1aa562 100644
+--- a/include/drm/drm_dp_mst_helper.h
++++ b/include/drm/drm_dp_mst_helper.h
+@@ -449,9 +449,7 @@ struct drm_dp_mst_topology_mgr {
+ the mstb tx_slots and txmsg->state once they are queued */
+ struct mutex qlock;
+ struct list_head tx_msg_downq;
+- struct list_head tx_msg_upq;
+ bool tx_down_in_progress;
+- bool tx_up_in_progress;
+
+ /* payload info + lock for it */
+ struct mutex payload_lock;
+diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
+index d639049a613d..553210c02ee0 100644
+--- a/include/drm/drm_fixed.h
++++ b/include/drm/drm_fixed.h
+@@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
+ #define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
+ #define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
+ #define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
++#define DRM_FIXED_EPSILON 1LL
++#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON)
+
+ static inline s64 drm_int2fixp(int a)
+ {
+ return ((s64)a) << DRM_FIXED_POINT;
+ }
+
+-static inline int drm_fixp2int(int64_t a)
++static inline int drm_fixp2int(s64 a)
+ {
+ return ((s64)a) >> DRM_FIXED_POINT;
+ }
+
+-static inline unsigned drm_fixp_msbset(int64_t a)
++static inline int drm_fixp2int_ceil(s64 a)
++{
++ if (a > 0)
++ return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
++ else
++ return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
++}
++
++static inline unsigned drm_fixp_msbset(s64 a)
+ {
+ unsigned shift, sign = (a >> 63) & 1;
+
+@@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
+ return result;
+ }
+
++static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
++{
++ s64 res;
++ bool a_neg = a < 0;
++ bool b_neg = b < 0;
++ u64 a_abs = a_neg ? -a : a;
++ u64 b_abs = b_neg ? -b : b;
++ u64 rem;
++
++ /* determine integer part */
++ u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem);
++
++ /* determine fractional part */
++ {
++ u32 i = DRM_FIXED_POINT;
++
++ do {
++ rem <<= 1;
++ res_abs <<= 1;
++ if (rem >= b_abs) {
++ res_abs |= 1;
++ rem -= b_abs;
++ }
++ } while (--i != 0);
++ }
++
++ /* round up LSB */
++ {
++ u64 summand = (rem << 1) >= b_abs;
++
++ res_abs += summand;
++ }
++
++ res = (s64) res_abs;
++ if (a_neg ^ b_neg)
++ res = -res;
++ return res;
++}
++
+ static inline s64 drm_fixp_exp(s64 x)
+ {
+ s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
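
drm_fixp_from_fraction() above is a bitwise long division that produces a signed 32.32 fixed-point quotient with round-to-nearest on the last bit. A standalone sketch of the same loop (userspace C, assuming the 32.32 layout of drm_fixed.h; fixp_from_fraction() is an illustrative rename, and like a naive transcription it can overflow for large integer parts):

  #include <stdio.h>
  #include <stdint.h>

  #define FIXED_POINT 32    /* DRM_FIXED_POINT */

  static int64_t fixp_from_fraction(int64_t a, int64_t b)
  {
      int neg = (a < 0) ^ (b < 0);
      uint64_t a_abs = a < 0 ? -a : a;
      uint64_t b_abs = b < 0 ? -b : b;
      uint64_t res = a_abs / b_abs;    /* integer part */
      uint64_t rem = a_abs % b_abs;
      int i;

      for (i = 0; i < FIXED_POINT; i++) {    /* one fraction bit per pass */
          rem <<= 1;
          res <<= 1;
          if (rem >= b_abs) {
              res |= 1;
              rem -= b_abs;
          }
      }
      res += (rem << 1) >= b_abs;    /* round the last bit */
      return neg ? -(int64_t)res : (int64_t)res;
  }

  int main(void)
  {
      int64_t t = fixp_from_fraction(1, 3);

      printf("1/3 -> %#llx (%.9f)\n", (unsigned long long)t, t / 4294967296.0);
      return 0;
  }

This prints 0x55555555 (about 0.333333333); drm_fixp2int_ceil() then lifts any such nonzero fraction to the next integer, e.g. ceil(1/3) == 1.
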
+diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
+index e15499422fdc..e91c6f15f6e8 100644
+--- a/include/linux/ceph/messenger.h
++++ b/include/linux/ceph/messenger.h
+@@ -224,6 +224,7 @@ struct ceph_connection {
+ struct ceph_entity_addr actual_peer_addr;
+
+ /* message out temps */
++ struct ceph_msg_header out_hdr;
+ struct ceph_msg *out_msg; /* sending message (== tail of
+ out_sent) */
+ bool out_msg_done;
+@@ -233,7 +234,6 @@ struct ceph_connection {
+ int out_kvec_left; /* kvec's left in out_kvec */
+ int out_skip; /* skip this many bytes */
+ int out_kvec_bytes; /* total bytes left */
+- bool out_kvec_is_msg; /* kvec refers to out_msg */
+ int out_more; /* there is more data after the kvecs */
+ __le64 out_temp_ack; /* for writing an ack */
+
+diff --git a/include/linux/console.h b/include/linux/console.h
+index 9f50fb413c11..901555a3886e 100644
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -149,6 +149,7 @@ extern int console_trylock(void);
+ extern void console_unlock(void);
+ extern void console_conditional_schedule(void);
+ extern void console_unblank(void);
++extern void console_flush_on_panic(void);
+ extern struct tty_driver *console_device(int *);
+ extern void console_stop(struct console *);
+ extern void console_start(struct console *);
+diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
+index 5e1273d4de14..eda4a72a9b25 100644
+--- a/include/linux/nfs_fs_sb.h
++++ b/include/linux/nfs_fs_sb.h
+@@ -220,7 +220,7 @@ struct nfs_server {
+ #define NFS_CAP_SYMLINKS (1U << 2)
+ #define NFS_CAP_ACLS (1U << 3)
+ #define NFS_CAP_ATOMIC_OPEN (1U << 4)
+-#define NFS_CAP_CHANGE_ATTR (1U << 5)
++/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
+ #define NFS_CAP_FILEID (1U << 6)
+ #define NFS_CAP_MODE (1U << 7)
+ #define NFS_CAP_NLINK (1U << 8)
+diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
+index 33170dbd9db4..5d5174b59802 100644
+--- a/include/linux/radix-tree.h
++++ b/include/linux/radix-tree.h
+@@ -370,12 +370,28 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
+ struct radix_tree_iter *iter, unsigned flags);
+
+ /**
++ * radix_tree_iter_retry - retry this chunk of the iteration
++ * @iter: iterator state
++ *
++ * If we iterate over a tree protected only by the RCU lock, a race
++ * against deletion or creation may result in seeing a slot for which
++ * radix_tree_deref_retry() returns true. If so, call this function
++ * and continue the iteration.
++ */
++static inline __must_check
++void **radix_tree_iter_retry(struct radix_tree_iter *iter)
++{
++ iter->next_index = iter->index;
++ return NULL;
++}
++
++/**
+ * radix_tree_chunk_size - get current chunk size
+ *
+ * @iter: pointer to radix tree iterator
+ * Returns: current chunk size
+ */
+-static __always_inline unsigned
++static __always_inline long
+ radix_tree_chunk_size(struct radix_tree_iter *iter)
+ {
+ return iter->next_index - iter->index;
+@@ -409,9 +425,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
+ return slot + offset + 1;
+ }
+ } else {
+- unsigned size = radix_tree_chunk_size(iter) - 1;
++ long size = radix_tree_chunk_size(iter);
+
+- while (size--) {
++ while (--size > 0) {
+ slot++;
+ iter->index++;
+ if (likely(*slot))
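
The signedness change is the subtle part of this hunk: the old code computed radix_tree_chunk_size(iter) - 1 into an unsigned variable, so a zero-sized chunk wrapped to UINT_MAX and the loop walked far past the slot array. A tiny userspace demonstration of the wrap:

  #include <stdio.h>

  int main(void)
  {
      unsigned int chunk = 0;              /* an empty iteration chunk */
      unsigned int old_bound = chunk - 1;  /* wraps to UINT_MAX */
      long size = chunk;

      printf("old loop would run about %u times\n", old_bound);

      while (--size > 0)                   /* new form: runs zero times */
          ;
      printf("new loop stopped with size = %ld\n", size);
      return 0;
  }
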
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index c89c53a113a8..6f48ddc4b2b5 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -105,20 +105,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
+ __put_anon_vma(anon_vma);
+ }
+
+-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
+-{
+- struct anon_vma *anon_vma = vma->anon_vma;
+- if (anon_vma)
+- down_write(&anon_vma->root->rwsem);
+-}
+-
+-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
+-{
+- struct anon_vma *anon_vma = vma->anon_vma;
+- if (anon_vma)
+- up_write(&anon_vma->root->rwsem);
+-}
+-
+ static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
+ {
+ down_write(&anon_vma->root->rwsem);
+diff --git a/include/linux/thermal.h b/include/linux/thermal.h
+index 5eac316490ea..2e7d0f7a0ecc 100644
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -40,6 +40,9 @@
+ /* No upper/lower limit requirement */
+ #define THERMAL_NO_LIMIT ((u32)~0)
+
++/* use a value below 0 K to indicate an invalid/uninitialized temperature */
++#define THERMAL_TEMP_INVALID -274000
++
+ /* Unit conversion macros */
+ #define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
+ ((long)t-2732+5)/10 : ((long)t-2732-5)/10)
+@@ -159,6 +162,7 @@ struct thermal_attr {
+ * @forced_passive: If > 0, temperature at which to switch on all ACPI
+ * processor cooling devices. Currently only used by the
+ * step-wise governor.
++ * @need_update: if set to 1, thermal_zone_device_update needs to be invoked.
+ * @ops: operations this &thermal_zone_device supports
+ * @tzp: thermal zone parameters
+ * @governor: pointer to the governor for this thermal zone
+@@ -185,6 +189,7 @@ struct thermal_zone_device {
+ int emul_temperature;
+ int passive;
+ unsigned int forced_passive;
++ atomic_t need_update;
+ struct thermal_zone_device_ops *ops;
+ const struct thermal_zone_params *tzp;
+ struct thermal_governor *governor;
+diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
+index f6cbef78db62..3b91ad5d5115 100644
+--- a/include/sound/rawmidi.h
++++ b/include/sound/rawmidi.h
+@@ -167,6 +167,10 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count);
+ int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
+ unsigned char *buffer, int count);
++int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
++ unsigned char *buffer, int count);
++int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
++ int count);
+
+ /* main midi functions */
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 2579e407ff67..f3043db6d36f 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2632,6 +2632,11 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (q.pi_state && (q.pi_state->owner != current)) {
+ spin_lock(q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
++ /*
++ * Drop the reference to the pi state which
++ * the requeue_pi() code acquired for us.
++ */
++ free_pi_state(q.pi_state);
+ spin_unlock(q.lock_ptr);
+ }
+ } else {
+diff --git a/kernel/panic.c b/kernel/panic.c
+index 8136ad76e5fd..a4f7820f5930 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -23,6 +23,7 @@
+ #include <linux/sysrq.h>
+ #include <linux/init.h>
+ #include <linux/nmi.h>
++#include <linux/console.h>
+
+ #define PANIC_TIMER_STEP 100
+ #define PANIC_BLINK_SPD 18
+@@ -146,6 +147,17 @@ void panic(const char *fmt, ...)
+
+ bust_spinlocks(0);
+
++ /*
++ * We may have ended up stopping the CPU holding the lock (in
++ * smp_send_stop()) while still having some valuable data in the console
++ * buffer. Try to acquire the lock then release it regardless of the
++ * result. The release will also print the buffers out. Lock debugging
++ * should be disabled to avoid reporting a bad unlock balance when
++ * panic() is not being called from an OOPS.
++ */
++ debug_locks_off();
++ console_flush_on_panic();
++
+ if (!panic_blink)
+ panic_blink = no_blink;
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index bff0169e1ad8..3c1aca0c3543 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2173,13 +2173,24 @@ void console_unlock(void)
+ static u64 seen_seq;
+ unsigned long flags;
+ bool wake_klogd = false;
+- bool retry;
++ bool do_cond_resched, retry;
+
+ if (console_suspended) {
+ up_console_sem();
+ return;
+ }
+
++ /*
++ * Console drivers are called under logbuf_lock, so
++ * @console_may_schedule should be cleared before; however, we may
++ * end up dumping a lot of lines, for example, if called from
++ * console registration path, and should invoke cond_resched()
++ * between lines if allowable. Not doing so can cause a very long
++ * scheduling stall on a slow console leading to RCU stall and
++ * softlockup warnings which exacerbate the issue with more
++ * messages practically incapacitating the system.
++ */
++ do_cond_resched = console_may_schedule;
+ console_may_schedule = 0;
+
+ /* flush buffered message fragment immediately to console */
+@@ -2241,6 +2252,9 @@ skip:
+ call_console_drivers(level, text, len);
+ start_critical_timings();
+ local_irq_restore(flags);
++
++ if (do_cond_resched)
++ cond_resched();
+ }
+ console_locked = 0;
+
+@@ -2308,6 +2322,25 @@ void console_unblank(void)
+ console_unlock();
+ }
+
++/**
++ * console_flush_on_panic - flush console content on panic
++ *
++ * Immediately output all pending messages no matter what.
++ */
++void console_flush_on_panic(void)
++{
++ /*
++ * If someone else is holding the console lock, trylock will fail
++ * and may_schedule may be set. Ignore and proceed to unlock so
++ * that messages are flushed out. As this can be called from any
++ * context and we don't want to get preempted while flushing,
++ * ensure may_schedule is cleared.
++ */
++ console_trylock();
++ console_may_schedule = 0;
++ console_unlock();
++}
++
+ /*
+ * Return the console tty driver structure and its associated index
+ */
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 4f44028943e6..30c682adcdeb 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -317,24 +317,24 @@ static inline void seccomp_sync_threads(void)
+ put_seccomp_filter(thread);
+ smp_store_release(&thread->seccomp.filter,
+ caller->seccomp.filter);
++
++ /*
++ * Don't let an unprivileged task work around
++ * the no_new_privs restriction by creating
++ * a thread that sets it up, enters seccomp,
++ * then dies.
++ */
++ if (task_no_new_privs(caller))
++ task_set_no_new_privs(thread);
++
+ /*
+ * Opt the other thread into seccomp if needed.
+ * As threads are considered to be trust-realm
+ * equivalent (see ptrace_may_access), it is safe to
+ * allow one thread to transition the other.
+ */
+- if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
+- /*
+- * Don't let an unprivileged task work around
+- * the no_new_privs restriction by creating
+- * a thread that sets it up, enters seccomp,
+- * then dies.
+- */
+- if (task_no_new_privs(caller))
+- task_set_no_new_privs(thread);
+-
++ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
+ seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
+- }
+ }
+ }
+
+diff --git a/kernel/sys.c b/kernel/sys.c
+index a4e372b798a5..25ae8d2e65e2 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1854,11 +1854,13 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
+ user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
+ }
+
+- if (prctl_map.exe_fd != (u32)-1)
++ if (prctl_map.exe_fd != (u32)-1) {
+ error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
+- down_read(&mm->mmap_sem);
+- if (error)
+- goto out;
++ if (error)
++ return error;
++ }
++
++ down_write(&mm->mmap_sem);
+
+ /*
+ * We don't validate if these members are pointing to
+@@ -1895,10 +1897,8 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
+ if (prctl_map.auxv_size)
+ memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
+
+- error = 0;
+-out:
+- up_read(&mm->mmap_sem);
+- return error;
++ up_write(&mm->mmap_sem);
++ return 0;
+ }
+ #endif /* CONFIG_CHECKPOINT_RESTORE */
+
+@@ -1930,7 +1930,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
+
+ error = -EINVAL;
+
+- down_read(&mm->mmap_sem);
++ down_write(&mm->mmap_sem);
+ vma = find_vma(mm, addr);
+
+ switch (opt) {
+@@ -2033,7 +2033,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
+
+ error = 0;
+ out:
+- up_read(&mm->mmap_sem);
++ up_write(&mm->mmap_sem);
+ return error;
+ }
+
+diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
+index ce033c7aa2e8..9cff0ab82b63 100644
+--- a/kernel/time/posix-clock.c
++++ b/kernel/time/posix-clock.c
+@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
+ static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
+ {
+ struct posix_clock *clk = get_posix_clock(fp);
+- int result = 0;
++ unsigned int result = 0;
+
+ if (!clk)
+- return -ENODEV;
++ return POLLERR;
+
+ if (clk->ops.poll)
+ result = clk->ops.poll(clk, fp, wait);
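
The return-type change is the actual fix: poll handlers return an unsigned event mask, not an errno, so the old -ENODEV was handed back to poll(2) as a mask with nearly every bit set. POLLERR is the conventional way to signal a dead device. A hedged sketch of the shape such a handler should take (my_dev and its fields are placeholders, not the posix-clock code):

  #include <linux/fs.h>
  #include <linux/poll.h>
  #include <linux/wait.h>

  struct my_dev {                       /* illustrative driver state */
      wait_queue_head_t wq;
      bool data_ready;
  };

  static unsigned int my_poll(struct file *fp, poll_table *wait)
  {
      struct my_dev *dev = fp->private_data;

      if (!dev)
          return POLLERR;               /* a mask, never a negative errno */

      poll_wait(fp, &dev->wq, wait);
      return dev->data_ready ? (POLLIN | POLLRDNORM) : 0;
  }
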
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 414d9df94724..65dbf8aee751 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -316,8 +316,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
+
+ delta = timekeeping_get_delta(tkr);
+
+- nsec = delta * tkr->mult + tkr->xtime_nsec;
+- nsec >>= tkr->shift;
++ nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
+
+ /* If arch requires, add in get_arch_timeoffset() */
+ return nsec + arch_gettimeoffset();
+diff --git a/lib/dma-debug.c b/lib/dma-debug.c
+index dace71fe41f7..517a568f038d 100644
+--- a/lib/dma-debug.c
++++ b/lib/dma-debug.c
+@@ -1181,7 +1181,7 @@ static inline bool overlap(void *addr, unsigned long len, void *start, void *end
+
+ static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
+ {
+- if (overlap(addr, len, _text, _etext) ||
++ if (overlap(addr, len, _stext, _etext) ||
+ overlap(addr, len, __start_rodata, __end_rodata))
+ err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
+ }
+diff --git a/lib/dump_stack.c b/lib/dump_stack.c
+index 6745c6230db3..c30d07e99dba 100644
+--- a/lib/dump_stack.c
++++ b/lib/dump_stack.c
+@@ -25,6 +25,7 @@ static atomic_t dump_lock = ATOMIC_INIT(-1);
+
+ asmlinkage __visible void dump_stack(void)
+ {
++ unsigned long flags;
+ int was_locked;
+ int old;
+ int cpu;
+@@ -33,9 +34,8 @@ asmlinkage __visible void dump_stack(void)
+ * Permit this cpu to perform nested stack dumps while serialising
+ * against other CPUs
+ */
+- preempt_disable();
+-
+ retry:
++ local_irq_save(flags);
+ cpu = smp_processor_id();
+ old = atomic_cmpxchg(&dump_lock, -1, cpu);
+ if (old == -1) {
+@@ -43,6 +43,7 @@ retry:
+ } else if (old == cpu) {
+ was_locked = 1;
+ } else {
++ local_irq_restore(flags);
+ cpu_relax();
+ goto retry;
+ }
+@@ -52,7 +53,7 @@ retry:
+ if (!was_locked)
+ atomic_set(&dump_lock, -1);
+
+- preempt_enable();
++ local_irq_restore(flags);
+ }
+ #else
+ asmlinkage __visible void dump_stack(void)
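
Switching from preempt_disable() to local_irq_save() means the CPU that owns the dump lock can no longer be interrupted mid-dump by something that would spin on it, while the old == cpu check still permits deliberate nesting. The core of the owner-lock scheme, redone with C11 atomics in userspace (lock_dump() and my_cpu are illustrative; the irq masking itself has no userspace analogue):

  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_int dump_lock = -1;    /* -1: free, otherwise owner CPU */

  static int lock_dump(int my_cpu)
  {
      int expected;

      for (;;) {
          expected = -1;
          if (atomic_compare_exchange_strong(&dump_lock, &expected, my_cpu))
              return 1;                /* took it: remember to unlock */
          if (expected == my_cpu)
              return 0;                /* nested dump on this CPU */
          /* owned elsewhere: spin (cpu_relax() in the kernel) */
      }
  }

  static void unlock_dump(int taken)
  {
      if (taken)
          atomic_store(&dump_lock, -1);
  }

  int main(void)
  {
      int taken = lock_dump(0);
      printf("dumping stack (taken=%d)\n", taken);
      unlock_dump(taken);
      return 0;
  }
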
+diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
+index 6a08ce7d6adc..acf9da449f81 100644
+--- a/lib/libcrc32c.c
++++ b/lib/libcrc32c.c
+@@ -74,3 +74,4 @@ module_exit(libcrc32c_mod_fini);
+ MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
+ MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
+ MODULE_LICENSE("GPL");
++MODULE_SOFTDEP("pre: crc32c");
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 3d2aa27b845b..8399002aa0f0 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -1014,9 +1014,13 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
+ return 0;
+
+ radix_tree_for_each_slot(slot, root, &iter, first_index) {
+- results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
++ results[ret] = rcu_dereference_raw(*slot);
+ if (!results[ret])
+ continue;
++ if (radix_tree_is_indirect_ptr(results[ret])) {
++ slot = radix_tree_iter_retry(&iter);
++ continue;
++ }
+ if (++ret == max_items)
+ break;
+ }
+@@ -1093,9 +1097,13 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
+ return 0;
+
+ radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
+- results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
++ results[ret] = rcu_dereference_raw(*slot);
+ if (!results[ret])
+ continue;
++ if (radix_tree_is_indirect_ptr(results[ret])) {
++ slot = radix_tree_iter_retry(&iter);
++ continue;
++ }
+ if (++ret == max_items)
+ break;
+ }
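
Both gang-lookup hunks apply the recipe the new header helper was written for: dereference the slot raw, and if the value turns out to be an indirect pointer (the tree mutated under us), rewind the iterator rather than return a garbage entry. The caller-side pattern, as a sketch (root, struct item and process() are placeholders; the radix-tree calls are the real 4.1-era API, with radix_tree_deref_retry() being the documented wrapper around the indirect-pointer test):

  void **slot;
  struct radix_tree_iter iter;
  struct item *item;                       /* placeholder payload type */

  rcu_read_lock();
  radix_tree_for_each_slot(slot, root, &iter, 0) {
      item = rcu_dereference_raw(*slot);
      if (!item)
          continue;
      if (radix_tree_deref_retry(item)) {
          /* raced with an insert/delete: redo this chunk */
          slot = radix_tree_iter_retry(&iter);
          continue;
      }
      process(item);
  }
  rcu_read_unlock();
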
+diff --git a/lib/string_helpers.c b/lib/string_helpers.c
+index c98ae818eb4e..33e79b5eea77 100644
+--- a/lib/string_helpers.c
++++ b/lib/string_helpers.c
+@@ -43,46 +43,73 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
+ [STRING_UNITS_10] = 1000,
+ [STRING_UNITS_2] = 1024,
+ };
+- int i, j;
+- u32 remainder = 0, sf_cap, exp;
++ static const unsigned int rounding[] = { 500, 50, 5 };
++ int i = 0, j;
++ u32 remainder = 0, sf_cap;
+ char tmp[8];
+ const char *unit;
+
+ tmp[0] = '\0';
+- i = 0;
+- if (!size)
++
++ if (blk_size == 0)
++ size = 0;
++ if (size == 0)
+ goto out;
+
+- while (blk_size >= divisor[units]) {
+- remainder = do_div(blk_size, divisor[units]);
++ /* This is Napier's algorithm. Reduce the original block size to
++ *
++ * coefficient * divisor[units]^i
++ *
++ * We do the reduction so that both coefficients are just under 32 bits;
++ * multiplying them together then won't overflow 64 bits, and we keep
++ * as much precision as possible in the numbers.
++ *
++ * Note: it's safe to throw away the remainders here because all the
++ * precision is in the coefficients.
++ */
++ while (blk_size >> 32) {
++ do_div(blk_size, divisor[units]);
+ i++;
+ }
+
+- exp = divisor[units] / (u32)blk_size;
+- if (size >= exp) {
+- remainder = do_div(size, divisor[units]);
+- remainder *= blk_size;
++ while (size >> 32) {
++ do_div(size, divisor[units]);
+ i++;
+- } else {
+- remainder *= size;
+ }
+
++ /* now perform the actual multiplication keeping i as the sum of the
++ * two logarithms */
+ size *= blk_size;
+- size += remainder / divisor[units];
+- remainder %= divisor[units];
+
++ /* and logarithmically reduce it until it's just under the divisor */
+ while (size >= divisor[units]) {
+ remainder = do_div(size, divisor[units]);
+ i++;
+ }
+
++ /* work out in j how many digits of precision we need from the
++ * remainder */
+ sf_cap = size;
+ for (j = 0; sf_cap*10 < 1000; j++)
+ sf_cap *= 10;
+
+- if (j) {
++ if (units == STRING_UNITS_2) {
++ /* express the remainder as a decimal. It's currently the
++ * numerator of a fraction whose denominator is
++ * divisor[units], which is 1 << 10 for STRING_UNITS_2 */
+ remainder *= 1000;
+- remainder /= divisor[units];
++ remainder >>= 10;
++ }
++
++ /* add a 5 to the digit below what will be printed to ensure
++ * an arithmetic round-up, and carry it through to size */
++ remainder += rounding[j];
++ if (remainder >= 1000) {
++ remainder -= 1000;
++ size += 1;
++ }
++
++ if (j) {
+ snprintf(tmp, sizeof(tmp), ".%03u", remainder);
+ tmp[j+1] = '\0';
+ }
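
A worked example of the new rounding path: 1536 bytes in binary units divides once by 1024, leaving size = 1 and remainder = 512; size contributes one significant figure, so j = 2 fractional digits get printed. The remainder becomes 512 * 1000 >> 10 = 500 thousandths, rounding[2] = 5 is added (505, no carry), and the ".%03u" print plus the tmp[j+1] truncation yields "1.50 KiB". The same arithmetic, runnable on its own:

  #include <stdio.h>

  int main(void)
  {
      static const unsigned int rounding[] = { 500, 50, 5 };
      unsigned int size = 1, remainder = 512, j = 2;    /* 1536 B, KiB */
      char tmp[8];

      remainder *= 1000;            /* fraction in thousandths...     */
      remainder >>= 10;             /* ...of 1024: 500                */
      remainder += rounding[j];     /* round below the printed digits */
      if (remainder >= 1000) {
          remainder -= 1000;
          size += 1;
      }
      snprintf(tmp, sizeof(tmp), ".%03u", remainder);
      tmp[j + 1] = '\0';
      printf("%u%s KiB\n", size, tmp);    /* 1.50 KiB */
      return 0;
  }
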
+diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
+index fcad8322ef36..b640609bcd17 100644
+--- a/mm/balloon_compaction.c
++++ b/mm/balloon_compaction.c
+@@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
+ bool dequeued_page;
+
+ dequeued_page = false;
++ spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
+ /*
+ * Block others from accessing the 'page' while we get around
+@@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
+ continue;
+ }
+ #endif
+- spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ balloon_page_delete(page);
+ __count_vm_event(BALLOON_DEFLATE);
+- spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ unlock_page(page);
+ dequeued_page = true;
+ break;
+ }
+ }
++ spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+
+ if (!dequeued_page) {
+ /*
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 68dea90334cb..aac1c98a9bc7 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3824,16 +3824,17 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
+ swap_buffers:
+ /* Swap primary and spare array */
+ thresholds->spare = thresholds->primary;
+- /* If all events are unregistered, free the spare array */
+- if (!new) {
+- kfree(thresholds->spare);
+- thresholds->spare = NULL;
+- }
+
+ rcu_assign_pointer(thresholds->primary, new);
+
+ /* To be sure that nobody uses thresholds */
+ synchronize_rcu();
++
++ /* If all events are unregistered, free the spare array */
++ if (!new) {
++ kfree(thresholds->spare);
++ thresholds->spare = NULL;
++ }
+ unlock:
+ mutex_unlock(&memcg->thresholds_lock);
+ }
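
The reorder is the standard RCU retire rule: a reader that fetched the old primary pointer may still be walking what is now the spare array, so it can only be freed after synchronize_rcu() has flushed out all pre-publication readers. Stripped of the memcg specifics (struct table and replace_entries() are illustrative):

  #include <linux/rcupdate.h>
  #include <linux/slab.h>

  struct table {
      int __rcu *entries;
  };

  static void replace_entries(struct table *t, int *new_entries)
  {
      int *old = rcu_dereference_protected(t->entries, 1);

      rcu_assign_pointer(t->entries, new_entries);   /* publish */
      synchronize_rcu();        /* no reader can still hold 'old' */
      kfree(old);               /* freeing before the wait was the bug */
  }
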
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 9f48145c884f..e26bc59d7dff 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1557,7 +1557,7 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
+ * Did it turn free?
+ */
+ ret = __get_any_page(page, pfn, 0);
+- if (!PageLRU(page)) {
++ if (ret == 1 && !PageLRU(page)) {
+ /* Drop page reference which is from __get_any_page() */
+ put_page(page);
+ pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
+diff --git a/mm/mlock.c b/mm/mlock.c
+index 6fd2cf15e868..3d3ee6cad776 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -172,7 +172,7 @@ static void __munlock_isolation_failed(struct page *page)
+ */
+ unsigned int munlock_vma_page(struct page *page)
+ {
+- unsigned int nr_pages;
++ int nr_pages;
+ struct zone *zone = page_zone(page);
+
+ /* For try_to_munlock() and to serialize with page migration */
+diff --git a/mm/mmap.c b/mm/mmap.c
+index bb50cacc3ea5..b639fa2721d8 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -440,12 +440,16 @@ static void validate_mm(struct mm_struct *mm)
+ struct vm_area_struct *vma = mm->mmap;
+
+ while (vma) {
++ struct anon_vma *anon_vma = vma->anon_vma;
+ struct anon_vma_chain *avc;
+
+- vma_lock_anon_vma(vma);
+- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+- anon_vma_interval_tree_verify(avc);
+- vma_unlock_anon_vma(vma);
++ if (anon_vma) {
++ anon_vma_lock_read(anon_vma);
++ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
++ anon_vma_interval_tree_verify(avc);
++ anon_vma_unlock_read(anon_vma);
++ }
++
+ highest_address = vma->vm_end;
+ vma = vma->vm_next;
+ i++;
+@@ -2141,32 +2145,27 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+ */
+ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ {
+- int error;
++ int error = 0;
+
+ if (!(vma->vm_flags & VM_GROWSUP))
+ return -EFAULT;
+
+- /*
+- * We must make sure the anon_vma is allocated
+- * so that the anon_vma locking is not a noop.
+- */
++ /* Guard against wrapping around to address 0. */
++ if (address < PAGE_ALIGN(address+4))
++ address = PAGE_ALIGN(address+4);
++ else
++ return -ENOMEM;
++
++ /* We must make sure the anon_vma is allocated. */
+ if (unlikely(anon_vma_prepare(vma)))
+ return -ENOMEM;
+- vma_lock_anon_vma(vma);
+
+ /*
+ * vma->vm_start/vm_end cannot change under us because the caller
+ * is required to hold the mmap_sem in read mode. We need the
+ * anon_vma lock to serialize against concurrent expand_stacks.
+- * Also guard against wrapping around to address 0.
+ */
+- if (address < PAGE_ALIGN(address+4))
+- address = PAGE_ALIGN(address+4);
+- else {
+- vma_unlock_anon_vma(vma);
+- return -ENOMEM;
+- }
+- error = 0;
++ anon_vma_lock_write(vma->anon_vma);
+
+ /* Somebody else might have raced and expanded it already */
+ if (address > vma->vm_end) {
+@@ -2184,7 +2183,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ * updates, but we only hold a shared mmap_sem
+ * lock here, so we need to protect against
+ * concurrent vma expansions.
+- * vma_lock_anon_vma() doesn't help here, as
++ * anon_vma_lock_write() doesn't help here, as
+ * we don't guarantee that all growable vmas
+ * in a mm share the same root anon vma.
+ * So, we reuse mm->page_table_lock to guard
+@@ -2204,7 +2203,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ }
+ }
+ }
+- vma_unlock_anon_vma(vma);
++ anon_vma_unlock_write(vma->anon_vma);
+ khugepaged_enter_vma_merge(vma, vma->vm_flags);
+ validate_mm(vma->vm_mm);
+ return error;
+@@ -2219,25 +2218,21 @@ int expand_downwards(struct vm_area_struct *vma,
+ {
+ int error;
+
+- /*
+- * We must make sure the anon_vma is allocated
+- * so that the anon_vma locking is not a noop.
+- */
+- if (unlikely(anon_vma_prepare(vma)))
+- return -ENOMEM;
+-
+ address &= PAGE_MASK;
+ error = security_mmap_addr(address);
+ if (error)
+ return error;
+
+- vma_lock_anon_vma(vma);
++ /* We must make sure the anon_vma is allocated. */
++ if (unlikely(anon_vma_prepare(vma)))
++ return -ENOMEM;
+
+ /*
+ * vma->vm_start/vm_end cannot change under us because the caller
+ * is required to hold the mmap_sem in read mode. We need the
+ * anon_vma lock to serialize against concurrent expand_stacks.
+ */
++ anon_vma_lock_write(vma->anon_vma);
+
+ /* Somebody else might have raced and expanded it already */
+ if (address < vma->vm_start) {
+@@ -2255,7 +2250,7 @@ int expand_downwards(struct vm_area_struct *vma,
+ * updates, but we only hold a shared mmap_sem
+ * lock here, so we need to protect against
+ * concurrent vma expansions.
+- * vma_lock_anon_vma() doesn't help here, as
++ * anon_vma_lock_write() doesn't help here, as
+ * we don't guarantee that all growable vmas
+ * in a mm share the same root anon vma.
+ * So, we reuse mm->page_table_lock to guard
+@@ -2273,7 +2268,7 @@ int expand_downwards(struct vm_area_struct *vma,
+ }
+ }
+ }
+- vma_unlock_anon_vma(vma);
++ anon_vma_unlock_write(vma->anon_vma);
+ khugepaged_enter_vma_merge(vma, vma->vm_flags);
+ validate_mm(vma->vm_mm);
+ return error;
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index a8b5e749e84e..fb1ec10ce449 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -306,7 +306,12 @@ static void free_handle(struct zs_pool *pool, unsigned long handle)
+
+ static void record_obj(unsigned long handle, unsigned long obj)
+ {
+- *(unsigned long *)handle = obj;
++ /*
++ * The lsb of @obj represents the handle lock while the other bits
++ * represent the object value the handle points to, so the update
++ * must not do store tearing.
++ */
++ WRITE_ONCE(*(unsigned long *)handle, obj);
+ }
+
+ /* zpool driver */
+@@ -1641,6 +1646,13 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
+ free_obj = obj_malloc(d_page, class, handle);
+ zs_object_copy(used_obj, free_obj, class);
+ index++;
++ /*
++ * record_obj updates handle's value to free_obj and it will
++ * invalidate the lock bit (i.e. HANDLE_PIN_BIT) of the handle, which
++ * breaks synchronization via pin_tag() (e.g. in zs_free()), so
++ * let's keep the lock bit.
++ */
++ free_obj |= BIT(HANDLE_PIN_BIT);
+ record_obj(handle, free_obj);
+ unpin_tag(handle);
+ obj_free(pool, class, used_obj);
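
WRITE_ONCE() in record_obj() is about store tearing, not ordering: the handle word doubles as a pointer and a lock bit, and a plain assignment lets the compiler split the store into pieces that a concurrent pin_tag() could observe half-written. A kernel-style sketch of the rule (record() and PIN_BIT are illustrative stand-ins for record_obj() and HANDLE_PIN_BIT):

  #include <linux/bitops.h>
  #include <linux/compiler.h>

  #define PIN_BIT  0UL    /* bit 0 carries the lock */

  /* One aligned word store; the compiler may not tear it apart. */
  static void record(unsigned long *handle, unsigned long obj)
  {
      WRITE_ONCE(*handle, obj | BIT(PIN_BIT));    /* keep the pin held */
  }

The migrate_zspage() hunk is the other half of the fix: the migration path already holds the pin, so the value written back must keep the bit set, or zs_free() on another CPU could sneak in between record_obj() and unpin_tag().
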
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 967080a9f043..e51af69c61bf 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -675,6 +675,8 @@ static void reset_connection(struct ceph_connection *con)
+ }
+ con->in_seq = 0;
+ con->in_seq_acked = 0;
++
++ con->out_skip = 0;
+ }
+
+ /*
+@@ -774,6 +776,8 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
+
+ static void con_out_kvec_reset(struct ceph_connection *con)
+ {
++ BUG_ON(con->out_skip);
++
+ con->out_kvec_left = 0;
+ con->out_kvec_bytes = 0;
+ con->out_kvec_cur = &con->out_kvec[0];
+@@ -782,9 +786,9 @@ static void con_out_kvec_reset(struct ceph_connection *con)
+ static void con_out_kvec_add(struct ceph_connection *con,
+ size_t size, void *data)
+ {
+- int index;
++ int index = con->out_kvec_left;
+
+- index = con->out_kvec_left;
++ BUG_ON(con->out_skip);
+ BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
+
+ con->out_kvec[index].iov_len = size;
+@@ -793,6 +797,27 @@ static void con_out_kvec_add(struct ceph_connection *con,
+ con->out_kvec_bytes += size;
+ }
+
++/*
++ * Chop off a kvec from the end. Return residual number of bytes for
++ * that kvec, i.e. how many bytes would have been written if the kvec
++ * hadn't been nuked.
++ */
++static int con_out_kvec_skip(struct ceph_connection *con)
++{
++ int off = con->out_kvec_cur - con->out_kvec;
++ int skip = 0;
++
++ if (con->out_kvec_bytes > 0) {
++ skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
++ BUG_ON(con->out_kvec_bytes < skip);
++ BUG_ON(!con->out_kvec_left);
++ con->out_kvec_bytes -= skip;
++ con->out_kvec_left--;
++ }
++
++ return skip;
++}
++
+ #ifdef CONFIG_BLOCK
+
+ /*
+@@ -1200,7 +1225,6 @@ static void prepare_write_message_footer(struct ceph_connection *con)
+ m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
+
+ dout("prepare_write_message_footer %p\n", con);
+- con->out_kvec_is_msg = true;
+ con->out_kvec[v].iov_base = &m->footer;
+ if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
+ if (con->ops->sign_message)
+@@ -1228,7 +1252,6 @@ static void prepare_write_message(struct ceph_connection *con)
+ u32 crc;
+
+ con_out_kvec_reset(con);
+- con->out_kvec_is_msg = true;
+ con->out_msg_done = false;
+
+ /* Sneak an ack in there first? If we can get it into the same
+@@ -1268,18 +1291,19 @@ static void prepare_write_message(struct ceph_connection *con)
+
+ /* tag + hdr + front + middle */
+ con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
+- con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
++ con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
+ con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
+
+ if (m->middle)
+ con_out_kvec_add(con, m->middle->vec.iov_len,
+ m->middle->vec.iov_base);
+
+- /* fill in crc (except data pages), footer */
++ /* fill in hdr crc and finalize hdr */
+ crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
+ con->out_msg->hdr.crc = cpu_to_le32(crc);
+- con->out_msg->footer.flags = 0;
++ memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
+
++ /* fill in front and middle crc, footer */
+ crc = crc32c(0, m->front.iov_base, m->front.iov_len);
+ con->out_msg->footer.front_crc = cpu_to_le32(crc);
+ if (m->middle) {
+@@ -1291,6 +1315,7 @@ static void prepare_write_message(struct ceph_connection *con)
+ dout("%s front_crc %u middle_crc %u\n", __func__,
+ le32_to_cpu(con->out_msg->footer.front_crc),
+ le32_to_cpu(con->out_msg->footer.middle_crc));
++ con->out_msg->footer.flags = 0;
+
+ /* is there a data payload? */
+ con->out_msg->footer.data_crc = 0;
+@@ -1485,7 +1510,6 @@ static int write_partial_kvec(struct ceph_connection *con)
+ }
+ }
+ con->out_kvec_left = 0;
+- con->out_kvec_is_msg = false;
+ ret = 1;
+ out:
+ dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
+@@ -1577,6 +1601,7 @@ static int write_partial_skip(struct ceph_connection *con)
+ {
+ int ret;
+
++ dout("%s %p %d left\n", __func__, con, con->out_skip);
+ while (con->out_skip > 0) {
+ size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
+
+@@ -2493,13 +2518,13 @@ more:
+
+ more_kvec:
+ /* kvec data queued? */
+- if (con->out_skip) {
+- ret = write_partial_skip(con);
++ if (con->out_kvec_left) {
++ ret = write_partial_kvec(con);
+ if (ret <= 0)
+ goto out;
+ }
+- if (con->out_kvec_left) {
+- ret = write_partial_kvec(con);
++ if (con->out_skip) {
++ ret = write_partial_skip(con);
+ if (ret <= 0)
+ goto out;
+ }
+@@ -3026,16 +3051,31 @@ void ceph_msg_revoke(struct ceph_msg *msg)
+ ceph_msg_put(msg);
+ }
+ if (con->out_msg == msg) {
+- dout("%s %p msg %p - was sending\n", __func__, con, msg);
+- con->out_msg = NULL;
+- if (con->out_kvec_is_msg) {
+- con->out_skip = con->out_kvec_bytes;
+- con->out_kvec_is_msg = false;
++ BUG_ON(con->out_skip);
++ /* footer */
++ if (con->out_msg_done) {
++ con->out_skip += con_out_kvec_skip(con);
++ } else {
++ BUG_ON(!msg->data_length);
++ if (con->peer_features & CEPH_FEATURE_MSG_AUTH)
++ con->out_skip += sizeof(msg->footer);
++ else
++ con->out_skip += sizeof(msg->old_footer);
+ }
++ /* data, middle, front */
++ if (msg->data_length)
++ con->out_skip += msg->cursor.total_resid;
++ if (msg->middle)
++ con->out_skip += con_out_kvec_skip(con);
++ con->out_skip += con_out_kvec_skip(con);
++
++ dout("%s %p msg %p - was sending, will write %d skip %d\n",
++ __func__, con, msg, con->out_kvec_bytes, con->out_skip);
+ msg->hdr.seq = 0;
+-
++ con->out_msg = NULL;
+ ceph_msg_put(msg);
+ }
++
+ mutex_unlock(&con->mutex);
+ }
+
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index a9c9d961f039..41adfc898a18 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -1727,7 +1727,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
+ if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
+ continue;
+ sdata->u.ibss.last_scan_completed = jiffies;
+- ieee80211_queue_work(&local->hw, &sdata->work);
+ }
+ mutex_unlock(&local->iflist_mtx);
+ }
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index 817098add1d6..afcc67a157fd 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -1299,17 +1299,6 @@ out:
+ sdata_unlock(sdata);
+ }
+
+-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
+-{
+- struct ieee80211_sub_if_data *sdata;
+-
+- rcu_read_lock();
+- list_for_each_entry_rcu(sdata, &local->interfaces, list)
+- if (ieee80211_vif_is_mesh(&sdata->vif) &&
+- ieee80211_sdata_running(sdata))
+- ieee80211_queue_work(&local->hw, &sdata->work);
+- rcu_read_unlock();
+-}
+
+ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
+ {
+diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
+index 50c8473cf9dc..472bdc73e950 100644
+--- a/net/mac80211/mesh.h
++++ b/net/mac80211/mesh.h
+@@ -358,14 +358,10 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
+ return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
+ }
+
+-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
+-
+ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
+ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
+ void ieee80211s_stop(void);
+ #else
+-static inline void
+-ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
+ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
+ { return false; }
+ static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index a93906103f8b..844825829992 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -4002,8 +4002,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
+ if (!(flags & IEEE80211_HW_CONNECTION_MONITOR))
+ ieee80211_queue_work(&sdata->local->hw,
+ &sdata->u.mgd.monitor_work);
+- /* and do all the other regular work too */
+- ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+ }
+ }
+
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 7bb6a9383f58..ee9351affa5b 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -310,6 +310,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
+ bool was_scanning = local->scanning;
+ struct cfg80211_scan_request *scan_req;
+ struct ieee80211_sub_if_data *scan_sdata;
++ struct ieee80211_sub_if_data *sdata;
+
+ lockdep_assert_held(&local->mtx);
+
+@@ -369,7 +370,16 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
+
+ ieee80211_mlme_notify_scan_completed(local);
+ ieee80211_ibss_notify_scan_completed(local);
+- ieee80211_mesh_notify_scan_completed(local);
++
++ /* Requeue all the work that might have been ignored while
++ * the scan was in progress; if there was none this will
++ * just be a no-op for the particular interface.
++ */
++ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
++ if (ieee80211_sdata_running(sdata))
++ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
++ }
++
+ if (was_scanning)
+ ieee80211_start_next_roc(local);
+ }
+diff --git a/net/rfkill/core.c b/net/rfkill/core.c
+index fa7cd792791c..a97bb7332607 100644
+--- a/net/rfkill/core.c
++++ b/net/rfkill/core.c
+@@ -1081,17 +1081,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
+ return res;
+ }
+
+-static bool rfkill_readable(struct rfkill_data *data)
+-{
+- bool r;
+-
+- mutex_lock(&data->mtx);
+- r = !list_empty(&data->events);
+- mutex_unlock(&data->mtx);
+-
+- return r;
+-}
+-
+ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+ {
+@@ -1108,8 +1097,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+ goto out;
+ }
+ mutex_unlock(&data->mtx);
++ /* since we re-check and it just compares pointers,
++ * using !list_empty() without locking isn't a problem
++ */
+ ret = wait_event_interruptible(data->read_wait,
+- rfkill_readable(data));
++ !list_empty(&data->events));
+ mutex_lock(&data->mtx);
+
+ if (ret)
+diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
+index 23e78dcd12bf..38b64f487315 100755
+--- a/scripts/bloat-o-meter
++++ b/scripts/bloat-o-meter
+@@ -58,8 +58,8 @@ for name in common:
+ delta.sort()
+ delta.reverse()
+
+-print "add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
+- (add, remove, grow, shrink, up, -down, up-down)
+-print "%-40s %7s %7s %+7s" % ("function", "old", "new", "delta")
++print("add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
++ (add, remove, grow, shrink, up, -down, up-down))
++print("%-40s %7s %7s %+7s" % ("function", "old", "new", "delta"))
+ for d, n in delta:
+- if d: print "%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d)
++ if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
+diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
+index b123c42e7dc8..b554d7f9e3be 100644
+--- a/sound/core/compress_offload.c
++++ b/sound/core/compress_offload.c
+@@ -44,6 +44,13 @@
+ #include <sound/compress_offload.h>
+ #include <sound/compress_driver.h>
+
++/* struct snd_compr_codec_caps overflows the ioctl bit size for some
++ * architectures, so we need to disable the relevant ioctls.
++ */
++#if _IOC_SIZEBITS < 14
++#define COMPR_CODEC_CAPS_OVERFLOW
++#endif
++
+ /* TODO:
+ * - add substream support for multiple devices in case of
+ * SND_DYNAMIC_MINORS is not used
+@@ -438,6 +445,7 @@ out:
+ return retval;
+ }
+
++#ifndef COMPR_CODEC_CAPS_OVERFLOW
+ static int
+ snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
+ {
+@@ -461,6 +469,7 @@ out:
+ kfree(caps);
+ return retval;
+ }
++#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
+
+ /* revisit this with snd_pcm_preallocate_xxx */
+ static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
+@@ -799,9 +808,11 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
+ retval = snd_compr_get_caps(stream, arg);
+ break;
++#ifndef COMPR_CODEC_CAPS_OVERFLOW
+ case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
+ retval = snd_compr_get_codec_caps(stream, arg);
+ break;
++#endif
+ case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
+ retval = snd_compr_set_params(stream, arg);
+ break;
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index 58550cc93f28..33e72c809e50 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -834,7 +834,8 @@ static int choose_rate(struct snd_pcm_substream *substream,
+ return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
+ }
+
+-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
++static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
++ bool trylock)
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_pcm_hw_params *params, *sparams;
+@@ -848,7 +849,10 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
+ struct snd_mask sformat_mask;
+ struct snd_mask mask;
+
+- if (mutex_lock_interruptible(&runtime->oss.params_lock))
++ if (trylock) {
++ if (!(mutex_trylock(&runtime->oss.params_lock)))
++ return -EAGAIN;
++ } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
+ return -EINTR;
+ sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL);
+ params = kmalloc(sizeof(*params), GFP_KERNEL);
+@@ -1092,7 +1096,7 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
+ if (asubstream == NULL)
+ asubstream = substream;
+ if (substream->runtime->oss.params) {
+- err = snd_pcm_oss_change_params(substream);
++ err = snd_pcm_oss_change_params(substream, false);
+ if (err < 0)
+ return err;
+ }
+@@ -1132,7 +1136,7 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
+ return 0;
+ runtime = substream->runtime;
+ if (runtime->oss.params) {
+- err = snd_pcm_oss_change_params(substream);
++ err = snd_pcm_oss_change_params(substream, false);
+ if (err < 0)
+ return err;
+ }
+@@ -2163,7 +2167,7 @@ static int snd_pcm_oss_get_space(struct snd_pcm_oss_file *pcm_oss_file, int stre
+ runtime = substream->runtime;
+
+ if (runtime->oss.params &&
+- (err = snd_pcm_oss_change_params(substream)) < 0)
++ (err = snd_pcm_oss_change_params(substream, false)) < 0)
+ return err;
+
+ info.fragsize = runtime->oss.period_bytes;
+@@ -2800,7 +2804,12 @@ static int snd_pcm_oss_mmap(struct file *file, struct vm_area_struct *area)
+ return -EIO;
+
+ if (runtime->oss.params) {
+- if ((err = snd_pcm_oss_change_params(substream)) < 0)
++ /* use mutex_trylock() for params_lock to avoid a deadlock
++ * between mmap_sem and params_lock taken by
++ * copy_from/to_user() in snd_pcm_oss_write/read()
++ */
++ err = snd_pcm_oss_change_params(substream, true);
++ if (err < 0)
+ return err;
+ }
+ #ifdef CONFIG_SND_PCM_OSS_PLUGINS
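
The trylock matters because mmap is the one path that already holds mmap_sem when it needs params_lock; everywhere else the order is params_lock first, then mmap_sem (taken implicitly when copy_from_user()/copy_to_user() fault). Failing with -EAGAIN instead of blocking breaks the ABBA cycle. The trick in isolation, as a pthreads sketch (names are illustrative):

  #include <errno.h>
  #include <pthread.h>

  static pthread_mutex_t mmap_sem = PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t params_lock = PTHREAD_MUTEX_INITIALIZER;

  /* Path A (read/write): params_lock -> mmap_sem, safe to block. */
  /* Path B (mmap): mmap_sem is already held, so blocking on
   * params_lock could deadlock against path A; trylock instead. */
  static int path_b_step(void)
  {
      if (pthread_mutex_trylock(&params_lock) != 0)
          return -EAGAIN;        /* caller retries; cycle broken */
      /* ... reconfigure parameters ... */
      pthread_mutex_unlock(&params_lock);
      return 0;
  }
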
+diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
+index a7759846fbaa..795437b10082 100644
+--- a/sound/core/rawmidi.c
++++ b/sound/core/rawmidi.c
+@@ -942,31 +942,36 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
+ unsigned long flags;
+ long result = 0, count1;
+ struct snd_rawmidi_runtime *runtime = substream->runtime;
++ unsigned long appl_ptr;
+
++ spin_lock_irqsave(&runtime->lock, flags);
+ while (count > 0 && runtime->avail) {
+ count1 = runtime->buffer_size - runtime->appl_ptr;
+ if (count1 > count)
+ count1 = count;
+- spin_lock_irqsave(&runtime->lock, flags);
+ if (count1 > (int)runtime->avail)
+ count1 = runtime->avail;
++
++ /* update runtime->appl_ptr before unlocking for userbuf */
++ appl_ptr = runtime->appl_ptr;
++ runtime->appl_ptr += count1;
++ runtime->appl_ptr %= runtime->buffer_size;
++ runtime->avail -= count1;
++
+ if (kernelbuf)
+- memcpy(kernelbuf + result, runtime->buffer + runtime->appl_ptr, count1);
++ memcpy(kernelbuf + result, runtime->buffer + appl_ptr, count1);
+ if (userbuf) {
+ spin_unlock_irqrestore(&runtime->lock, flags);
+ if (copy_to_user(userbuf + result,
+- runtime->buffer + runtime->appl_ptr, count1)) {
++ runtime->buffer + appl_ptr, count1)) {
+ return result > 0 ? result : -EFAULT;
+ }
+ spin_lock_irqsave(&runtime->lock, flags);
+ }
+- runtime->appl_ptr += count1;
+- runtime->appl_ptr %= runtime->buffer_size;
+- runtime->avail -= count1;
+- spin_unlock_irqrestore(&runtime->lock, flags);
+ result += count1;
+ count -= count1;
+ }
++ spin_unlock_irqrestore(&runtime->lock, flags);
+ return result;
+ }
+
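
This hunk is the standard recipe for copying out of a ring buffer when the copy may sleep: reserve the region by advancing appl_ptr and avail while the lock is held, drop the lock only around copy_to_user(), and retake it before the next pass, so no two readers can be handed the same bytes. Reduced to its essentials (struct ring and ring_read_chunk() are illustrative, not ALSA API; buffer-wrap handling is elided):

  #include <linux/kernel.h>
  #include <linux/spinlock.h>
  #include <linux/uaccess.h>

  struct ring {
      spinlock_t lock;
      char *buf;
      size_t size, appl_ptr, avail;
  };

  static ssize_t ring_read_chunk(struct ring *r, char __user *ubuf,
                                 size_t count)
  {
      unsigned long flags;
      size_t pos, n;

      spin_lock_irqsave(&r->lock, flags);
      n = min(count, r->avail);
      pos = r->appl_ptr;                  /* reserve while locked */
      r->appl_ptr = (r->appl_ptr + n) % r->size;
      r->avail -= n;
      spin_unlock_irqrestore(&r->lock, flags);

      if (copy_to_user(ubuf, r->buf + pos, n))    /* may fault/sleep */
          return -EFAULT;
      return n;
  }
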
+@@ -1055,23 +1060,16 @@ int snd_rawmidi_transmit_empty(struct snd_rawmidi_substream *substream)
+ EXPORT_SYMBOL(snd_rawmidi_transmit_empty);
+
+ /**
+- * snd_rawmidi_transmit_peek - copy data from the internal buffer
++ * __snd_rawmidi_transmit_peek - copy data from the internal buffer
+ * @substream: the rawmidi substream
+ * @buffer: the buffer pointer
+ * @count: data size to transfer
+ *
+- * Copies data from the internal output buffer to the given buffer.
+- *
+- * Call this in the interrupt handler when the midi output is ready,
+- * and call snd_rawmidi_transmit_ack() after the transmission is
+- * finished.
+- *
+- * Return: The size of copied data, or a negative error code on failure.
++ * This is a variant of snd_rawmidi_transmit_peek() without spinlock.
+ */
+-int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
++int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ unsigned char *buffer, int count)
+ {
+- unsigned long flags;
+ int result, count1;
+ struct snd_rawmidi_runtime *runtime = substream->runtime;
+
+@@ -1081,7 +1079,6 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ return -EINVAL;
+ }
+ result = 0;
+- spin_lock_irqsave(&runtime->lock, flags);
+ if (runtime->avail >= runtime->buffer_size) {
+ /* warning: lowlevel layer MUST trigger down the hardware */
+ goto __skip;
+@@ -1106,25 +1103,47 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ }
+ }
+ __skip:
++ return result;
++}
++EXPORT_SYMBOL(__snd_rawmidi_transmit_peek);
++
++/**
++ * snd_rawmidi_transmit_peek - copy data from the internal buffer
++ * @substream: the rawmidi substream
++ * @buffer: the buffer pointer
++ * @count: data size to transfer
++ *
++ * Copies data from the internal output buffer to the given buffer.
++ *
++ * Call this in the interrupt handler when the midi output is ready,
++ * and call snd_rawmidi_transmit_ack() after the transmission is
++ * finished.
++ *
++ * Return: The size of copied data, or a negative error code on failure.
++ */
++int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
++ unsigned char *buffer, int count)
++{
++ struct snd_rawmidi_runtime *runtime = substream->runtime;
++ int result;
++ unsigned long flags;
++
++ spin_lock_irqsave(&runtime->lock, flags);
++ result = __snd_rawmidi_transmit_peek(substream, buffer, count);
+ spin_unlock_irqrestore(&runtime->lock, flags);
+ return result;
+ }
+ EXPORT_SYMBOL(snd_rawmidi_transmit_peek);
+
+ /**
+- * snd_rawmidi_transmit_ack - acknowledge the transmission
++ * __snd_rawmidi_transmit_ack - acknowledge the transmission
+ * @substream: the rawmidi substream
+ * @count: the transferred count
+ *
+- * Advances the hardware pointer for the internal output buffer with
+- * the given size and updates the condition.
+- * Call after the transmission is finished.
+- *
+- * Return: The advanced size if successful, or a negative error code on failure.
++ * This is a variant of snd_rawmidi_transmit_ack() without spinlock.
+ */
+-int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
++int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+ {
+- unsigned long flags;
+ struct snd_rawmidi_runtime *runtime = substream->runtime;
+
+ if (runtime->buffer == NULL) {
+@@ -1132,7 +1151,6 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+ "snd_rawmidi_transmit_ack: output is not active!!!\n");
+ return -EINVAL;
+ }
+- spin_lock_irqsave(&runtime->lock, flags);
+ snd_BUG_ON(runtime->avail + count > runtime->buffer_size);
+ runtime->hw_ptr += count;
+ runtime->hw_ptr %= runtime->buffer_size;
+@@ -1142,9 +1160,32 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+ if (runtime->drain || snd_rawmidi_ready(substream))
+ wake_up(&runtime->sleep);
+ }
+- spin_unlock_irqrestore(&runtime->lock, flags);
+ return count;
+ }
++EXPORT_SYMBOL(__snd_rawmidi_transmit_ack);
++
++/**
++ * snd_rawmidi_transmit_ack - acknowledge the transmission
++ * @substream: the rawmidi substream
++ * @count: the transferred count
++ *
++ * Advances the hardware pointer for the internal output buffer with
++ * the given size and updates the condition.
++ * Call after the transmission is finished.
++ *
++ * Return: The advanced size if successful, or a negative error code on failure.
++ */
++int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
++{
++ struct snd_rawmidi_runtime *runtime = substream->runtime;
++ int result;
++ unsigned long flags;
++
++ spin_lock_irqsave(&runtime->lock, flags);
++ result = __snd_rawmidi_transmit_ack(substream, count);
++ spin_unlock_irqrestore(&runtime->lock, flags);
++ return result;
++}
+ EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
+
+ /**
+@@ -1160,12 +1201,22 @@ EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
+ int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
+ unsigned char *buffer, int count)
+ {
++ struct snd_rawmidi_runtime *runtime = substream->runtime;
++ int result;
++ unsigned long flags;
++
++ spin_lock_irqsave(&runtime->lock, flags);
+ if (!substream->opened)
+- return -EBADFD;
+- count = snd_rawmidi_transmit_peek(substream, buffer, count);
+- if (count < 0)
+- return count;
+- return snd_rawmidi_transmit_ack(substream, count);
++ result = -EBADFD;
++ else {
++ count = __snd_rawmidi_transmit_peek(substream, buffer, count);
++ if (count <= 0)
++ result = count;
++ else
++ result = __snd_rawmidi_transmit_ack(substream, count);
++ }
++ spin_unlock_irqrestore(&runtime->lock, flags);
++ return result;
+ }
+ EXPORT_SYMBOL(snd_rawmidi_transmit);
+
+@@ -1177,8 +1228,9 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
+ unsigned long flags;
+ long count1, result;
+ struct snd_rawmidi_runtime *runtime = substream->runtime;
++ unsigned long appl_ptr;
+
+- if (snd_BUG_ON(!kernelbuf && !userbuf))
++ if (!kernelbuf && !userbuf)
+ return -EINVAL;
+ if (snd_BUG_ON(!runtime->buffer))
+ return -EINVAL;
+@@ -1197,12 +1249,19 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
+ count1 = count;
+ if (count1 > (long)runtime->avail)
+ count1 = runtime->avail;
++
++ /* update runtime->appl_ptr before unlocking for userbuf */
++ appl_ptr = runtime->appl_ptr;
++ runtime->appl_ptr += count1;
++ runtime->appl_ptr %= runtime->buffer_size;
++ runtime->avail -= count1;
++
+ if (kernelbuf)
+- memcpy(runtime->buffer + runtime->appl_ptr,
++ memcpy(runtime->buffer + appl_ptr,
+ kernelbuf + result, count1);
+ else if (userbuf) {
+ spin_unlock_irqrestore(&runtime->lock, flags);
+- if (copy_from_user(runtime->buffer + runtime->appl_ptr,
++ if (copy_from_user(runtime->buffer + appl_ptr,
+ userbuf + result, count1)) {
+ spin_lock_irqsave(&runtime->lock, flags);
+ result = result > 0 ? result : -EFAULT;
+@@ -1210,9 +1269,6 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
+ }
+ spin_lock_irqsave(&runtime->lock, flags);
+ }
+- runtime->appl_ptr += count1;
+- runtime->appl_ptr %= runtime->buffer_size;
+- runtime->avail -= count1;
+ result += count1;
+ count -= count1;
+ }
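
[Annotation: the rawmidi hunks above change snd_rawmidi_kernel_read1()/write1() to advance appl_ptr and shrink avail while runtime->lock is still held, so the window where the lock is dropped for copy_to_user()/copy_from_user() can no longer let a concurrent reader or writer claim the same bytes. Below is a minimal userspace sketch of that reserve-then-copy pattern; struct ring, ring_read and the pthread mutex standing in for the spinlock are all illustrative, not the kernel's.]

#include <pthread.h>
#include <string.h>

struct ring {
	pthread_mutex_t lock;
	unsigned char buf[4096];
	size_t size, appl_ptr, avail;
};

size_t ring_read(struct ring *r, unsigned char *dst, size_t count)
{
	size_t result = 0;

	pthread_mutex_lock(&r->lock);
	while (count > 0 && r->avail > 0) {
		size_t chunk = r->size - r->appl_ptr;	/* bytes until wrap */
		size_t claimed;

		if (chunk > count)
			chunk = count;
		if (chunk > r->avail)
			chunk = r->avail;

		/* reserve the region before unlocking, as the patch does,
		 * so a concurrent reader cannot claim the same bytes */
		claimed = r->appl_ptr;
		r->appl_ptr = (r->appl_ptr + chunk) % r->size;
		r->avail -= chunk;

		pthread_mutex_unlock(&r->lock);	/* stands in for copy_to_user() */
		memcpy(dst + result, r->buf + claimed, chunk);
		pthread_mutex_lock(&r->lock);

		result += chunk;
		count -= chunk;
	}
	pthread_mutex_unlock(&r->lock);
	return result;
}
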
+diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
+index 2de3feff70d0..dad5b1123e46 100644
+--- a/sound/core/seq/oss/seq_oss_init.c
++++ b/sound/core/seq/oss/seq_oss_init.c
+@@ -202,7 +202,7 @@ snd_seq_oss_open(struct file *file, int level)
+
+ dp->index = i;
+ if (i >= SNDRV_SEQ_OSS_MAX_CLIENTS) {
+- pr_err("ALSA: seq_oss: too many applications\n");
++ pr_debug("ALSA: seq_oss: too many applications\n");
+ rc = -ENOMEM;
+ goto _error;
+ }
+diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
+index 48e4fe1b68ab..f38cf91b4faf 100644
+--- a/sound/core/seq/oss/seq_oss_synth.c
++++ b/sound/core/seq/oss/seq_oss_synth.c
+@@ -308,7 +308,7 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
+ struct seq_oss_synth *rec;
+ struct seq_oss_synthinfo *info;
+
+- if (snd_BUG_ON(dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
++ if (snd_BUG_ON(dp->max_synthdev > SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
+ return;
+ for (i = 0; i < dp->max_synthdev; i++) {
+ info = &dp->synths[i];
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index bd4741442909..ce6703ecfcef 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -678,6 +678,9 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
+ else
+ down_read(&grp->list_mutex);
+ list_for_each_entry(subs, &grp->list_head, src_list) {
++ /* both ports ready? */
++ if (atomic_read(&subs->ref_count) != 2)
++ continue;
+ event->dest = subs->info.dest;
+ if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
+ /* convert time according to flag with subscription */
+diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
+index 55170a20ae72..921fb2bd8fad 100644
+--- a/sound/core/seq/seq_ports.c
++++ b/sound/core/seq/seq_ports.c
+@@ -173,10 +173,6 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
+ }
+
+ /* */
+-enum group_type {
+- SRC_LIST, DEST_LIST
+-};
+-
+ static int subscribe_port(struct snd_seq_client *client,
+ struct snd_seq_client_port *port,
+ struct snd_seq_port_subs_info *grp,
+@@ -203,6 +199,20 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
+ return NULL;
+ }
+
++static void delete_and_unsubscribe_port(struct snd_seq_client *client,
++ struct snd_seq_client_port *port,
++ struct snd_seq_subscribers *subs,
++ bool is_src, bool ack);
++
++static inline struct snd_seq_subscribers *
++get_subscriber(struct list_head *p, bool is_src)
++{
++ if (is_src)
++ return list_entry(p, struct snd_seq_subscribers, src_list);
++ else
++ return list_entry(p, struct snd_seq_subscribers, dest_list);
++}
++
+ /*
+ * remove all subscribers on the list
+ * this is called from port_delete, for each src and dest list.
+@@ -210,7 +220,7 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
+ static void clear_subscriber_list(struct snd_seq_client *client,
+ struct snd_seq_client_port *port,
+ struct snd_seq_port_subs_info *grp,
+- int grptype)
++ int is_src)
+ {
+ struct list_head *p, *n;
+
+@@ -219,15 +229,13 @@ static void clear_subscriber_list(struct snd_seq_client *client,
+ struct snd_seq_client *c;
+ struct snd_seq_client_port *aport;
+
+- if (grptype == SRC_LIST) {
+- subs = list_entry(p, struct snd_seq_subscribers, src_list);
++ subs = get_subscriber(p, is_src);
++ if (is_src)
+ aport = get_client_port(&subs->info.dest, &c);
+- } else {
+- subs = list_entry(p, struct snd_seq_subscribers, dest_list);
++ else
+ aport = get_client_port(&subs->info.sender, &c);
+- }
+- list_del(p);
+- unsubscribe_port(client, port, grp, &subs->info, 0);
++ delete_and_unsubscribe_port(client, port, subs, is_src, false);
++
+ if (!aport) {
+ /* looks like the connected port is being deleted.
+ * we decrease the counter, and when both ports are deleted
+@@ -235,21 +243,14 @@ static void clear_subscriber_list(struct snd_seq_client *client,
+ */
+ if (atomic_dec_and_test(&subs->ref_count))
+ kfree(subs);
+- } else {
+- /* ok we got the connected port */
+- struct snd_seq_port_subs_info *agrp;
+- agrp = (grptype == SRC_LIST) ? &aport->c_dest : &aport->c_src;
+- down_write(&agrp->list_mutex);
+- if (grptype == SRC_LIST)
+- list_del(&subs->dest_list);
+- else
+- list_del(&subs->src_list);
+- up_write(&agrp->list_mutex);
+- unsubscribe_port(c, aport, agrp, &subs->info, 1);
+- kfree(subs);
+- snd_seq_port_unlock(aport);
+- snd_seq_client_unlock(c);
++ continue;
+ }
++
++ /* ok we got the connected port */
++ delete_and_unsubscribe_port(c, aport, subs, !is_src, true);
++ kfree(subs);
++ snd_seq_port_unlock(aport);
++ snd_seq_client_unlock(c);
+ }
+ }
+
+@@ -262,8 +263,8 @@ static int port_delete(struct snd_seq_client *client,
+ snd_use_lock_sync(&port->use_lock);
+
+ /* clear subscribers info */
+- clear_subscriber_list(client, port, &port->c_src, SRC_LIST);
+- clear_subscriber_list(client, port, &port->c_dest, DEST_LIST);
++ clear_subscriber_list(client, port, &port->c_src, true);
++ clear_subscriber_list(client, port, &port->c_dest, false);
+
+ if (port->private_free)
+ port->private_free(port->private_data);
+@@ -479,85 +480,120 @@ static int match_subs_info(struct snd_seq_port_subscribe *r,
+ return 0;
+ }
+
+-
+-/* connect two ports */
+-int snd_seq_port_connect(struct snd_seq_client *connector,
+- struct snd_seq_client *src_client,
+- struct snd_seq_client_port *src_port,
+- struct snd_seq_client *dest_client,
+- struct snd_seq_client_port *dest_port,
+- struct snd_seq_port_subscribe *info)
++static int check_and_subscribe_port(struct snd_seq_client *client,
++ struct snd_seq_client_port *port,
++ struct snd_seq_subscribers *subs,
++ bool is_src, bool exclusive, bool ack)
+ {
+- struct snd_seq_port_subs_info *src = &src_port->c_src;
+- struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
+- struct snd_seq_subscribers *subs, *s;
+- int err, src_called = 0;
+- unsigned long flags;
+- int exclusive;
++ struct snd_seq_port_subs_info *grp;
++ struct list_head *p;
++ struct snd_seq_subscribers *s;
++ int err;
+
+- subs = kzalloc(sizeof(*subs), GFP_KERNEL);
+- if (! subs)
+- return -ENOMEM;
+-
+- subs->info = *info;
+- atomic_set(&subs->ref_count, 2);
+-
+- down_write(&src->list_mutex);
+- down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
+-
+- exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0;
++ grp = is_src ? &port->c_src : &port->c_dest;
+ err = -EBUSY;
++ down_write(&grp->list_mutex);
+ if (exclusive) {
+- if (! list_empty(&src->list_head) || ! list_empty(&dest->list_head))
++ if (!list_empty(&grp->list_head))
+ goto __error;
+ } else {
+- if (src->exclusive || dest->exclusive)
++ if (grp->exclusive)
+ goto __error;
+ /* check whether already exists */
+- list_for_each_entry(s, &src->list_head, src_list) {
+- if (match_subs_info(info, &s->info))
+- goto __error;
+- }
+- list_for_each_entry(s, &dest->list_head, dest_list) {
+- if (match_subs_info(info, &s->info))
++ list_for_each(p, &grp->list_head) {
++ s = get_subscriber(p, is_src);
++ if (match_subs_info(&subs->info, &s->info))
+ goto __error;
+ }
+ }
+
+- if ((err = subscribe_port(src_client, src_port, src, info,
+- connector->number != src_client->number)) < 0)
+- goto __error;
+- src_called = 1;
+-
+- if ((err = subscribe_port(dest_client, dest_port, dest, info,
+- connector->number != dest_client->number)) < 0)
++ err = subscribe_port(client, port, grp, &subs->info, ack);
++ if (err < 0) {
++ grp->exclusive = 0;
+ goto __error;
++ }
+
+ /* add to list */
+- write_lock_irqsave(&src->list_lock, flags);
+- // write_lock(&dest->list_lock); // no other lock yet
+- list_add_tail(&subs->src_list, &src->list_head);
+- list_add_tail(&subs->dest_list, &dest->list_head);
+- // write_unlock(&dest->list_lock); // no other lock yet
+- write_unlock_irqrestore(&src->list_lock, flags);
++ write_lock_irq(&grp->list_lock);
++ if (is_src)
++ list_add_tail(&subs->src_list, &grp->list_head);
++ else
++ list_add_tail(&subs->dest_list, &grp->list_head);
++ grp->exclusive = exclusive;
++ atomic_inc(&subs->ref_count);
++ write_unlock_irq(&grp->list_lock);
++ err = 0;
++
++ __error:
++ up_write(&grp->list_mutex);
++ return err;
++}
+
+- src->exclusive = dest->exclusive = exclusive;
++static void delete_and_unsubscribe_port(struct snd_seq_client *client,
++ struct snd_seq_client_port *port,
++ struct snd_seq_subscribers *subs,
++ bool is_src, bool ack)
++{
++ struct snd_seq_port_subs_info *grp;
++
++ grp = is_src ? &port->c_src : &port->c_dest;
++ down_write(&grp->list_mutex);
++ write_lock_irq(&grp->list_lock);
++ if (is_src)
++ list_del(&subs->src_list);
++ else
++ list_del(&subs->dest_list);
++ grp->exclusive = 0;
++ write_unlock_irq(&grp->list_lock);
++ up_write(&grp->list_mutex);
++
++ unsubscribe_port(client, port, grp, &subs->info, ack);
++}
++
++/* connect two ports */
++int snd_seq_port_connect(struct snd_seq_client *connector,
++ struct snd_seq_client *src_client,
++ struct snd_seq_client_port *src_port,
++ struct snd_seq_client *dest_client,
++ struct snd_seq_client_port *dest_port,
++ struct snd_seq_port_subscribe *info)
++{
++ struct snd_seq_subscribers *subs;
++ bool exclusive;
++ int err;
++
++ subs = kzalloc(sizeof(*subs), GFP_KERNEL);
++ if (!subs)
++ return -ENOMEM;
++
++ subs->info = *info;
++ atomic_set(&subs->ref_count, 0);
++ INIT_LIST_HEAD(&subs->src_list);
++ INIT_LIST_HEAD(&subs->dest_list);
++
++ exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE);
++
++ err = check_and_subscribe_port(src_client, src_port, subs, true,
++ exclusive,
++ connector->number != src_client->number);
++ if (err < 0)
++ goto error;
++ err = check_and_subscribe_port(dest_client, dest_port, subs, false,
++ exclusive,
++ connector->number != dest_client->number);
++ if (err < 0)
++ goto error_dest;
+
+- up_write(&dest->list_mutex);
+- up_write(&src->list_mutex);
+ return 0;
+
+- __error:
+- if (src_called)
+- unsubscribe_port(src_client, src_port, src, info,
+- connector->number != src_client->number);
++ error_dest:
++ delete_and_unsubscribe_port(src_client, src_port, subs, true,
++ connector->number != src_client->number);
++ error:
+ kfree(subs);
+- up_write(&dest->list_mutex);
+- up_write(&src->list_mutex);
+ return err;
+ }
+
+-
+ /* remove the connection */
+ int snd_seq_port_disconnect(struct snd_seq_client *connector,
+ struct snd_seq_client *src_client,
+@@ -567,37 +603,28 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
+ struct snd_seq_port_subscribe *info)
+ {
+ struct snd_seq_port_subs_info *src = &src_port->c_src;
+- struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
+ struct snd_seq_subscribers *subs;
+ int err = -ENOENT;
+- unsigned long flags;
+
+ down_write(&src->list_mutex);
+- down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
+-
+ /* look for the connection */
+ list_for_each_entry(subs, &src->list_head, src_list) {
+ if (match_subs_info(info, &subs->info)) {
+- write_lock_irqsave(&src->list_lock, flags);
+- // write_lock(&dest->list_lock); // no lock yet
+- list_del(&subs->src_list);
+- list_del(&subs->dest_list);
+- // write_unlock(&dest->list_lock);
+- write_unlock_irqrestore(&src->list_lock, flags);
+- src->exclusive = dest->exclusive = 0;
+- unsubscribe_port(src_client, src_port, src, info,
+- connector->number != src_client->number);
+- unsubscribe_port(dest_client, dest_port, dest, info,
+- connector->number != dest_client->number);
+- kfree(subs);
++ atomic_dec(&subs->ref_count); /* mark as not ready */
+ err = 0;
+ break;
+ }
+ }
+-
+- up_write(&dest->list_mutex);
+ up_write(&src->list_mutex);
+- return err;
++ if (err < 0)
++ return err;
++
++ delete_and_unsubscribe_port(src_client, src_port, subs, true,
++ connector->number != src_client->number);
++ delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
++ connector->number != dest_client->number);
++ kfree(subs);
++ return 0;
+ }
+
+
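
[Annotation: the seq_ports rework above splits connect/disconnect into per-port check_and_subscribe_port()/delete_and_unsubscribe_port() helpers and turns subs->ref_count into a readiness count: it starts at 0, each successfully attached side adds 1, disconnect decrements it first to mark the entry not ready, and deliver_to_subscribers() in the earlier seq_clientmgr.c hunk only delivers when the count is exactly 2. A rough userspace model of that two-sided handshake, using C11 atomics; all names here (subs_attach_side, etc.) are invented.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct subscription {
	atomic_int ref_count;	/* 0 = fresh, 2 = both ends attached */
};

struct subscription *subs_create(void)
{
	struct subscription *s = calloc(1, sizeof(*s));

	if (s)
		atomic_init(&s->ref_count, 0);
	return s;
}

/* one side's list insertion succeeded: count it in */
void subs_attach_side(struct subscription *s)
{
	atomic_fetch_add(&s->ref_count, 1);
}

/* delivery path: skip half-connected or half-torn-down entries */
bool subs_is_ready(struct subscription *s)
{
	return atomic_load(&s->ref_count) == 2;
}

/* teardown: readiness drops immediately; free once both sides are gone */
void subs_detach_side(struct subscription *s)
{
	if (atomic_fetch_sub(&s->ref_count, 1) == 1)
		free(s);
}
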
+diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
+index 186f1611103c..a2468f1101d1 100644
+--- a/sound/core/seq/seq_timer.c
++++ b/sound/core/seq/seq_timer.c
+@@ -90,6 +90,9 @@ void snd_seq_timer_delete(struct snd_seq_timer **tmr)
+
+ void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
+ {
++ unsigned long flags;
++
++ spin_lock_irqsave(&tmr->lock, flags);
+ /* setup defaults */
+ tmr->ppq = 96; /* 96 PPQ */
+ tmr->tempo = 500000; /* 120 BPM */
+@@ -105,21 +108,25 @@ void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
+ tmr->preferred_resolution = seq_default_timer_resolution;
+
+ tmr->skew = tmr->skew_base = SKEW_BASE;
++ spin_unlock_irqrestore(&tmr->lock, flags);
+ }
+
+-void snd_seq_timer_reset(struct snd_seq_timer * tmr)
++static void seq_timer_reset(struct snd_seq_timer *tmr)
+ {
+- unsigned long flags;
+-
+- spin_lock_irqsave(&tmr->lock, flags);
+-
+ /* reset time & songposition */
+ tmr->cur_time.tv_sec = 0;
+ tmr->cur_time.tv_nsec = 0;
+
+ tmr->tick.cur_tick = 0;
+ tmr->tick.fraction = 0;
++}
++
++void snd_seq_timer_reset(struct snd_seq_timer *tmr)
++{
++ unsigned long flags;
+
++ spin_lock_irqsave(&tmr->lock, flags);
++ seq_timer_reset(tmr);
+ spin_unlock_irqrestore(&tmr->lock, flags);
+ }
+
+@@ -138,8 +145,11 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
+ tmr = q->timer;
+ if (tmr == NULL)
+ return;
+- if (!tmr->running)
++ spin_lock_irqsave(&tmr->lock, flags);
++ if (!tmr->running) {
++ spin_unlock_irqrestore(&tmr->lock, flags);
+ return;
++ }
+
+ resolution *= ticks;
+ if (tmr->skew != tmr->skew_base) {
+@@ -148,8 +158,6 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
+ (((resolution & 0xffff) * tmr->skew) >> 16);
+ }
+
+- spin_lock_irqsave(&tmr->lock, flags);
+-
+ /* update timer */
+ snd_seq_inc_time_nsec(&tmr->cur_time, resolution);
+
+@@ -296,26 +304,30 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
+ t->callback = snd_seq_timer_interrupt;
+ t->callback_data = q;
+ t->flags |= SNDRV_TIMER_IFLG_AUTO;
++ spin_lock_irq(&tmr->lock);
+ tmr->timeri = t;
++ spin_unlock_irq(&tmr->lock);
+ return 0;
+ }
+
+ int snd_seq_timer_close(struct snd_seq_queue *q)
+ {
+ struct snd_seq_timer *tmr;
++ struct snd_timer_instance *t;
+
+ tmr = q->timer;
+ if (snd_BUG_ON(!tmr))
+ return -EINVAL;
+- if (tmr->timeri) {
+- snd_timer_stop(tmr->timeri);
+- snd_timer_close(tmr->timeri);
+- tmr->timeri = NULL;
+- }
++ spin_lock_irq(&tmr->lock);
++ t = tmr->timeri;
++ tmr->timeri = NULL;
++ spin_unlock_irq(&tmr->lock);
++ if (t)
++ snd_timer_close(t);
+ return 0;
+ }
+
+-int snd_seq_timer_stop(struct snd_seq_timer * tmr)
++static int seq_timer_stop(struct snd_seq_timer *tmr)
+ {
+ if (! tmr->timeri)
+ return -EINVAL;
+@@ -326,6 +338,17 @@ int snd_seq_timer_stop(struct snd_seq_timer * tmr)
+ return 0;
+ }
+
++int snd_seq_timer_stop(struct snd_seq_timer *tmr)
++{
++ unsigned long flags;
++ int err;
++
++ spin_lock_irqsave(&tmr->lock, flags);
++ err = seq_timer_stop(tmr);
++ spin_unlock_irqrestore(&tmr->lock, flags);
++ return err;
++}
++
+ static int initialize_timer(struct snd_seq_timer *tmr)
+ {
+ struct snd_timer *t;
+@@ -358,13 +381,13 @@ static int initialize_timer(struct snd_seq_timer *tmr)
+ return 0;
+ }
+
+-int snd_seq_timer_start(struct snd_seq_timer * tmr)
++static int seq_timer_start(struct snd_seq_timer *tmr)
+ {
+ if (! tmr->timeri)
+ return -EINVAL;
+ if (tmr->running)
+- snd_seq_timer_stop(tmr);
+- snd_seq_timer_reset(tmr);
++ seq_timer_stop(tmr);
++ seq_timer_reset(tmr);
+ if (initialize_timer(tmr) < 0)
+ return -EINVAL;
+ snd_timer_start(tmr->timeri, tmr->ticks);
+@@ -373,14 +396,25 @@ int snd_seq_timer_start(struct snd_seq_timer * tmr)
+ return 0;
+ }
+
+-int snd_seq_timer_continue(struct snd_seq_timer * tmr)
++int snd_seq_timer_start(struct snd_seq_timer *tmr)
++{
++ unsigned long flags;
++ int err;
++
++ spin_lock_irqsave(&tmr->lock, flags);
++ err = seq_timer_start(tmr);
++ spin_unlock_irqrestore(&tmr->lock, flags);
++ return err;
++}
++
++static int seq_timer_continue(struct snd_seq_timer *tmr)
+ {
+ if (! tmr->timeri)
+ return -EINVAL;
+ if (tmr->running)
+ return -EBUSY;
+ if (! tmr->initialized) {
+- snd_seq_timer_reset(tmr);
++ seq_timer_reset(tmr);
+ if (initialize_timer(tmr) < 0)
+ return -EINVAL;
+ }
+@@ -390,11 +424,24 @@ int snd_seq_timer_continue(struct snd_seq_timer * tmr)
+ return 0;
+ }
+
++int snd_seq_timer_continue(struct snd_seq_timer *tmr)
++{
++ unsigned long flags;
++ int err;
++
++ spin_lock_irqsave(&tmr->lock, flags);
++ err = seq_timer_continue(tmr);
++ spin_unlock_irqrestore(&tmr->lock, flags);
++ return err;
++}
++
+ /* return current 'real' time. use timeofday() to get better granularity. */
+ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
+ {
+ snd_seq_real_time_t cur_time;
++ unsigned long flags;
+
++ spin_lock_irqsave(&tmr->lock, flags);
+ cur_time = tmr->cur_time;
+ if (tmr->running) {
+ struct timeval tm;
+@@ -410,7 +457,7 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
+ }
+ snd_seq_sanity_real_time(&cur_time);
+ }
+-
++ spin_unlock_irqrestore(&tmr->lock, flags);
+ return cur_time;
+ }
+
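
[Annotation: the seq_timer diff above applies one refactoring shape throughout: the logic moves into unlocked static helpers (seq_timer_stop(), seq_timer_reset(), seq_timer_start(), ...) and the exported entry points become thin wrappers that take tmr->lock, so compound operations such as start (stop + reset + program) run in a single critical section instead of retaking the lock per step. A compilable sketch of that shape under a pthread mutex; the names are invented, with the kernel-style double-underscore prefix marking the lock-already-held variants.]

#include <errno.h>
#include <pthread.h>

struct seq_timer_model {
	pthread_mutex_t lock;
	int running;
	long ticks;
};

static int __model_stop(struct seq_timer_model *t)
{
	if (!t->running)
		return -EINVAL;
	t->running = 0;
	return 0;
}

static int __model_start(struct seq_timer_model *t, long ticks)
{
	if (t->running)
		__model_stop(t);	/* lock already held: no re-acquire */
	t->ticks = ticks;
	t->running = 1;
	return 0;
}

int model_stop(struct seq_timer_model *t)
{
	int err;

	pthread_mutex_lock(&t->lock);
	err = __model_stop(t);
	pthread_mutex_unlock(&t->lock);
	return err;
}

int model_start(struct seq_timer_model *t, long ticks)
{
	int err;

	pthread_mutex_lock(&t->lock);
	err = __model_start(t, ticks);	/* stop+start as one critical section */
	pthread_mutex_unlock(&t->lock);
	return err;
}
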
+diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
+index 56e0f4cd3f82..81134e067184 100644
+--- a/sound/core/seq/seq_virmidi.c
++++ b/sound/core/seq/seq_virmidi.c
+@@ -155,21 +155,26 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
+ struct snd_virmidi *vmidi = substream->runtime->private_data;
+ int count, res;
+ unsigned char buf[32], *pbuf;
++ unsigned long flags;
+
+ if (up) {
+ vmidi->trigger = 1;
+ if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
+ !(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
+- snd_rawmidi_transmit_ack(substream, substream->runtime->buffer_size - substream->runtime->avail);
+- return; /* ignored */
++ while (snd_rawmidi_transmit(substream, buf,
++ sizeof(buf)) > 0) {
++ /* ignored */
++ }
++ return;
+ }
+ if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
+ if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
+ return;
+ vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
+ }
++ spin_lock_irqsave(&substream->runtime->lock, flags);
+ while (1) {
+- count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
++ count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
+ if (count <= 0)
+ break;
+ pbuf = buf;
+@@ -179,16 +184,18 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
+ snd_midi_event_reset_encode(vmidi->parser);
+ continue;
+ }
+- snd_rawmidi_transmit_ack(substream, res);
++ __snd_rawmidi_transmit_ack(substream, res);
+ pbuf += res;
+ count -= res;
+ if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
+ if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
+- return;
++ goto out;
+ vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
+ }
+ }
+ }
++ out:
++ spin_unlock_irqrestore(&substream->runtime->lock, flags);
+ } else {
+ vmidi->trigger = 0;
+ }
+@@ -254,9 +261,13 @@ static int snd_virmidi_output_open(struct snd_rawmidi_substream *substream)
+ */
+ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
+ {
++ struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
+ struct snd_virmidi *vmidi = substream->runtime->private_data;
+- snd_midi_event_free(vmidi->parser);
++
++ write_lock_irq(&rdev->filelist_lock);
+ list_del(&vmidi->list);
++ write_unlock_irq(&rdev->filelist_lock);
++ snd_midi_event_free(vmidi->parser);
+ substream->runtime->private_data = NULL;
+ kfree(vmidi);
+ return 0;
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index a419878901c4..00e8c5f4de17 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -305,8 +305,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ return 0;
+ }
+
+-static int _snd_timer_stop(struct snd_timer_instance *timeri,
+- int keep_flag, int event);
++static int _snd_timer_stop(struct snd_timer_instance *timeri, int event);
+
+ /*
+ * close a timer instance
+@@ -348,7 +347,7 @@ int snd_timer_close(struct snd_timer_instance *timeri)
+ spin_unlock_irq(&timer->lock);
+ mutex_lock(&register_mutex);
+ list_del(&timeri->open_list);
+- if (timer && list_empty(&timer->open_list_head) &&
++ if (list_empty(&timer->open_list_head) &&
+ timer->hw.close)
+ timer->hw.close(timer);
+ /* remove slave links */
+@@ -452,6 +451,10 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
+ unsigned long flags;
+
+ spin_lock_irqsave(&slave_active_lock, flags);
++ if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
++ spin_unlock_irqrestore(&slave_active_lock, flags);
++ return -EBUSY;
++ }
+ timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
+ if (timeri->master && timeri->timer) {
+ spin_lock(&timeri->timer->lock);
+@@ -476,7 +479,8 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
+ return -EINVAL;
+ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
+ result = snd_timer_start_slave(timeri);
+- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
++ if (result >= 0)
++ snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+ return result;
+ }
+ timer = timeri->timer;
+@@ -485,16 +489,22 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
+ if (timer->card && timer->card->shutdown)
+ return -ENODEV;
+ spin_lock_irqsave(&timer->lock, flags);
++ if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
++ SNDRV_TIMER_IFLG_START)) {
++ result = -EBUSY;
++ goto unlock;
++ }
+ timeri->ticks = timeri->cticks = ticks;
+ timeri->pticks = 0;
+ result = snd_timer_start1(timer, timeri, ticks);
++ unlock:
+ spin_unlock_irqrestore(&timer->lock, flags);
+- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
++ if (result >= 0)
++ snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+ return result;
+ }
+
+-static int _snd_timer_stop(struct snd_timer_instance * timeri,
+- int keep_flag, int event)
++static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
+ {
+ struct snd_timer *timer;
+ unsigned long flags;
+@@ -503,19 +513,26 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
+ return -ENXIO;
+
+ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
+- if (!keep_flag) {
+- spin_lock_irqsave(&slave_active_lock, flags);
+- timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+- list_del_init(&timeri->ack_list);
+- list_del_init(&timeri->active_list);
++ spin_lock_irqsave(&slave_active_lock, flags);
++ if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
+ spin_unlock_irqrestore(&slave_active_lock, flags);
++ return -EBUSY;
+ }
++ timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
++ list_del_init(&timeri->ack_list);
++ list_del_init(&timeri->active_list);
++ spin_unlock_irqrestore(&slave_active_lock, flags);
+ goto __end;
+ }
+ timer = timeri->timer;
+ if (!timer)
+ return -EINVAL;
+ spin_lock_irqsave(&timer->lock, flags);
++ if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
++ SNDRV_TIMER_IFLG_START))) {
++ spin_unlock_irqrestore(&timer->lock, flags);
++ return -EBUSY;
++ }
+ list_del_init(&timeri->ack_list);
+ list_del_init(&timeri->active_list);
+ if (timer->card && timer->card->shutdown) {
+@@ -534,9 +551,7 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
+ }
+ }
+ }
+- if (!keep_flag)
+- timeri->flags &=
+- ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
++ timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ __end:
+ if (event != SNDRV_TIMER_EVENT_RESOLUTION)
+@@ -555,7 +570,7 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
+ unsigned long flags;
+ int err;
+
+- err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP);
++ err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP);
+ if (err < 0)
+ return err;
+ timer = timeri->timer;
+@@ -587,10 +602,15 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
+ if (timer->card && timer->card->shutdown)
+ return -ENODEV;
+ spin_lock_irqsave(&timer->lock, flags);
++ if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
++ result = -EBUSY;
++ goto unlock;
++ }
+ if (!timeri->cticks)
+ timeri->cticks = 1;
+ timeri->pticks = 0;
+ result = snd_timer_start1(timer, timeri, timer->sticks);
++ unlock:
+ spin_unlock_irqrestore(&timer->lock, flags);
+ snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
+ return result;
+@@ -601,7 +621,7 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
+ */
+ int snd_timer_pause(struct snd_timer_instance * timeri)
+ {
+- return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE);
++ return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE);
+ }
+
+ /*
+@@ -724,8 +744,8 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
+ ti->cticks = ti->ticks;
+ } else {
+ ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+- if (--timer->running)
+- list_del_init(&ti->active_list);
++ --timer->running;
++ list_del_init(&ti->active_list);
+ }
+ if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
+ (ti->flags & SNDRV_TIMER_IFLG_FAST))
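
[Annotation: the timer.c hunks above make start/stop transitions stateful: starting an instance whose RUNNING/START flags are already set, or stopping one where neither is set, now fails with -EBUSY under the lock, and SNDRV_TIMER_EVENT_START is only notified when the start actually took effect. The same guard modeled roughly in userspace C; timer_inst and notify_event are made-up stand-ins.]

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct timer_inst {
	pthread_mutex_t lock;
	int running;
};

static void notify_event(const char *what)
{
	printf("event: %s\n", what);	/* stands in for snd_timer_notify1() */
}

int inst_start(struct timer_inst *ti)
{
	int err = 0;

	pthread_mutex_lock(&ti->lock);
	if (ti->running)
		err = -EBUSY;	/* second start is rejected, not re-armed */
	else
		ti->running = 1;
	pthread_mutex_unlock(&ti->lock);

	if (err >= 0)
		notify_event("START");	/* only after a real transition */
	return err;
}

int inst_stop(struct timer_inst *ti)
{
	int err = 0;

	pthread_mutex_lock(&ti->lock);
	if (!ti->running)
		err = -EBUSY;	/* stop without start is rejected */
	else
		ti->running = 0;
	pthread_mutex_unlock(&ti->lock);
	return err;
}
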
+diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
+index d11baaf0f0b4..96592d5ba7bf 100644
+--- a/sound/drivers/dummy.c
++++ b/sound/drivers/dummy.c
+@@ -87,7 +87,7 @@ MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-128) for dummy driver.");
+ module_param(fake_buffer, bool, 0444);
+ MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations.");
+ #ifdef CONFIG_HIGH_RES_TIMERS
+-module_param(hrtimer, bool, 0644);
++module_param(hrtimer, bool, 0444);
+ MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source.");
+ #endif
+
+diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
+index 98e4fc8121a1..5e547cb199f0 100644
+--- a/sound/firewire/bebob/bebob_stream.c
++++ b/sound/firewire/bebob/bebob_stream.c
+@@ -47,14 +47,16 @@ static const unsigned int bridgeco_freq_table[] = {
+ [6] = 0x07,
+ };
+
+-static unsigned int
+-get_formation_index(unsigned int rate)
++static int
++get_formation_index(unsigned int rate, unsigned int *index)
+ {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(snd_bebob_rate_table); i++) {
+- if (snd_bebob_rate_table[i] == rate)
+- return i;
++ if (snd_bebob_rate_table[i] == rate) {
++ *index = i;
++ return 0;
++ }
+ }
+ return -EINVAL;
+ }
+@@ -367,7 +369,9 @@ make_both_connections(struct snd_bebob *bebob, unsigned int rate)
+ goto end;
+
+ /* confirm params for both streams */
+- index = get_formation_index(rate);
++ err = get_formation_index(rate, &index);
++ if (err < 0)
++ goto end;
+ pcm_channels = bebob->tx_stream_formations[index].pcm;
+ midi_channels = bebob->tx_stream_formations[index].midi;
+ amdtp_stream_set_parameters(&bebob->tx_stream,
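
[Annotation: the bebob fix above addresses a classic unsigned-return bug. get_formation_index() was declared to return unsigned int yet returned -EINVAL on a bogus rate, so the error became a huge value and was used as an array index; the new signature returns a status code and passes the index through an out-parameter, which the caller now checks. The same shape in a self-contained sketch; rate_table and find_rate_index are illustrative.]

#include <errno.h>
#include <stddef.h>

static const unsigned int rate_table[] = { 32000, 44100, 48000, 88200, 96000 };

static int find_rate_index(unsigned int rate, unsigned int *index)
{
	size_t i;

	for (i = 0; i < sizeof(rate_table) / sizeof(rate_table[0]); i++) {
		if (rate_table[i] == rate) {
			*index = (unsigned int)i;
			return 0;	/* success: *index is valid */
		}
	}
	return -EINVAL;		/* error can no longer masquerade as an index */
}
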
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index b791529bf31c..8f50a257a80d 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -614,6 +614,7 @@ enum {
+ CS4208_MAC_AUTO,
+ CS4208_MBA6,
+ CS4208_MBP11,
++ CS4208_MACMINI,
+ CS4208_GPIO0,
+ };
+
+@@ -621,6 +622,7 @@ static const struct hda_model_fixup cs4208_models[] = {
+ { .id = CS4208_GPIO0, .name = "gpio0" },
+ { .id = CS4208_MBA6, .name = "mba6" },
+ { .id = CS4208_MBP11, .name = "mbp11" },
++ { .id = CS4208_MACMINI, .name = "macmini" },
+ {}
+ };
+
+@@ -632,6 +634,7 @@ static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
+ /* codec SSID matching */
+ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
++ SND_PCI_QUIRK(0x106b, 0x6c00, "MacMini 7,1", CS4208_MACMINI),
+ SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
+ SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
+ SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
+@@ -666,6 +669,24 @@ static void cs4208_fixup_mac(struct hda_codec *codec,
+ snd_hda_apply_fixup(codec, action);
+ }
+
++/* MacMini 7,1 has inverted jack detection */
++static void cs4208_fixup_macmini(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ static const struct hda_pintbl pincfgs[] = {
++ { 0x18, 0x00ab9150 }, /* mic (audio-in) jack: disable detect */
++ { 0x21, 0x004be140 }, /* SPDIF: disable detect */
++ { }
++ };
++
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++ /* HP pin (0x10) has an inverted detection */
++ codec->inv_jack_detect = 1;
++ /* disable the bogus Mic and SPDIF jack detections */
++ snd_hda_apply_pincfgs(codec, pincfgs);
++ }
++}
++
+ static int cs4208_spdif_sw_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+@@ -709,6 +730,12 @@ static const struct hda_fixup cs4208_fixups[] = {
+ .chained = true,
+ .chain_id = CS4208_GPIO0,
+ },
++ [CS4208_MACMINI] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = cs4208_fixup_macmini,
++ .chained = true,
++ .chain_id = CS4208_GPIO0,
++ },
+ [CS4208_GPIO0] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cs4208_fixup_gpio0,
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index fb9a8a5787a6..37d8ababfc04 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1118,6 +1118,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ switch (chip->usb_id) {
+ case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
+ case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
++ case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
+ case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
+ case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+ case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+@@ -1202,8 +1203,12 @@ void snd_usb_set_interface_quirk(struct usb_device *dev)
+ * "Playback Design" products need a 50ms delay after setting the
+ * USB interface.
+ */
+- if (le16_to_cpu(dev->descriptor.idVendor) == 0x23ba)
++ switch (le16_to_cpu(dev->descriptor.idVendor)) {
++ case 0x23ba: /* Playback Design */
++ case 0x0644: /* TEAC Corp. */
+ mdelay(50);
++ break;
++ }
+ }
+
+ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+@@ -1218,6 +1223,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ mdelay(20);
+
++ /*
++ * "TEAC Corp." products need a 20ms delay after each
++ * class compliant request
++ */
++ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x0644) &&
++ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
++ mdelay(20);
++
+ /* Marantz/Denon devices with USB DAC functionality need a delay
+ * after each class compliant request
+ */
+@@ -1266,7 +1279,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
+ case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
+ case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
+- case USB_ID(0x22d8, 0x0416): /* OPPO HA-1*/
++ case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
+ if (fp->altsetting == 2)
+ return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ break;
+@@ -1275,6 +1288,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
+ case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
+ case USB_ID(0x20b1, 0x3023): /* Aune X1S 32BIT/384 DSD DAC */
++ case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
+ if (fp->altsetting == 3)
+ return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ break;
+diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
+index ed5461f065bd..f64a2d54d467 100644
+--- a/tools/lib/traceevent/event-parse.c
++++ b/tools/lib/traceevent/event-parse.c
+@@ -4841,13 +4841,12 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
+ sizeof(long) != 8) {
+ char *p;
+
+- ls = 2;
+ /* make %l into %ll */
+- p = strchr(format, 'l');
+- if (p)
++ if (ls == 1 && (p = strchr(format, 'l')))
+ memmove(p+1, p, strlen(p)+1);
+ else if (strcmp(format, "%p") == 0)
+ strcpy(format, "0x%llx");
++ ls = 2;
+ }
+ switch (ls) {
+ case -2:
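
[Annotation: the event-parse hunk above restricts the %l-to-%ll widening to the ls == 1 case and moves the ls = 2 assignment after the rewrite, so a format that is already long long is not widened a second time. The in-place widening trick itself is shown standalone below; widen_long_format is a made-up wrapper around the same memmove/strcpy logic.]

#include <stdio.h>
#include <string.h>

/* widen "%l..." to "%ll..." in place by shifting the tail right one byte */
static void widen_long_format(char *format, size_t cap)
{
	char *p = strchr(format, 'l');

	if (p && strlen(format) + 1 < cap)
		memmove(p + 1, p, strlen(p) + 1);	/* duplicates the 'l' */
	else if (strcmp(format, "%p") == 0 && cap >= sizeof("0x%llx"))
		strcpy(format, "0x%llx");
}

int main(void)
{
	char fmt[16] = "%lu";

	widen_long_format(fmt, sizeof(fmt));
	printf("%s\n", fmt);	/* prints "%llu" */
	return 0;
}
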
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 0c74012575ac..83054ef6c1a1 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -816,7 +816,7 @@ static struct machine *machines__find_for_cpumode(struct machines *machines,
+
+ machine = machines__find(machines, pid);
+ if (!machine)
+- machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
++ machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
+ return machine;
+ }
+